From 5b5336f456d6f67c380a3e4f857e31dbca382dec Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 13:02:48 +0100 Subject: [PATCH 1/6] add first pass of zstdsharp into library. Full fat framework doesn't compile. Marked lib as not CLS compliant again --- src/SharpCompress/AssemblyInfo.cs | 2 +- .../Compressors/ZStandard/BitOperations.cs | 249 + .../ZStandard/CompressionStream.cs | 234 + .../Compressors/ZStandard/Compressor.cs | 162 + .../Compressors/ZStandard/Constants.cs | 9 + .../ZStandard/DecompressionStream.cs | 258 + .../Compressors/ZStandard/Decompressor.cs | 133 + .../Compressors/ZStandard/SafeHandles.cs | 168 + .../Compressors/ZStandard/ThrowHelper.cs | 43 + .../ZStandard/Unsafe/Allocations.cs | 44 + .../ZStandard/Unsafe/BIT_CStream_t.cs | 15 + .../ZStandard/Unsafe/BIT_DStream_status.cs | 14 + .../ZStandard/Unsafe/BIT_DStream_t.cs | 14 + .../Compressors/ZStandard/Unsafe/Bits.cs | 57 + .../Compressors/ZStandard/Unsafe/Bitstream.cs | 619 ++ .../ZStandard/Unsafe/BlockSummary.cs | 9 + .../ZStandard/Unsafe/COVER_best_s.cs | 21 + .../ZStandard/Unsafe/COVER_ctx_t.cs | 20 + .../ZStandard/Unsafe/COVER_dictSelection.cs | 12 + .../ZStandard/Unsafe/COVER_epoch_info_t.cs | 11 + .../ZStandard/Unsafe/COVER_map_pair_t_s.cs | 8 + .../ZStandard/Unsafe/COVER_map_s.cs | 10 + .../ZStandard/Unsafe/COVER_segment_t.cs | 12 + .../Unsafe/COVER_tryParameters_data_s.cs | 13 + .../Compressors/ZStandard/Unsafe/Clevels.cs | 113 + .../Compressors/ZStandard/Unsafe/Compiler.cs | 62 + .../Compressors/ZStandard/Unsafe/Cover.cs | 367 + .../ZStandard/Unsafe/DTableDesc.cs | 13 + .../ZStandard/Unsafe/EStats_ress_t.cs | 12 + .../ZStandard/Unsafe/EntropyCommon.cs | 323 + .../ZStandard/Unsafe/ErrorPrivate.cs | 111 + .../ZStandard/Unsafe/EstimatedBlockSize.cs | 8 + .../ZStandard/Unsafe/FASTCOVER_accel_t.cs | 18 + .../ZStandard/Unsafe/FASTCOVER_ctx_t.cs | 20 + .../Unsafe/FASTCOVER_tryParameters_data_s.cs | 13 + .../Compressors/ZStandard/Unsafe/FPStats.cs | 8 + 
.../ZStandard/Unsafe/FSE_CState_t.cs | 17 + .../ZStandard/Unsafe/FSE_DState_t.cs | 12 + .../ZStandard/Unsafe/FSE_DTableHeader.cs | 9 + .../ZStandard/Unsafe/FSE_DecompressWksp.cs | 7 + .../ZStandard/Unsafe/FSE_decode_t.cs | 9 + .../ZStandard/Unsafe/FSE_repeat.cs | 12 + .../Unsafe/FSE_symbolCompressionTransform.cs | 11 + .../Compressors/ZStandard/Unsafe/Fastcover.cs | 601 ++ .../ZStandard/Unsafe/Fingerprint.cs | 8 + .../Compressors/ZStandard/Unsafe/Fse.cs | 157 + .../ZStandard/Unsafe/FseCompress.cs | 660 ++ .../ZStandard/Unsafe/FseDecompress.cs | 269 + .../ZStandard/Unsafe/HIST_checkInput_e.cs | 8 + .../ZStandard/Unsafe/HUF_CStream_t.cs | 22 + .../ZStandard/Unsafe/HUF_CTableHeader.cs | 9 + .../Unsafe/HUF_CompressWeightsWksp.cs | 10 + .../ZStandard/Unsafe/HUF_DEltX1.cs | 12 + .../ZStandard/Unsafe/HUF_DEltX2.cs | 13 + .../Unsafe/HUF_DecompressFastArgs.cs | 49 + .../Unsafe/HUF_ReadDTableX1_Workspace.cs | 11 + .../Unsafe/HUF_ReadDTableX2_Workspace.cs | 307 + .../ZStandard/Unsafe/HUF_WriteCTableWksp.cs | 10 + .../Unsafe/HUF_buildCTable_wksp_tables.cs | 739 ++ .../ZStandard/Unsafe/HUF_compress_tables_t.cs | 280 + .../ZStandard/Unsafe/HUF_flags_e.cs | 40 + .../ZStandard/Unsafe/HUF_nbStreams_e.cs | 8 + .../ZStandard/Unsafe/HUF_repeat.cs | 12 + .../Compressors/ZStandard/Unsafe/Hist.cs | 208 + .../ZStandard/Unsafe/HufCompress.cs | 1288 +++ .../ZStandard/Unsafe/HufDecompress.cs | 2043 ++++ .../Compressors/ZStandard/Unsafe/Mem.cs | 161 + .../ZStandard/Unsafe/RSyncState_t.cs | 9 + .../Compressors/ZStandard/Unsafe/Range.cs | 14 + .../ZStandard/Unsafe/RawSeqStore_t.cs | 25 + .../ZStandard/Unsafe/RoundBuff_t.cs | 26 + .../ZStandard/Unsafe/SeqCollector.cs | 10 + .../Compressors/ZStandard/Unsafe/SeqDef_s.cs | 14 + .../ZStandard/Unsafe/SeqStore_t.cs | 24 + .../ZStandard/Unsafe/SerialState.cs | 21 + .../ZStandard/Unsafe/SymbolEncodingType_e.cs | 10 + .../Compressors/ZStandard/Unsafe/SyncPoint.cs | 10 + .../ZStandard/Unsafe/XXH32_canonical_t.cs | 11 + 
.../ZStandard/Unsafe/XXH32_state_s.cs | 30 + .../ZStandard/Unsafe/XXH64_canonical_t.cs | 10 + .../ZStandard/Unsafe/XXH64_state_s.cs | 30 + .../ZStandard/Unsafe/XXH_alignment.cs | 14 + .../ZStandard/Unsafe/XXH_errorcode.cs | 13 + .../Compressors/ZStandard/Unsafe/Xxhash.cs | 598 ++ .../ZStandard/Unsafe/ZDICT_cover_params_t.cs | 25 + .../Unsafe/ZDICT_fastCover_params_t.cs | 25 + .../ZStandard/Unsafe/ZDICT_legacy_params_t.cs | 9 + .../ZStandard/Unsafe/ZDICT_params_t.cs | 19 + .../ZStandard/Unsafe/ZSTDMT_CCtxPool.cs | 13 + .../ZStandard/Unsafe/ZSTDMT_CCtx_s.cs | 32 + .../ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs | 12 + .../ZStandard/Unsafe/ZSTDMT_jobDescription.cs | 44 + .../Unsafe/ZSTD_BlockCompressor_f.cs | 7 + .../ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs | 8 + .../ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_CCtx_params_s.cs | 70 + .../ZStandard/Unsafe/ZSTD_CCtx_s.cs | 74 + .../ZStandard/Unsafe/ZSTD_CDict_s.cs | 27 + .../ZStandard/Unsafe/ZSTD_CParamMode_e.cs | 27 + .../ZStandard/Unsafe/ZSTD_DCtx_s.cs | 80 + .../ZStandard/Unsafe/ZSTD_DDictHashSet.cs | 10 + .../ZStandard/Unsafe/ZSTD_DDict_s.cs | 16 + .../ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_EndDirective.cs | 20 + .../ZStandard/Unsafe/ZSTD_ErrorCode.cs | 58 + .../ZStandard/Unsafe/ZSTD_MatchState_t.cs | 54 + .../ZStandard/Unsafe/ZSTD_OffsetInfo.cs | 8 + .../ZStandard/Unsafe/ZSTD_OptPrice_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_ResetDirective.cs | 9 + .../ZStandard/Unsafe/ZSTD_Sequence.cs | 38 + .../ZStandard/Unsafe/ZSTD_SequenceLength.cs | 8 + .../ZStandard/Unsafe/ZSTD_SequencePosition.cs | 12 + .../ZStandard/Unsafe/ZSTD_blockSplitCtx.cs | 13 + .../ZStandard/Unsafe/ZSTD_blockState_t.cs | 9 + .../ZStandard/Unsafe/ZSTD_bounds.cs | 9 + .../ZStandard/Unsafe/ZSTD_bufferMode_e.cs | 11 + .../Unsafe/ZSTD_buffered_policy_e.cs | 13 + .../ZStandard/Unsafe/ZSTD_cParameter.cs | 198 + .../ZStandard/Unsafe/ZSTD_cStreamStage.cs | 9 + .../Unsafe/ZSTD_compResetPolicy_e.cs | 15 + 
.../Unsafe/ZSTD_compressedBlockState_t.cs | 8 + .../Unsafe/ZSTD_compressionParameters.cs | 30 + .../Unsafe/ZSTD_compressionStage_e.cs | 13 + .../ZStandard/Unsafe/ZSTD_customMem.cs | 15 + .../ZStandard/Unsafe/ZSTD_cwksp.cs | 111 + .../Unsafe/ZSTD_cwksp_alloc_phase_e.cs | 13 + .../Unsafe/ZSTD_cwksp_static_alloc_e.cs | 13 + .../ZStandard/Unsafe/ZSTD_dParameter.cs | 38 + .../ZStandard/Unsafe/ZSTD_dStage.cs | 14 + .../ZStandard/Unsafe/ZSTD_dStreamStage.cs | 11 + .../ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs | 14 + .../Unsafe/ZSTD_dictContentType_e.cs | 12 + .../ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs | 10 + .../ZStandard/Unsafe/ZSTD_dictMode_e.cs | 10 + .../Unsafe/ZSTD_dictTableLoadMethod_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_dictUses_e.cs | 12 + .../Unsafe/ZSTD_entropyCTablesMetadata_t.cs | 8 + .../ZStandard/Unsafe/ZSTD_entropyCTables_t.cs | 8 + .../ZStandard/Unsafe/ZSTD_entropyDTables_t.cs | 1339 +++ .../Unsafe/ZSTD_forceIgnoreChecksum_e.cs | 9 + .../ZStandard/Unsafe/ZSTD_format_e.cs | 12 + .../ZStandard/Unsafe/ZSTD_frameHeader.cs | 19 + .../ZStandard/Unsafe/ZSTD_frameParameters.cs | 12 + .../ZStandard/Unsafe/ZSTD_frameProgression.cs | 18 + .../ZStandard/Unsafe/ZSTD_frameSizeInfo.cs | 15 + .../ZStandard/Unsafe/ZSTD_frameType_e.cs | 8 + .../Unsafe/ZSTD_fseCTablesMetadata_t.cs | 18 + .../ZStandard/Unsafe/ZSTD_fseCTables_t.cs | 12 + .../ZStandard/Unsafe/ZSTD_fseState.cs | 8 + .../ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs | 7 + .../Unsafe/ZSTD_hufCTablesMetadata_t.cs | 17 + .../ZStandard/Unsafe/ZSTD_hufCTables_t.cs | 279 + .../ZStandard/Unsafe/ZSTD_inBuffer_s.cs | 15 + .../Unsafe/ZSTD_indexResetPolicy_e.cs | 13 + .../ZStandard/Unsafe/ZSTD_litLocation_e.cs | 12 + .../Unsafe/ZSTD_literalCompressionMode_e.cs | 15 + .../ZStandard/Unsafe/ZSTD_localDict.cs | 11 + .../ZStandard/Unsafe/ZSTD_longLengthType_e.cs | 13 + .../ZStandard/Unsafe/ZSTD_longOffset_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_match_t.cs | 13 + .../ZStandard/Unsafe/ZSTD_nextInputType_e.cs | 12 + 
.../ZStandard/Unsafe/ZSTD_optLdm_t.cs | 15 + .../ZStandard/Unsafe/ZSTD_optimal_t.cs | 16 + .../ZStandard/Unsafe/ZSTD_outBuffer_s.cs | 12 + .../ZStandard/Unsafe/ZSTD_overlap_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_paramSwitch_e.cs | 12 + .../ZStandard/Unsafe/ZSTD_parameters.cs | 8 + .../ZStandard/Unsafe/ZSTD_prefixDict_s.cs | 9 + .../Unsafe/ZSTD_refMultipleDDicts_e.cs | 9 + .../ZStandard/Unsafe/ZSTD_resetTarget_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_seqSymbol.cs | 17 + .../ZStandard/Unsafe/ZSTD_seqSymbol_header.cs | 11 + .../ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs | 10 + .../ZStandard/Unsafe/ZSTD_strategy.cs | 16 + .../Unsafe/ZSTD_symbolEncodingTypeStats_t.cs | 16 + .../Unsafe/ZSTD_tableFillPurpose_e.cs | 8 + .../ZStandard/Unsafe/ZSTD_window_t.cs | 21 + .../Compressors/ZStandard/Unsafe/Zdict.cs | 501 + .../Compressors/ZStandard/Unsafe/Zstd.cs | 7 + .../ZStandard/Unsafe/ZstdCommon.cs | 49 + .../ZStandard/Unsafe/ZstdCompress.cs | 8222 +++++++++++++++++ .../ZStandard/Unsafe/ZstdCompressInternal.cs | 1113 +++ .../ZStandard/Unsafe/ZstdCompressLiterals.cs | 199 + .../ZStandard/Unsafe/ZstdCompressSequences.cs | 662 ++ .../Unsafe/ZstdCompressSuperblock.cs | 585 ++ .../Compressors/ZStandard/Unsafe/ZstdCwksp.cs | 518 ++ .../Compressors/ZStandard/Unsafe/ZstdDdict.cs | 237 + .../ZStandard/Unsafe/ZstdDecompress.cs | 2878 ++++++ .../ZStandard/Unsafe/ZstdDecompressBlock.cs | 2138 +++++ .../Unsafe/ZstdDecompressInternal.cs | 205 + .../ZStandard/Unsafe/ZstdDoubleFast.cs | 873 ++ .../Compressors/ZStandard/Unsafe/ZstdFast.cs | 941 ++ .../ZStandard/Unsafe/ZstdInternal.cs | 385 + .../Compressors/ZStandard/Unsafe/ZstdLazy.cs | 2670 ++++++ .../Compressors/ZStandard/Unsafe/ZstdLdm.cs | 757 ++ .../ZStandard/Unsafe/ZstdLdmGeartab.cs | 275 + .../Compressors/ZStandard/Unsafe/ZstdOpt.cs | 1435 +++ .../ZStandard/Unsafe/ZstdPresplit.cs | 246 + .../ZStandard/Unsafe/ZstdmtCompress.cs | 1617 ++++ .../ZStandard/Unsafe/_wksps_e__Union.cs | 15 + .../ZStandard/Unsafe/algo_time_t.cs | 13 + 
.../ZStandard/Unsafe/base_directive_e.cs | 8 + .../ZStandard/Unsafe/blockProperties_t.cs | 9 + .../ZStandard/Unsafe/blockType_e.cs | 10 + .../Compressors/ZStandard/Unsafe/buffer_s.cs | 15 + .../Compressors/ZStandard/Unsafe/dictItem.cs | 9 + .../Compressors/ZStandard/Unsafe/inBuff_t.cs | 13 + .../ZStandard/Unsafe/ldmEntry_t.cs | 8 + .../ZStandard/Unsafe/ldmMatchCandidate_t.cs | 10 + .../ZStandard/Unsafe/ldmParams_t.cs | 18 + .../ZStandard/Unsafe/ldmRollingHashState_t.cs | 8 + .../ZStandard/Unsafe/ldmState_t.cs | 169 + .../Compressors/ZStandard/Unsafe/nodeElt_s.cs | 13 + .../ZStandard/Unsafe/offsetCount_t.cs | 8 + .../ZStandard/Unsafe/optState_t.cs | 39 + .../Compressors/ZStandard/Unsafe/rankPos.cs | 8 + .../ZStandard/Unsafe/rankValCol_t.cs | 7 + .../Compressors/ZStandard/Unsafe/rawSeq.cs | 12 + .../ZStandard/Unsafe/repcodes_s.cs | 7 + .../ZStandard/Unsafe/searchMethod_e.cs | 9 + .../ZStandard/Unsafe/seqState_t.cs | 17 + .../ZStandard/Unsafe/seqStoreSplits.cs | 11 + .../Compressors/ZStandard/Unsafe/seq_t.cs | 9 + .../ZStandard/Unsafe/sortedSymbol_t.cs | 7 + .../ZStandard/Unsafe/streaming_operation.cs | 9 + .../Compressors/ZStandard/UnsafeHelper.cs | 106 + src/SharpCompress/SharpCompress.csproj | 1 - src/SharpCompress/packages.lock.json | 121 - tests/SharpCompress.Test/packages.lock.json | 52 +- 229 files changed, 41594 insertions(+), 171 deletions(-) create mode 100644 src/SharpCompress/Compressors/ZStandard/BitOperations.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/CompressionStream.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Compressor.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Constants.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Decompressor.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/SafeHandles.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CDict_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CParamMode_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DCtx_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDictHashSet.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDict_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_EndDirective.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ErrorCode.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_MatchState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OffsetInfo.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OptPrice_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ResetDirective.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_Sequence.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs create mode 100644 
src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs diff --git a/src/SharpCompress/AssemblyInfo.cs b/src/SharpCompress/AssemblyInfo.cs index e51d97fe0..8d001ba6e 100644 --- a/src/SharpCompress/AssemblyInfo.cs +++ b/src/SharpCompress/AssemblyInfo.cs @@ -1,7 +1,7 @@ using System; using System.Runtime.CompilerServices; -[assembly: CLSCompliant(true)] +[assembly: CLSCompliant(false)] [assembly: InternalsVisibleTo( "SharpCompress.Test,PublicKey=0024000004800000940000000602000000240000525341310004000001000100158bebf1433f76dffc356733c138babea7a47536c65ed8009b16372c6f4edbb20554db74a62687f56b97c20a6ce8c4b123280279e33c894e7b3aa93ab3c573656fde4db576cfe07dba09619ead26375b25d2c4a8e43f7be257d712b0dd2eb546f67adb09281338618a58ac834fc038dd7e2740a7ab3591826252e4f4516306dc" )] diff --git a/src/SharpCompress/Compressors/ZStandard/BitOperations.cs b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs new file mode 100644 index 000000000..d05634307 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs @@ -0,0 +1,249 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
#if !NETCOREAPP3_0_OR_GREATER

using System.Runtime.CompilerServices;
using static ZstdSharp.UnsafeHelper;

// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson:
// http://graphics.stanford.edu/~seander/bithacks.html

namespace System.Numerics
{
    /// <summary>
    /// Utility methods for intrinsic bit-twiddling operations.
    /// On runtimes older than .NET Core 3.0 the real <c>System.Numerics.BitOperations</c>
    /// type does not exist, so this polyfill provides optimized software fallbacks.
    /// </summary>
    public static unsafe class BitOperations
    {
        // De Bruijn lookup table for TrailingZeroCount (index = (isolated LSB * magic) >> 27).
        // hack: should be public because of inline
        public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(new byte[]
        {
            00, 01, 28, 02, 29, 14, 24, 03,
            30, 22, 20, 15, 25, 17, 04, 08,
            31, 27, 13, 23, 21, 19, 16, 07,
            26, 12, 18, 06, 11, 05, 10, 09
        });

        // De Bruijn lookup table for Log2 (index = (smeared value * magic) >> 27).
        // hack: should be public because of inline
        public static readonly byte* Log2DeBruijn = GetArrayPointer(new byte[]
        {
            00, 09, 01, 10, 13, 21, 02, 29,
            11, 14, 16, 18, 22, 25, 03, 30,
            08, 12, 20, 28, 15, 17, 24, 07,
            19, 27, 23, 06, 26, 05, 04, 31
        });

        /// <summary>
        /// Returns the integer (floor) log of the specified value, base 2.
        /// Note that by convention, input value 0 returns 0 since log(0) is undefined.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int Log2(uint value)
        {
            // The 0->0 contract is fulfilled by setting the LSB to 1.
            // Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result.
            value |= 1;

            // Software fallback with the conventional contract 0->0 (Log(0) is undefined).
            // Fill trailing zeros with ones, eg 00010010 becomes 00011111.
            value |= value >> 01;
            value |= value >> 02;
            value |= value >> 04;
            value |= value >> 08;
            value |= value >> 16;

            // (value * 0x07C4ACDDu) >> 27 is always in range [0 - 31], so indexing the
            // raw table pointer is safe without a bounds check.
            return Log2DeBruijn[
                // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u
                (int)((value * 0x07C4ACDDu) >> 27)];
        }

        /// <summary>
        /// Returns the integer (floor) log of the specified value, base 2.
        /// Note that by convention, input value 0 returns 0 since log(0) is undefined.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int Log2(ulong value)
        {
            // Same 0->0 convention as the 32-bit overload.
            value |= 1;

            uint hi = (uint)(value >> 32);

            if (hi == 0)
            {
                return Log2((uint)value);
            }

            return 32 + Log2(hi);
        }

        /// <summary>
        /// Count the number of trailing zero bits in an integer value.
        /// Similar in behavior to the x86 instruction TZCNT.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int TrailingZeroCount(int value)
            => TrailingZeroCount((uint)value);

        /// <summary>
        /// Count the number of trailing zero bits in an integer value.
        /// Similar in behavior to the x86 instruction TZCNT.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int TrailingZeroCount(uint value)
        {
            // Unguarded fallback contract is 0->0, BSF contract is 0->undefined.
            if (value == 0)
            {
                return 32;
            }

            // ((value & -value) * 0x077CB531u) >> 27 is always in range [0 - 31], so
            // indexing the raw table pointer is safe without a bounds check.
            return TrailingZeroCountDeBruijn[
                // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u
                (int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)]; // Multi-cast mitigates redundant conv.u8
        }

        /// <summary>
        /// Count the number of trailing zero bits in a mask.
        /// Similar in behavior to the x86 instruction TZCNT.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int TrailingZeroCount(long value)
            => TrailingZeroCount((ulong)value);

        /// <summary>
        /// Count the number of trailing zero bits in a mask.
        /// Similar in behavior to the x86 instruction TZCNT.
        /// </summary>
        /// <param name="value">The value.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static int TrailingZeroCount(ulong value)
        {
            uint lo = (uint)value;

            if (lo == 0)
            {
                return 32 + TrailingZeroCount((uint)(value >> 32));
            }

            return TrailingZeroCount(lo);
        }

        /// <summary>
        /// Rotates the specified value left by the specified number of bits.
        /// Similar in behavior to the x86 instruction ROL.
        /// </summary>
        /// <param name="value">The value to rotate.</param>
        /// <param name="offset">The number of bits to rotate by.
        /// Any value outside the range [0..31] is treated as congruent mod 32
        /// (C# masks shift counts, so offset 0 yields value unchanged).</param>
        /// <returns>The rotated value.</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static uint RotateLeft(uint value, int offset)
            => (value << offset) | (value >> (32 - offset));

        /// <summary>
        /// Rotates the specified value left by the specified number of bits.
        /// Similar in behavior to the x86 instruction ROL.
        /// </summary>
        /// <param name="value">The value to rotate.</param>
        /// <param name="offset">The number of bits to rotate by.
        /// Any value outside the range [0..63] is treated as congruent mod 64.</param>
+ /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ulong RotateLeft(ulong value, int offset) + => (value << offset) | (value >> (64 - offset)); + + /// + /// Rotates the specified value right by the specified number of bits. + /// Similar in behavior to the x86 instruction ROR. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..31] is treated as congruent mod 32. + /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static uint RotateRight(uint value, int offset) + => (value >> offset) | (value << (32 - offset)); + + /// + /// Rotates the specified value right by the specified number of bits. + /// Similar in behavior to the x86 instruction ROR. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..63] is treated as congruent mod 64. + /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ulong RotateRight(ulong value, int offset) + => (value >> offset) | (value << (64 - offset)); + + /// + /// Count the number of leading zero bits in a mask. + /// Similar in behavior to the x86 instruction LZCNT. + /// + /// The value. 
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int LeadingZeroCount(uint value) + { + // Unguarded fallback contract is 0->31, BSR contract is 0->undefined + if (value == 0) + { + return 32; + } + + // No AggressiveInlining due to large method size + // Has conventional contract 0->0 (Log(0) is undefined) + + // Fill trailing zeros with ones, eg 00010010 becomes 00011111 + value |= value >> 01; + value |= value >> 02; + value |= value >> 04; + value |= value >> 08; + value |= value >> 16; + + // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check + return 31 ^ Log2DeBruijn[ + // uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here + (int)((value * 0x07C4ACDDu) >> 27)]; + } + + /// + /// Count the number of leading zero bits in a mask. + /// Similar in behavior to the x86 instruction LZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int LeadingZeroCount(ulong value) + { + uint hi = (uint)(value >> 32); + + if (hi == 0) + { + return 32 + LeadingZeroCount((uint)value); + } + + return LeadingZeroCount(hi); + } + } +} + +#endif diff --git a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs new file mode 100644 index 000000000..2bf952fd3 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs @@ -0,0 +1,234 @@ +using System; +using System.Buffers; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + public class CompressionStream : Stream + { + private readonly Stream innerStream; + private readonly byte[] outputBuffer; + private readonly bool preserveCompressor; + private readonly bool leaveOpen; + private Compressor? 
compressor; + private ZSTD_outBuffer_s output; + + public CompressionStream(Stream stream, int level = Compressor.DefaultCompressionLevel, + int bufferSize = 0, bool leaveOpen = true) + : this(stream, new Compressor(level), bufferSize, false, leaveOpen) + { + } + + public CompressionStream(Stream stream, Compressor compressor, int bufferSize = 0, + bool preserveCompressor = true, bool leaveOpen = true) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + + if (!stream.CanWrite) + throw new ArgumentException("Stream is not writable", nameof(stream)); + + if (bufferSize < 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + + innerStream = stream; + this.compressor = compressor; + this.preserveCompressor = preserveCompressor; + this.leaveOpen = leaveOpen; + + var outputBufferSize = + bufferSize > 0 ? bufferSize : (int) Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess(); + outputBuffer = ArrayPool.Shared.Rent(outputBufferSize); + output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) outputBufferSize}; + } + + public void SetParameter(ZSTD_cParameter parameter, int value) + { + EnsureNotDisposed(); + compressor.NotNull().SetParameter(parameter, value); + } + + public int GetParameter(ZSTD_cParameter parameter) + { + EnsureNotDisposed(); + return compressor.NotNull().GetParameter(parameter); + } + + public void LoadDictionary(byte[] dict) + { + EnsureNotDisposed(); + compressor.NotNull().LoadDictionary(dict); + } + + ~CompressionStream() => Dispose(false); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override async ValueTask DisposeAsync() +#else + public async ValueTask DisposeAsync() +#endif + { + if (compressor == null) + return; + + try + { + await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false); + } + finally + { + ReleaseUnmanagedResources(); + GC.SuppressFinalize(this); + } + } + + protected override void Dispose(bool disposing) + { + if (compressor == null) + return; + + try + { + if (disposing) + 
FlushInternal(ZSTD_EndDirective.ZSTD_e_end); + } + finally + { + ReleaseUnmanagedResources(); + } + } + + private void ReleaseUnmanagedResources() + { + if (!preserveCompressor) + { + compressor.NotNull().Dispose(); + } + compressor = null; + + if (outputBuffer != null) + { + ArrayPool.Shared.Return(outputBuffer); + } + + if (!leaveOpen) + { + innerStream.Dispose(); + } + } + + public override void Flush() + => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); + + public override async Task FlushAsync(CancellationToken cancellationToken) + => await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken).ConfigureAwait(false); + + private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive); + + private async Task FlushInternalAsync(ZSTD_EndDirective directive, + CancellationToken cancellationToken = default) => + await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false); + + public override void Write(byte[] buffer, int offset, int count) + => Write(new ReadOnlySpan(buffer, offset, count)); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override void Write(ReadOnlySpan buffer) + => WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); +#else + public void Write(ReadOnlySpan buffer) + => WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); +#endif + + private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directive) + { + EnsureNotDisposed(); + + var input = new ZSTD_inBuffer_s {pos = 0, size = buffer != null ? (nuint) buffer.Length : 0}; + nuint remaining; + do + { + output.pos = 0; + remaining = CompressStream(ref input, buffer, directive); + + var written = (int) output.pos; + if (written > 0) + innerStream.Write(outputBuffer, 0, written); + } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0); + } + + private async ValueTask WriteInternalAsync(ReadOnlyMemory? 
buffer, ZSTD_EndDirective directive, + CancellationToken cancellationToken = default) + { + EnsureNotDisposed(); + + var input = new ZSTD_inBuffer_s { pos = 0, size = buffer.HasValue ? (nuint)buffer.Value.Length : 0 }; + nuint remaining; + do + { + output.pos = 0; + remaining = CompressStream(ref input, buffer.HasValue ? buffer.Value.Span : null, directive); + + var written = (int) output.pos; + if (written > 0) + await innerStream.WriteAsync(outputBuffer, 0, written, cancellationToken).ConfigureAwait(false); + } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0); + } + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override async ValueTask WriteAsync(ReadOnlyMemory buffer, + CancellationToken cancellationToken = default) + => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); +#else + public async ValueTask WriteAsync(ReadOnlyMemory buffer, + CancellationToken cancellationToken = default) + => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); +#endif + + internal unsafe nuint CompressStream(ref ZSTD_inBuffer_s input, ReadOnlySpan inputBuffer, + ZSTD_EndDirective directive) + { + fixed (byte* inputBufferPtr = inputBuffer) + fixed (byte* outputBufferPtr = outputBuffer) + { + input.src = inputBufferPtr; + output.dst = outputBufferPtr; + return compressor.NotNull().CompressStream(ref input, ref output, directive).EnsureZstdSuccess(); + } + } + + public override bool CanRead => false; + public override bool CanSeek => false; + public override bool CanWrite => true; + + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new 
NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); + public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + private void EnsureNotDisposed() + { + if (compressor == null) + throw new ObjectDisposedException(nameof(CompressionStream)); + } + + public void SetPledgedSrcSize(ulong pledgedSrcSize) + { + EnsureNotDisposed(); + compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Compressor.cs b/src/SharpCompress/Compressors/ZStandard/Compressor.cs new file mode 100644 index 000000000..71cd6c39e --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Compressor.cs @@ -0,0 +1,162 @@ +using System; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + public unsafe class Compressor : IDisposable + { + /// + /// Minimum negative compression level allowed + /// + public static int MinCompressionLevel => Methods.ZSTD_minCLevel(); + + /// + /// Maximum compression level available + /// + public static int MaxCompressionLevel => Methods.ZSTD_maxCLevel(); + + /// + /// Default compression level + /// + /// + public const int DefaultCompressionLevel = 3; + + private int level = DefaultCompressionLevel; + + private readonly SafeCctxHandle handle; + + public int Level + { + get => level; + set + { + if (level != value) + { + level = value; + SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value); + } + } + } + + public void SetParameter(ZSTD_cParameter parameter, int value) + { + using var cctx = handle.Acquire(); + Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess(); + } + + public int GetParameter(ZSTD_cParameter parameter) + { + using var cctx = handle.Acquire(); + int value; + Methods.ZSTD_CCtx_getParameter(cctx, parameter, 
&value).EnsureZstdSuccess(); + return value; + } + + public void LoadDictionary(byte[] dict) + { + var dictReadOnlySpan = new ReadOnlySpan(dict); + LoadDictionary(dictReadOnlySpan); + } + + public void LoadDictionary(ReadOnlySpan dict) + { + using var cctx = handle.Acquire(); + fixed (byte* dictPtr = dict) + Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length).EnsureZstdSuccess(); + } + + public Compressor(int level = DefaultCompressionLevel) + { + handle = SafeCctxHandle.Create(); + Level = level; + } + + public static int GetCompressBound(int length) + => (int)Methods.ZSTD_compressBound((nuint)length); + + public static ulong GetCompressBoundLong(ulong length) + => Methods.ZSTD_compressBound((nuint)length); + + public Span Wrap(ReadOnlySpan src) + { + var dest = new byte[GetCompressBound(src.Length)]; + var length = Wrap(src, dest); + return new Span(dest, 0, length); + } + + public int Wrap(byte[] src, byte[] dest, int offset) + => Wrap(src, new Span(dest, offset, dest.Length - offset)); + + public int Wrap(ReadOnlySpan src, Span dest) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) + { + using var cctx = handle.Acquire(); + return (int)Methods.ZSTD_compress2(cctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length) + .EnsureZstdSuccess(); + } + } + + public int Wrap(ArraySegment src, ArraySegment dest) + => Wrap((ReadOnlySpan)src, dest); + + public int Wrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength) + => Wrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength)); + + public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) + => TryWrap(src, new Span(dest, offset, dest.Length - offset), out written); + + public bool TryWrap(ReadOnlySpan src, Span dest, out int written) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) + { + nuint returnValue; + using (var cctx = handle.Acquire()) + { + returnValue = + Methods.ZSTD_compress2(cctx, 
destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length); + } + + if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + { + written = default; + return false; + } + + returnValue.EnsureZstdSuccess(); + written = (int)returnValue; + return true; + } + } + + public bool TryWrap(ArraySegment src, ArraySegment dest, out int written) + => TryWrap((ReadOnlySpan)src, dest, out written); + + public bool TryWrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written) + => TryWrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength), out written); + + public void Dispose() + { + handle.Dispose(); + GC.SuppressFinalize(this); + } + + internal nuint CompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output, ZSTD_EndDirective directive) + { + fixed (ZSTD_inBuffer_s* inputPtr = &input) + fixed (ZSTD_outBuffer_s* outputPtr = &output) + { + using var cctx = handle.Acquire(); + return Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive).EnsureZstdSuccess(); + } + } + + public void SetPledgedSrcSize(ulong pledgedSrcSize) + { + using var cctx = handle.Acquire(); + Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess(); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Constants.cs b/src/SharpCompress/Compressors/ZStandard/Constants.cs new file mode 100644 index 000000000..c7af12313 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Constants.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp +{ + internal class Constants + { + //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks + //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 + public const ulong MaxByteArrayLength = 0x7FFFFFC7; + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs 
b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs new file mode 100644 index 000000000..28dc88447 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs @@ -0,0 +1,258 @@ +using System; +using System.Buffers; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + public class DecompressionStream : Stream + { + private readonly Stream innerStream; + private readonly byte[] inputBuffer; + private readonly int inputBufferSize; + private readonly bool preserveDecompressor; + private readonly bool leaveOpen; + private readonly bool checkEndOfStream; + private Decompressor? decompressor; + private ZSTD_inBuffer_s input; + private nuint lastDecompressResult = 0; + private bool contextDrained = true; + + public DecompressionStream(Stream stream, int bufferSize = 0, bool checkEndOfStream = true, bool leaveOpen = true) + : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) + { + } + + public DecompressionStream(Stream stream, Decompressor decompressor, int bufferSize = 0, + bool checkEndOfStream = true, bool preserveDecompressor = true, bool leaveOpen = true) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + + if (!stream.CanRead) + throw new ArgumentException("Stream is not readable", nameof(stream)); + + if (bufferSize < 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + + innerStream = stream; + this.decompressor = decompressor; + this.preserveDecompressor = preserveDecompressor; + this.leaveOpen = leaveOpen; + this.checkEndOfStream = checkEndOfStream; + + inputBufferSize = bufferSize > 0 ? 
bufferSize : (int) Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); + inputBuffer = ArrayPool.Shared.Rent(inputBufferSize); + input = new ZSTD_inBuffer_s {pos = (nuint) inputBufferSize, size = (nuint) inputBufferSize}; + } + + public void SetParameter(ZSTD_dParameter parameter, int value) + { + EnsureNotDisposed(); + decompressor.NotNull().SetParameter(parameter, value); + } + + public int GetParameter(ZSTD_dParameter parameter) + { + EnsureNotDisposed(); + return decompressor.NotNull().GetParameter(parameter); + } + + public void LoadDictionary(byte[] dict) + { + EnsureNotDisposed(); + decompressor.NotNull().LoadDictionary(dict); + } + + ~DecompressionStream() => Dispose(false); + + protected override void Dispose(bool disposing) + { + if (decompressor == null) + return; + + if (!preserveDecompressor) + { + decompressor.Dispose(); + } + decompressor = null; + + if (inputBuffer != null) + { + ArrayPool.Shared.Return(inputBuffer); + } + + if (!leaveOpen) + { + innerStream.Dispose(); + } + } + + public override int Read(byte[] buffer, int offset, int count) + => Read(new Span(buffer, offset, count)); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override int Read(Span buffer) +#else + public int Read(Span buffer) +#endif + { + EnsureNotDisposed(); + + // Guard against infinite loop (output.pos would never become non-zero) + if (buffer.Length == 0) + { + return 0; + } + + var output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) buffer.Length}; + while (true) + { + // If there is still input available, or there might be data buffered in the decompressor context, flush that out + while (input.pos < input.size || !contextDrained) + { + nuint oldInputPos = input.pos; + nuint result = DecompressStream(ref output, buffer); + if (output.pos > 0 || oldInputPos != input.pos) + { + // Keep result from last decompress call that made some progress, so we know if we're at end of frame + lastDecompressResult = result; + } + // If decompression filled the output buffer, there
might still be data buffered in the decompressor context + contextDrained = output.pos < output.size; + // If we have data to return, return it immediately, so we won't stall on Read + if (output.pos > 0) + { + return (int) output.pos; + } + } + + // Otherwise, read some more input + int bytesRead; + if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0) + { + if (checkEndOfStream && lastDecompressResult != 0) + { + throw new EndOfStreamException("Premature end of stream"); + } + + return 0; + } + + input.size = (nuint) bytesRead; + input.pos = 0; + } + } + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override async ValueTask ReadAsync(Memory buffer, + CancellationToken cancellationToken = default) +#else + public async ValueTask ReadAsync(Memory buffer, + CancellationToken cancellationToken = default) +#endif + { + EnsureNotDisposed(); + + // Guard against infinite loop (output.pos would never become non-zero) + if (buffer.Length == 0) + { + return 0; + } + + var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length}; + while (true) + { + // If there is still input available, or there might be data buffered in the decompressor context, flush that out + while (input.pos < input.size || !contextDrained) + { + nuint oldInputPos = input.pos; + nuint result = DecompressStream(ref output, buffer.Span); + if (output.pos > 0 || oldInputPos != input.pos) + { + // Keep result from last decompress call that made some progress, so we know if we're at end of frame + lastDecompressResult = result; + } + // If decompression filled the output buffer, there might still be data buffered in the decompressor context + contextDrained = output.pos < output.size; + // If we have data to return, return it immediately, so we won't stall on Read + if (output.pos > 0)
+ { + return (int)output.pos; + } + } + + // Otherwise, read some more input + int bytesRead; + if ((bytesRead = await innerStream.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) + .ConfigureAwait(false)) == 0) + { + if (checkEndOfStream && lastDecompressResult != 0) + { + throw new EndOfStreamException("Premature end of stream"); + } + + return 0; + } + + input.size = (nuint) bytesRead; + input.pos = 0; + } + } + + private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span outputBuffer) + { + fixed (byte* inputBufferPtr = inputBuffer) + fixed (byte* outputBufferPtr = outputBuffer) + { + input.src = inputBufferPtr; + output.dst = outputBufferPtr; + return decompressor.NotNull().DecompressStream(ref input, ref output); + } + } + + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; + + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public override void Flush() => throw new NotSupportedException(); + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + private void EnsureNotDisposed() + { + if (decompressor == null) + throw new ObjectDisposedException(nameof(DecompressionStream)); + } + +#if NETSTANDARD2_0 || NETFRAMEWORK + public virtual ValueTask DisposeAsync() + { + try + { + Dispose(); + return default; + } + catch (Exception exc) + { + return new ValueTask(Task.FromException(exc)); + } + } +#endif + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs new file mode 100644 index 000000000..85b99e778 --- 
/dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs @@ -0,0 +1,133 @@ +using System; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + public unsafe class Decompressor : IDisposable + { + private readonly SafeDctxHandle handle; + + public Decompressor() + { + handle = SafeDctxHandle.Create(); + } + + public void SetParameter(ZSTD_dParameter parameter, int value) + { + using var dctx = handle.Acquire(); + Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess(); + } + + public int GetParameter(ZSTD_dParameter parameter) + { + using var dctx = handle.Acquire(); + int value; + Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess(); + return value; + } + + public void LoadDictionary(byte[] dict) + { + var dictReadOnlySpan = new ReadOnlySpan(dict); + this.LoadDictionary(dictReadOnlySpan); + } + + public void LoadDictionary(ReadOnlySpan dict) + { + using var dctx = handle.Acquire(); + fixed (byte* dictPtr = dict) + Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length).EnsureZstdSuccess(); + } + + public static ulong GetDecompressedSize(ReadOnlySpan src) + { + fixed (byte* srcPtr = src) + return Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length).EnsureContentSizeOk(); + } + + public static ulong GetDecompressedSize(ArraySegment src) + => GetDecompressedSize((ReadOnlySpan)src); + + public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) + => GetDecompressedSize(new ReadOnlySpan(src, srcOffset, srcLength)); + + public Span Unwrap(ReadOnlySpan src, int maxDecompressedSize = int.MaxValue) + { + var expectedDstSize = GetDecompressedSize(src); + if (expectedDstSize > (ulong)maxDecompressedSize) + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"); + if (expectedDstSize > Constants.MaxByteArrayLength) + throw new 
ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"); + + var dest = new byte[expectedDstSize]; + var length = Unwrap(src, dest); + return new Span(dest, 0, length); + } + + public int Unwrap(byte[] src, byte[] dest, int offset) + => Unwrap(src, new Span(dest, offset, dest.Length - offset)); + + public int Unwrap(ReadOnlySpan src, Span dest) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) + { + using var dctx = handle.Acquire(); + return (int)Methods + .ZSTD_decompressDCtx(dctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length) + .EnsureZstdSuccess(); + } + } + + public int Unwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength) + => Unwrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength)); + + public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) + => TryUnwrap(src, new Span(dest, offset, dest.Length - offset), out written); + + public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) + { + nuint returnValue; + using (var dctx = handle.Acquire()) + { + returnValue = + Methods.ZSTD_decompressDCtx(dctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length); + } + + if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + { + written = default; + return false; + } + + returnValue.EnsureZstdSuccess(); + written = (int)returnValue; + return true; + } + } + + public bool TryUnwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written) + => TryUnwrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength), out written); + + public void Dispose() + { + handle.Dispose(); + GC.SuppressFinalize(this); + } + + internal nuint DecompressStream(ref ZSTD_inBuffer_s 
input, ref ZSTD_outBuffer_s output) + { + fixed (ZSTD_inBuffer_s* inputPtr = &input) + fixed (ZSTD_outBuffer_s* outputPtr = &output) + { + using var dctx = handle.Acquire(); + return Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr).EnsureZstdSuccess(); + } + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs new file mode 100644 index 000000000..b470a4347 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs @@ -0,0 +1,168 @@ +using System; +using System.Runtime.InteropServices; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + /// + /// Provides the base class for ZstdSharp implementations. + /// + /// + /// Even though ZstdSharp is a managed library, its internals are using unmanaged + /// memory and we are using safe handles in the library's high-level API to ensure + /// proper disposal of unmanaged resources and increase safety. + /// + /// + /// + internal abstract unsafe class SafeZstdHandle : SafeHandle + { + /// + /// Parameterless constructor is hidden. Use the static Create factory + /// method to create a new safe handle instance. + /// + protected SafeZstdHandle() : base(IntPtr.Zero, true) + { + } + + public sealed override bool IsInvalid => handle == IntPtr.Zero; + } + + /// + /// Safely wraps an unmanaged Zstd compression context. + /// + internal sealed unsafe class SafeCctxHandle : SafeZstdHandle + { + /// + private SafeCctxHandle() + { + } + + /// + /// Creates a new instance of . + /// + /// + /// Creation failed. 
+ public static SafeCctxHandle Create() + { + var safeHandle = new SafeCctxHandle(); + bool success = false; + try + { + var cctx = Methods.ZSTD_createCCtx(); + if (cctx == null) + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx"); + safeHandle.SetHandle((IntPtr)cctx); + success = true; + } + finally + { + if (!success) + { + safeHandle.SetHandleAsInvalid(); + } + } + return safeHandle; + } + + /// + /// Acquires a reference to the safe handle. + /// + /// + /// A instance that can be implicitly converted to a pointer + /// to . + /// + public SafeHandleHolder Acquire() => new(this); + + protected override bool ReleaseHandle() + { + return Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0; + } + } + + /// + /// Safely wraps an unmanaged Zstd compression context. + /// + internal sealed unsafe class SafeDctxHandle : SafeZstdHandle + { + /// + private SafeDctxHandle() + { + } + + /// + /// Creates a new instance of . + /// + /// + /// Creation failed. + public static SafeDctxHandle Create() + { + var safeHandle = new SafeDctxHandle(); + bool success = false; + try + { + var dctx = Methods.ZSTD_createDCtx(); + if (dctx == null) + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx"); + safeHandle.SetHandle((IntPtr)dctx); + success = true; + } + finally + { + if (!success) + { + safeHandle.SetHandleAsInvalid(); + } + } + return safeHandle; + } + + /// + /// Acquires a reference to the safe handle. + /// + /// + /// A instance that can be implicitly converted to a pointer + /// to . + /// + public SafeHandleHolder Acquire() => new(this); + + protected override bool ReleaseHandle() + { + return Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0; + } + } + + /// + /// Provides a convenient interface to safely acquire pointers of a specific type + /// from a , by utilizing blocks. + /// + /// The type of pointers to return. 
+ /// + /// Safe handle holders can be d to decrement the safe handle's + /// reference count, and can be implicitly converted to pointers to . + /// + internal unsafe ref struct SafeHandleHolder where T : unmanaged + { + private readonly SafeHandle _handle; + + private bool _refAdded; + + public SafeHandleHolder(SafeHandle safeHandle) + { + _handle = safeHandle; + _refAdded = false; + safeHandle.DangerousAddRef(ref _refAdded); + } + + public static implicit operator T*(SafeHandleHolder holder) => + (T*)holder._handle.DangerousGetHandle(); + + public void Dispose() + { + if (_refAdded) + { + _handle.DangerousRelease(); + _refAdded = false; + } + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs new file mode 100644 index 000000000..380e6acaf --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs @@ -0,0 +1,43 @@ +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + public static unsafe class ThrowHelper + { + private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1); + private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2); + + public static nuint EnsureZstdSuccess(this nuint returnValue) + { + if (Methods.ZSTD_isError(returnValue)) + ThrowException(returnValue, Methods.ZSTD_getErrorName(returnValue)); + + return returnValue; + } + + public static nuint EnsureZdictSuccess(this nuint returnValue) + { + if (Methods.ZDICT_isError(returnValue)) + ThrowException(returnValue, Methods.ZDICT_getErrorName(returnValue)); + + return returnValue; + } + + public static ulong EnsureContentSizeOk(this ulong returnValue) + { + if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN) + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size is not specified"); + + if (returnValue == ZSTD_CONTENTSIZE_ERROR) + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size cannot be determined (e.g. 
invalid magic number, srcSize too small)"); + + return returnValue; + } + + private static void ThrowException(nuint returnValue, string message) + { + var code = 0 - returnValue; + throw new ZstdException((ZSTD_ErrorCode) code, message); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs new file mode 100644 index 000000000..ea59312ff --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs @@ -0,0 +1,44 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /* custom memory allocation functions */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem) + { + if (customMem.customAlloc != null) + return ((delegate* managed)customMem.customAlloc)(customMem.opaque, size); + return malloc(size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem) + { + if (customMem.customAlloc != null) + { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* ptr = ((delegate* managed)customMem.customAlloc)(customMem.opaque, size); + memset(ptr, 0, (uint)size); + return ptr; + } + + return calloc(1, size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) + { + if (ptr != null) + { + if (customMem.customFree != null) + ((delegate* managed)customMem.customFree)(customMem.opaque, ptr); + else + free(ptr); + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs new file mode 100644 index 000000000..ee8d40ba7 --- 
/dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /* bitStream can mix input from multiple sources. + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ + public unsafe struct BIT_CStream_t + { + public nuint bitContainer; + public uint bitPos; + public sbyte* startPtr; + public sbyte* ptr; + public sbyte* endPtr; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs new file mode 100644 index 000000000..3fbf57137 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs @@ -0,0 +1,14 @@ +namespace ZstdSharp.Unsafe +{ + public enum BIT_DStream_status + { + /* fully refilled */ + BIT_DStream_unfinished = 0, + /* still some bits left in bitstream */ + BIT_DStream_endOfBuffer = 1, + /* bitstream entirely consumed, bit-exact */ + BIT_DStream_completed = 2, + /* user requested more bits than present in bitstream */ + BIT_DStream_overflow = 3 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs new file mode 100644 index 000000000..f47bc40b2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs @@ -0,0 +1,14 @@ +namespace ZstdSharp.Unsafe +{ + /*-******************************************** + * bitStream decoding API (read backward) + **********************************************/ + public unsafe struct BIT_DStream_t + { + public nuint bitContainer; + public uint bitsConsumed; + public sbyte* ptr; + public sbyte* start; + public sbyte* limitPtr; + } +} \ No newline at end of file diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs new file mode 100644 index 000000000..94b6010c0 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs @@ -0,0 +1,57 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; +using System; +using System.Numerics; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countTrailingZeros32(uint val) + { + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countLeadingZeros32(uint val) + { + assert(val != 0); + return (uint)BitOperations.LeadingZeroCount(val); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countTrailingZeros64(ulong val) + { + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countLeadingZeros64(ulong val) + { + assert(val != 0); + return (uint)BitOperations.LeadingZeroCount(val); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_NbCommonBytes(nuint val) + { + assert(val != 0); + if (BitConverter.IsLittleEndian) + { + return MEM_64bits ? (uint)BitOperations.TrailingZeroCount(val) >> 3 : (uint)BitOperations.TrailingZeroCount((uint)val) >> 3; + } + + return MEM_64bits ? 
(uint)BitOperations.LeadingZeroCount(val) >> 3 : (uint)BitOperations.LeadingZeroCount((uint)val) >> 3; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_highbit32(uint val) + { + assert(val != 0); + return (uint)BitOperations.Log2(val); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs new file mode 100644 index 000000000..068e9fb1a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs @@ -0,0 +1,619 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; +using System.Runtime.CompilerServices; +#if NETCOREAPP3_0_OR_GREATER +using System.Runtime.Intrinsics.X86; +#endif + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_BIT_mask => new uint[32] + { + 0, + 1, + 3, + 7, + 0xF, + 0x1F, + 0x3F, + 0x7F, + 0xFF, + 0x1FF, + 0x3FF, + 0x7FF, + 0xFFF, + 0x1FFF, + 0x3FFF, + 0x7FFF, + 0xFFFF, + 0x1FFFF, + 0x3FFFF, + 0x7FFFF, + 0xFFFFF, + 0x1FFFFF, + 0x3FFFFF, + 0x7FFFFF, + 0xFFFFFF, + 0x1FFFFFF, + 0x3FFFFFF, + 0x7FFFFFF, + 0xFFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF + }; + private static uint* BIT_mask => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_BIT_mask)); +#else + + private static readonly uint* BIT_mask = GetArrayPointer(new uint[32] { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF }); +#endif + /*-************************************************************** + * bitStream encoding + ****************************************************************/ + /*! 
BIT_initCStream() : + * `dstCapacity` must be > sizeof(size_t) + * @return : 0 if success, + * otherwise an error code (can be tested using ERR_isError()) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity) + { + bitC.bitContainer = 0; + bitC.bitPos = 0; + bitC.startPtr = (sbyte*)startPtr; + bitC.ptr = bitC.startPtr; + bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint); + if (dstCapacity <= (nuint)sizeof(nuint)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return 0; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) + { + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); +#if NETCOREAPP3_1_OR_GREATER + if (Bmi2.X64.IsSupported) + { + return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits); + } + + if (Bmi2.IsSupported) + { + return Bmi2.ZeroHighBits((uint)bitContainer, nbBits); + } +#endif + + return bitContainer & BIT_mask[nbBits]; + } + + /*! BIT_addBits() : + * can add up to 31 bits into `bitC`. + * Note : does not check for register overflow ! */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_addBits(ref nuint bitC_bitContainer, ref uint bitC_bitPos, nuint value, uint nbBits) + { + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); + assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); + bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos; + bitC_bitPos += nbBits; + } + + /*! 
BIT_addBitsFast() : + * works only if `value` is _clean_, + * meaning all high bits above nbBits are 0 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_addBitsFast(ref nuint bitC_bitContainer, ref uint bitC_bitPos, nuint value, uint nbBits) + { + assert(value >> (int)nbBits == 0); + assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); + bitC_bitContainer |= value << (int)bitC_bitPos; + bitC_bitPos += nbBits; + } + + /*! BIT_flushBitsFast() : + * assumption : bitContainer has not overflowed + * unsafe version; does not check buffer overflow */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_flushBitsFast(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr) + { + nuint nbBytes = bitC_bitPos >> 3; + assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); + assert(bitC_ptr <= bitC_endPtr); + MEM_writeLEST(bitC_ptr, bitC_bitContainer); + bitC_ptr += nbBytes; + bitC_bitPos &= 7; + bitC_bitContainer >>= (int)(nbBytes * 8); + } + + /*! BIT_flushBits() : + * assumption : bitContainer has not overflowed + * safe version; check for buffer overflow, and prevents it. + * note : does not signal buffer overflow. + * overflow will be revealed later on using BIT_closeCStream() */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_flushBits(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr) + { + nuint nbBytes = bitC_bitPos >> 3; + assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); + assert(bitC_ptr <= bitC_endPtr); + MEM_writeLEST(bitC_ptr, bitC_bitContainer); + bitC_ptr += nbBytes; + if (bitC_ptr > bitC_endPtr) + bitC_ptr = bitC_endPtr; + bitC_bitPos &= 7; + bitC_bitContainer >>= (int)(nbBytes * 8); + } + + /*! 
BIT_closeCStream() : + * @return : size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_closeCStream(ref nuint bitC_bitContainer, ref uint bitC_bitPos, sbyte* bitC_ptr, sbyte* bitC_endPtr, sbyte* bitC_startPtr) + { + BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + if (bitC_ptr >= bitC_endPtr) + return 0; + return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0); + } + + /*-******************************************************** + * bitStream decoding + **********************************************************/ + /*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. + * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize) + { + if (srcSize < 1) + { + *bitD = new BIT_DStream_t(); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + bitD->start = (sbyte*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(nuint); + if (srcSize >= (nuint)sizeof(nuint)) + { + bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + { + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD->bitsConsumed = lastByte != 0 ? 
8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + } + else + { + bitD->ptr = bitD->start; + bitD->bitContainer = *(byte*)bitD->start; + switch (srcSize) + { + case 7: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + goto case 6; + case 6: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + goto case 5; + case 5: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + goto case 4; + case 4: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; + goto case 3; + case 3: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; + goto case 2; + case 2: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; + goto default; + default: + break; + } + + { + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; + } + + return srcSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getUpperBits(nuint bitContainer, uint start) + { + return bitContainer >> (int)start; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); +#if NETCOREAPP3_1_OR_GREATER + if (Bmi2.X64.IsSupported) + { + return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits); + } + + if (Bmi2.IsSupported) + { + return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits); + } +#endif + + return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1); + } + + /*! 
BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits) + { + return BIT_getMiddleBits(bitD->bitContainer, (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits, nbBits); + } + + /*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits >= 1); + return bitD->bitContainer << (int)(bitD->bitsConsumed & regMask) >> (int)(regMask + 1 - nbBits & regMask); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits) + { + bitD->bitsConsumed += nbBits; + } + + /*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits) + { + nuint value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; + } + + /*! BIT_readBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits) + { + nuint value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); + return value; + } + + /*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. 
look window is valid after shifted down : bitD->ptr >= bitD->start + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD) + { + assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8)); + bitD->ptr -= bitD->bitsConsumed >> 3; + assert(bitD->ptr >= bitD->start); + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_status.BIT_DStream_unfinished; + } + + /*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! + * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) + { + if (bitD->ptr < bitD->limitPtr) + return BIT_DStream_status.BIT_DStream_overflow; + return BIT_reloadDStream_internal(bitD); + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_static_zeroFilled => new byte[] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }; + private static nuint* static_zeroFilled => (nuint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_static_zeroFilled)); +#else + + private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }); +#endif + /*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will never read beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. 
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) + { + if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8)) + { + const nuint zeroFilled = 0; + bitD->ptr = (sbyte*)&static_zeroFilled[0]; + return BIT_DStream_status.BIT_DStream_overflow; + } + + assert(bitD->ptr >= bitD->start); + if (bitD->ptr >= bitD->limitPtr) + { + return BIT_reloadDStream_internal(bitD); + } + + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8)) + return BIT_DStream_status.BIT_DStream_endOfBuffer; + return BIT_DStream_status.BIT_DStream_completed; + } + + { + uint nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) + { + nbBytes = (uint)(bitD->ptr - bitD->start); + result = BIT_DStream_status.BIT_DStream_endOfBuffer; + } + + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes * 8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return result; + } + } + + /*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint BIT_endOfDStream(BIT_DStream_t* DStream) + { + return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) ? 1U : 0U; + } + + /*-******************************************************** + * bitStream decoding + **********************************************************/ + /*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. 
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize) + { + if (srcSize < 1) + { + bitD = new BIT_DStream_t(); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + bitD.start = (sbyte*)srcBuffer; + bitD.limitPtr = bitD.start + sizeof(nuint); + if (srcSize >= (nuint)sizeof(nuint)) + { + bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); + bitD.bitContainer = MEM_readLEST(bitD.ptr); + { + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + } + else + { + bitD.ptr = bitD.start; + bitD.bitContainer = *(byte*)bitD.start; + switch (srcSize) + { + case 7: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + goto case 6; + case 6: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + goto case 5; + case 5: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + goto case 4; + case 4: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; + goto case 3; + case 3: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; + goto case 2; + case 2: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; + goto default; + default: + break; + } + + { + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; + } + + return srcSize; + } + + /*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. 
+ * On 64-bits, maxNbBits==56. + * @return : value extracted */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits) + { + return BIT_getMiddleBits(bitD_bitContainer, (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits, nbBits); + } + + /*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBitsFast(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits >= 1); + return bitD_bitContainer << (int)(bitD_bitsConsumed & regMask) >> (int)(regMask + 1 - nbBits & regMask); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits) + { + bitD_bitsConsumed += nbBits; + } + + /*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBits(nuint bitD_bitContainer, ref uint bitD_bitsConsumed, uint nbBits) + { + nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits); + BIT_skipBits(ref bitD_bitsConsumed, nbBits); + return value; + } + + /*! BIT_readBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBitsFast(nuint bitD_bitContainer, ref uint bitD_bitsConsumed, uint nbBits) + { + nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits); + assert(nbBits >= 1); + BIT_skipBits(ref bitD_bitsConsumed, nbBits); + return value; + } + + /*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! 
+ * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStreamFast(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start, sbyte* bitD_limitPtr) + { + if (bitD_ptr < bitD_limitPtr) + return BIT_DStream_status.BIT_DStream_overflow; + return BIT_reloadDStream_internal(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start); + } + + /*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will never read beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. + * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start, sbyte* bitD_limitPtr) + { + if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8)) + { + const nuint zeroFilled = 0; + bitD_ptr = (sbyte*)&static_zeroFilled[0]; + return BIT_DStream_status.BIT_DStream_overflow; + } + + assert(bitD_ptr >= bitD_start); + if (bitD_ptr >= bitD_limitPtr) + { + return BIT_reloadDStream_internal(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start); + } + + if (bitD_ptr == bitD_start) + { + if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8)) + return BIT_DStream_status.BIT_DStream_endOfBuffer; + return BIT_DStream_status.BIT_DStream_completed; + } + + { + uint nbBytes = bitD_bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; + if (bitD_ptr - nbBytes < bitD_start) + { + nbBytes = (uint)(bitD_ptr - bitD_start); + result = BIT_DStream_status.BIT_DStream_endOfBuffer; + } + + bitD_ptr -= nbBytes; + 
bitD_bitsConsumed -= nbBytes * 8; + bitD_bitContainer = MEM_readLEST(bitD_ptr); + return result; + } + } + + /*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. look window is valid after shifted down : bitD->ptr >= bitD->start + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream_internal(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start) + { + assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8)); + bitD_ptr -= bitD_bitsConsumed >> 3; + assert(bitD_ptr >= bitD_start); + bitD_bitsConsumed &= 7; + bitD_bitContainer = MEM_readLEST(bitD_ptr); + return BIT_DStream_status.BIT_DStream_unfinished; + } + + /*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint BIT_endOfDStream(uint DStream_bitsConsumed, sbyte* DStream_ptr, sbyte* DStream_start) + { + return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8) ? 
1U : 0U; + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs new file mode 100644 index 000000000..4a105a854 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct BlockSummary + { + public nuint nbSequences; + public nuint blockSize; + public nuint litSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs new file mode 100644 index 000000000..629ee4092 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs @@ -0,0 +1,21 @@ +namespace ZstdSharp.Unsafe +{ + /** + * COVER_best_t is used for two purposes: + * 1. Synchronizing threads. + * 2. Saving the best parameters and dictionary. + * + * All of the methods except COVER_best_init() are thread safe if zstd is + * compiled with multithreaded support. 
+ */ + public unsafe struct COVER_best_s + { + public void* mutex; + public void* cond; + public nuint liveJobs; + public void* dict; + public nuint dictSize; + public ZDICT_cover_params_t parameters; + public nuint compressedSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs new file mode 100644 index 000000000..3e5fb2d61 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs @@ -0,0 +1,20 @@ +namespace ZstdSharp.Unsafe +{ + /*-************************************* + * Context + ***************************************/ + public unsafe struct COVER_ctx_t + { + public byte* samples; + public nuint* offsets; + public nuint* samplesSizes; + public nuint nbSamples; + public nuint nbTrainSamples; + public nuint nbTestSamples; + public uint* suffix; + public nuint suffixSize; + public uint* freqs; + public uint* dmerAt; + public uint d; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs new file mode 100644 index 000000000..21a03e745 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Struct used for the dictionary selection function. + */ + public unsafe struct COVER_dictSelection + { + public byte* dictContent; + public nuint dictSize; + public nuint totalCompressedSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs new file mode 100644 index 000000000..50b791c36 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /** + *Number of epochs and size of each epoch. 
+ */ + public struct COVER_epoch_info_t + { + public uint num; + public uint size; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs new file mode 100644 index 000000000..473be3765 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct COVER_map_pair_t_s + { + public uint key; + public uint value; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs new file mode 100644 index 000000000..88188beca --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct COVER_map_s + { + public COVER_map_pair_t_s* data; + public uint sizeLog; + public uint size; + public uint sizeMask; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs new file mode 100644 index 000000000..c217ce39a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + /** + * A segment is a range in the source as well as the score of the segment. 
+ */ + public struct COVER_segment_t + { + public uint begin; + public uint end; + public uint score; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs new file mode 100644 index 000000000..43b10726b --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Parameters for COVER_tryParameters(). + */ + public unsafe struct COVER_tryParameters_data_s + { + public COVER_ctx_t* ctx; + public COVER_best_s* best; + public nuint dictBufferCapacity; + public ZDICT_cover_params_t parameters; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs new file mode 100644 index 000000000..8b5664c3c --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs @@ -0,0 +1,113 @@ +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters = new ZSTD_compressionParameters[4][] + { + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters(windowLog: 19, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 19, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 7, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 20, chainLog: 15, hashLog: 16, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 16, hashLog: 17, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 18, searchLog: 
1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 20, hashLog: 21, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 6, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 4, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 5, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 23, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 
23, chainLog: 23, hashLog: 22, searchLog: 5, minMatch: 4, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 23, chainLog: 23, hashLog: 22, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 23, chainLog: 24, hashLog: 22, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 25, chainLog: 25, hashLog: 23, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 26, chainLog: 26, hashLog: 24, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 27, chainLog: 27, hashLog: 25, searchLog: 9, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters(windowLog: 18, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 14, hashLog: 14, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 16, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 17, hashLog: 18, searchLog: 5, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, 
targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 16, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 18, 
chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 12, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 13, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 12, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 13, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 15, hashLog: 16, searchLog: 2, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 4, minMatch: 4, 
targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 5, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new 
ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 11, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters(windowLog: 14, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 8, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, 
searchLog: 9, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 4, minMatch: 3, targetLength: 24, strategy: ZSTD_strategy.ZSTD_btopt), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 5, minMatch: 3, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), + new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 10, minMatch: 3, targetLength: 999, strategy: 
ZSTD_strategy.ZSTD_btultra2) + } + }; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs new file mode 100644 index 000000000..27ab2cdf2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs @@ -0,0 +1,62 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /* @return 1 if @u is a 2^n value, 0 otherwise + * useful to check a value is valid for alignment restrictions */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_isPower2(nuint u) + { + return (u & u - 1) == 0 ? 1 : 0; + } + + /** + * Helper function to perform a wrapped pointer difference without triggering + * UBSAN. + * + * @returns lhs - rhs with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs) + { + return (nint)(lhs - rhs); + } + + /** + * Helper function to perform a wrapped pointer add without triggering UBSAN. + * + * @return ptr + add with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add) + { + return ptr + add; + } + + /** + * Helper function to perform a wrapped pointer subtraction without triggering + * UBSAN. + * + * @return ptr - sub with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub) + { + return ptr - sub; + } + + /** + * Helper function to add to a pointer that works around C's undefined behavior + * of adding 0 to NULL. + * + * @returns `ptr + add` except it defines `NULL + 0 == NULL`. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add) + { + return add > 0 ? 
ptr + add : ptr; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs new file mode 100644 index 000000000..ca1d7aa86 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs @@ -0,0 +1,367 @@ +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static int g_displayLevel = 0; + /** + * Returns the sum of the sample sizes. + */ + private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples) + { + nuint sum = 0; + uint i; + for (i = 0; i < nbSamples; ++i) + { + sum += samplesSizes[i]; + } + + return sum; + } + + /** + * Warns the user when their corpus is too small. + */ + private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel) + { + double ratio = nbDmers / (double)maxDictSize; + if (ratio >= 10) + { + return; + } + } + + /** + * Computes the number of epochs and the size of each epoch. + * We will make sure that each epoch gets at least 10 * k bytes. + * + * The COVER algorithms divide the data up into epochs of equal size and + * select one segment from each epoch. + * + * @param maxDictSize The maximum allowed dictionary size. + * @param nbDmers The number of dmers we are training on. + * @param k The parameter k (segment size). + * @param passes The target number of passes over the dmer corpus. + * More passes means a better dictionary. + */ + private static COVER_epoch_info_t COVER_computeEpochs(uint maxDictSize, uint nbDmers, uint k, uint passes) + { + uint minEpochSize = k * 10; + COVER_epoch_info_t epochs; + epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes; + epochs.size = nbDmers / epochs.num; + if (epochs.size >= minEpochSize) + { + assert(epochs.size * epochs.num <= nbDmers); + return epochs; + } + + epochs.size = minEpochSize < nbDmers ? 
minEpochSize : nbDmers; + epochs.num = nbDmers / epochs.size; + assert(epochs.size * epochs.num <= nbDmers); + return epochs; + } + + /** + * Checks total compressed size of a dictionary + */ + private static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t parameters, nuint* samplesSizes, byte* samples, nuint* offsets, nuint nbTrainSamples, nuint nbSamples, byte* dict, nuint dictBufferCapacity) + { + nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + /* Pointers */ + ZSTD_CCtx_s* cctx; + ZSTD_CDict_s* cdict; + void* dst; + /* Local variables */ + nuint dstCapacity; + nuint i; + { + nuint maxSampleSize = 0; + i = parameters.splitPoint < 1 ? nbTrainSamples : 0; + for (; i < nbSamples; ++i) + { + maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; + } + + dstCapacity = ZSTD_compressBound(maxSampleSize); + dst = malloc(dstCapacity); + } + + cctx = ZSTD_createCCtx(); + cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); + if (dst == null || cctx == null || cdict == null) + { + goto _compressCleanup; + } + + totalCompressedSize = dictBufferCapacity; + i = parameters.splitPoint < 1 ? nbTrainSamples : 0; + for (; i < nbSamples; ++i) + { + nuint size = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, samples + offsets[i], samplesSizes[i], cdict); + if (ERR_isError(size)) + { + totalCompressedSize = size; + goto _compressCleanup; + } + + totalCompressedSize += size; + } + + _compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst != null) + { + free(dst); + } + + return totalCompressedSize; + } + + /** + * Initialize the `COVER_best_t`. 
+ */ + private static void COVER_best_init(COVER_best_s* best) + { + if (best == null) + return; + SynchronizationWrapper.Init(&best->mutex); + best->liveJobs = 0; + best->dict = null; + best->dictSize = 0; + best->compressedSize = unchecked((nuint)(-1)); + best->parameters = new ZDICT_cover_params_t(); + } + + /** + * Wait until liveJobs == 0. + */ + private static void COVER_best_wait(COVER_best_s* best) + { + if (best == null) + { + return; + } + + SynchronizationWrapper.Enter(&best->mutex); + while (best->liveJobs != 0) + { + SynchronizationWrapper.Wait(&best->mutex); + } + + SynchronizationWrapper.Exit(&best->mutex); + } + + /** + * Call COVER_best_wait() and then destroy the COVER_best_t. + */ + private static void COVER_best_destroy(COVER_best_s* best) + { + if (best == null) + { + return; + } + + COVER_best_wait(best); + if (best->dict != null) + { + free(best->dict); + } + + SynchronizationWrapper.Free(&best->mutex); + } + + /** + * Called when a thread is about to be launched. + * Increments liveJobs. + */ + private static void COVER_best_start(COVER_best_s* best) + { + if (best == null) + { + return; + } + + SynchronizationWrapper.Enter(&best->mutex); + ++best->liveJobs; + SynchronizationWrapper.Exit(&best->mutex); + } + + /** + * Called when a thread finishes executing, both on error or success. + * Decrements liveJobs and signals any waiting threads if liveJobs == 0. + * If this dictionary is the best so far save it and its parameters. 
+ */ + private static void COVER_best_finish(COVER_best_s* best, ZDICT_cover_params_t parameters, COVER_dictSelection selection) + { + void* dict = selection.dictContent; + nuint compressedSize = selection.totalCompressedSize; + nuint dictSize = selection.dictSize; + if (best == null) + { + return; + } + + { + nuint liveJobs; + SynchronizationWrapper.Enter(&best->mutex); + --best->liveJobs; + liveJobs = best->liveJobs; + if (compressedSize < best->compressedSize) + { + if (best->dict == null || best->dictSize < dictSize) + { + if (best->dict != null) + { + free(best->dict); + } + + best->dict = malloc(dictSize); + if (best->dict == null) + { + best->compressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + best->dictSize = 0; + SynchronizationWrapper.Pulse(&best->mutex); + SynchronizationWrapper.Exit(&best->mutex); + return; + } + } + + if (dict != null) + { + memcpy(best->dict, dict, (uint)dictSize); + best->dictSize = dictSize; + best->parameters = parameters; + best->compressedSize = compressedSize; + } + } + + if (liveJobs == 0) + { + SynchronizationWrapper.PulseAll(&best->mutex); + } + + SynchronizationWrapper.Exit(&best->mutex); + } + } + + private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz) + { + COVER_dictSelection ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; + } + + /** + * Error function for COVER_selectDict function. Returns a struct where + * return.totalCompressedSize is a ZSTD error. + */ + private static COVER_dictSelection COVER_dictSelectionError(nuint error) + { + return setDictSelection(null, 0, error); + } + + /** + * Error function for COVER_selectDict function. Checks if the return + * value is an error. + */ + private static uint COVER_dictSelectionIsError(COVER_dictSelection selection) + { + return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null ? 
1U : 0U; + } + + /** + * Always call after selectDict is called to free up used memory from + * newly created dictionary. + */ + private static void COVER_dictSelectionFree(COVER_dictSelection selection) + { + free(selection.dictContent); + } + + /** + * Called to finalize the dictionary and select one based on whether or not + * the shrink-dict flag was enabled. If enabled the dictionary used is the + * smallest dictionary within a specified regression of the compressed size + * from the largest dictionary. + */ + private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nuint dictBufferCapacity, nuint dictContentSize, byte* samplesBuffer, nuint* samplesSizes, uint nbFinalizeSamples, nuint nbCheckSamples, nuint nbSamples, ZDICT_cover_params_t @params, nuint* offsets, nuint totalCompressedSize) + { + nuint largestDict = 0; + nuint largestCompressed = 0; + byte* customDictContentEnd = customDictContent + dictContentSize; + byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity); + byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity); + double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1; + if (largestDictbuffer == null || candidateDictBuffer == null) + { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize); + dictContentSize = ZDICT_finalizeDictionary(largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams); + if (ZDICT_isError(dictContentSize)) + { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, largestDictbuffer, dictContentSize); + if (ERR_isError(totalCompressedSize)) + { + free(largestDictbuffer); + 
free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if (@params.shrinkDict == 0) + { + free(candidateDictBuffer); + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); + } + + largestDict = dictContentSize; + largestCompressed = totalCompressedSize; + dictContentSize = 256; + while (dictContentSize < largestDict) + { + memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict); + dictContentSize = ZDICT_finalizeDictionary(candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams); + if (ZDICT_isError(dictContentSize)) + { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, candidateDictBuffer, dictContentSize); + if (ERR_isError(totalCompressedSize)) + { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if (totalCompressedSize <= largestCompressed * regressionTolerance) + { + free(largestDictbuffer); + return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize); + } + + dictContentSize *= 2; + } + + dictContentSize = largestDict; + totalCompressedSize = largestCompressed; + free(candidateDictBuffer); + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs new file mode 100644 index 000000000..e3fca967e --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /*-***************************/ + /* generic DTableDesc */ + 
/*-***************************/ + public struct DTableDesc + { + public byte maxTableLog; + public byte tableType; + public byte tableLog; + public byte reserved; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs new file mode 100644 index 000000000..ecf8dc2cc --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct EStats_ress_t + { + /* dictionary */ + public ZSTD_CDict_s* dict; + /* working context */ + public ZSTD_CCtx_s* zc; + /* must be ZSTD_BLOCKSIZE_MAX allocated */ + public void* workPlace; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs new file mode 100644 index 000000000..12c39f6bd --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs @@ -0,0 +1,323 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*=== Version ===*/ + private static uint FSE_versionNumber() + { + return 0 * 100 * 100 + 9 * 100 + 0; + } + + /*=== Error Management ===*/ + private static bool FSE_isError(nuint code) + { + return ERR_isError(code); + } + + private static string FSE_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } + + /* Error Management */ + private static bool HUF_isError(nuint code) + { + return ERR_isError(code); + } + + private static string HUF_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } + + /*-************************************************************** + * FSE NCount encoding-decoding + ****************************************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint 
FSE_readNCount_body(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + { + byte* istart = (byte*)headerBuffer; + byte* iend = istart + hbSize; + byte* ip = istart; + int nbBits; + int remaining; + int threshold; + uint bitStream; + int bitCount; + uint charnum = 0; + uint maxSV1 = *maxSVPtr + 1; + int previous0 = 0; + if (hbSize < 8) + { + sbyte* buffer = stackalloc sbyte[8]; + /* This function only works when hbSize >= 8 */ + memset(buffer, 0, sizeof(sbyte) * 8); + memcpy(buffer, headerBuffer, (uint)hbSize); + { + nuint countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, buffer, sizeof(sbyte) * 8); + if (FSE_isError(countSize)) + return countSize; + if (countSize > hbSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return countSize; + } + } + + assert(hbSize >= 8); + memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short)); + bitStream = MEM_readLE32(ip); + nbBits = (int)((bitStream & 0xF) + 5); + if (nbBits > 15) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = (uint)nbBits; + remaining = (1 << nbBits) + 1; + threshold = 1 << nbBits; + nbBits++; + for (; ; ) + { + if (previous0 != 0) + { + /* Count the number of repeats. Each time the + * 2-bit repeat code is 0b11 there is another + * repeat. + * Avoid UB by setting the high bit to 1. 
+ */ + int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); + while (repeats >= 12) + { + charnum += 3 * 12; + if (ip <= iend - 7) + { + ip += 3; + } + else + { + bitCount -= (int)(8 * (iend - 7 - ip)); + bitCount &= 31; + ip = iend - 4; + } + + bitStream = MEM_readLE32(ip) >> bitCount; + repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); + } + + charnum += (uint)(3 * repeats); + bitStream >>= 2 * repeats; + bitCount += 2 * repeats; + assert((bitStream & 3) < 3); + charnum += bitStream & 3; + bitCount += 2; + if (charnum >= maxSV1) + break; + if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) + { + assert(bitCount >> 3 <= 3); + ip += bitCount >> 3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount &= 31; + ip = iend - 4; + } + + bitStream = MEM_readLE32(ip) >> bitCount; + } + + { + int max = 2 * threshold - 1 - remaining; + int count; + if ((bitStream & (uint)(threshold - 1)) < (uint)max) + { + count = (int)(bitStream & (uint)(threshold - 1)); + bitCount += nbBits - 1; + } + else + { + count = (int)(bitStream & (uint)(2 * threshold - 1)); + if (count >= threshold) + count -= max; + bitCount += nbBits; + } + + count--; + if (count >= 0) + { + remaining -= count; + } + else + { + assert(count == -1); + remaining += count; + } + + normalizedCounter[charnum++] = (short)count; + previous0 = count == 0 ? 
1 : 0; + assert(threshold > 1); + if (remaining < threshold) + { + if (remaining <= 1) + break; + nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1); + threshold = 1 << nbBits - 1; + } + + if (charnum >= maxSV1) + break; + if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) + { + ip += bitCount >> 3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount &= 31; + ip = iend - 4; + } + + bitStream = MEM_readLE32(ip) >> bitCount; + } + } + + if (remaining != 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (charnum > maxSV1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); + if (bitCount > 32) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *maxSVPtr = charnum - 1; + ip += bitCount + 7 >> 3; + return (nuint)(ip - istart); + } + + /* Avoids the FORCE_INLINE of the _body() function. */ + private static nuint FSE_readNCount_body_default(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + { + return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); + } + + /*! FSE_readNCount_bmi2(): + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. + */ + private static nuint FSE_readNCount_bmi2(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize, int bmi2) + { + return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); + } + + /*! FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). 
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ + private static nuint FSE_readNCount(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + { + return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, 0); + } + + /*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . + */ + private static nuint HUF_readStats(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize) + { + uint* wksp = stackalloc uint[219]; + return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(uint) * 219, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int bmi2) + { + uint weightTotal; + byte* ip = (byte*)src; + nuint iSize; + nuint oSize; + if (srcSize == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + iSize = ip[0]; + if (iSize >= 128) + { + oSize = iSize - 127; + iSize = (oSize + 1) / 2; + if (iSize + 1 > srcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + if (oSize >= hwSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + ip += 1; + { + uint n; + for (n = 0; n < oSize; n += 2) + { + huffWeight[n] = (byte)(ip[n / 2] >> 4); + huffWeight[n + 1] = (byte)(ip[n / 2] & 15); + } + } + } + else + { + if (iSize + 1 > srcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + oSize = 
FSE_decompress_wksp_bmi2(huffWeight, hwSize - 1, ip + 1, iSize, 6, workSpace, wkspSize, bmi2); + if (FSE_isError(oSize)) + return oSize; + } + + memset(rankStats, 0, (12 + 1) * sizeof(uint)); + weightTotal = 0; + { + uint n; + for (n = 0; n < oSize; n++) + { + if (huffWeight[n] > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + rankStats[huffWeight[n]]++; + weightTotal += (uint)(1 << huffWeight[n] >> 1); + } + } + + if (weightTotal == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + { + uint tableLog = ZSTD_highbit32(weightTotal) + 1; + if (tableLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *tableLogPtr = tableLog; + { + uint total = (uint)(1 << (int)tableLog); + uint rest = total - weightTotal; + uint verif = (uint)(1 << (int)ZSTD_highbit32(rest)); + uint lastWeight = ZSTD_highbit32(rest) + 1; + if (verif != rest) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + huffWeight[oSize] = (byte)lastWeight; + rankStats[lastWeight]++; + } + } + + if (rankStats[1] < 2 || (rankStats[1] & 1) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *nbSymbolsPtr = (uint)(oSize + 1); + return iSize + 1; + } + + /* Avoids the FORCE_INLINE of the _body() function. 
*/ + private static nuint HUF_readStats_body_default(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize) + { + return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0); + } + + private static nuint HUF_readStats_wksp(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags) + { + return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs new file mode 100644 index 000000000..a04be98f8 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs @@ -0,0 +1,111 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static bool ERR_isError(nuint code) + { + return code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTD_ErrorCode ERR_getErrorCode(nuint code) + { + if (!ERR_isError(code)) + return 0; + return (ZSTD_ErrorCode)(0 - code); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static string ERR_getErrorName(nuint code) + { + return ERR_getErrorString(ERR_getErrorCode(code)); + } + + /*-**************************************** + * Error Strings + ******************************************/ + private static string ERR_getErrorString(ZSTD_ErrorCode code) + { + const string notErrorCode = "Unspecified error code"; + switch (code) + { + case ZSTD_ErrorCode.ZSTD_error_no_error: + return "No error 
detected"; + case ZSTD_ErrorCode.ZSTD_error_GENERIC: + return "Error (generic)"; + case ZSTD_ErrorCode.ZSTD_error_prefix_unknown: + return "Unknown frame descriptor"; + case ZSTD_ErrorCode.ZSTD_error_version_unsupported: + return "Version not supported"; + case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported: + return "Unsupported frame parameter"; + case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge: + return "Frame requires too much memory for decoding"; + case ZSTD_ErrorCode.ZSTD_error_corruption_detected: + return "Data corruption detected"; + case ZSTD_ErrorCode.ZSTD_error_checksum_wrong: + return "Restored data doesn't match checksum"; + case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong: + return "Header of Literals' block doesn't respect format specification"; + case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported: + return "Unsupported parameter"; + case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported: + return "Unsupported combination of parameters"; + case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound: + return "Parameter is out of bound"; + case ZSTD_ErrorCode.ZSTD_error_init_missing: + return "Context should be init first"; + case ZSTD_ErrorCode.ZSTD_error_memory_allocation: + return "Allocation error : not enough memory"; + case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall: + return "workSpace buffer is not large enough"; + case ZSTD_ErrorCode.ZSTD_error_stage_wrong: + return "Operation not authorized at current processing stage"; + case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge: + return "tableLog requires too much memory : unsupported"; + case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge: + return "Unsupported max Symbol Value : too large"; + case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall: + return "Specified maxSymbolValue is too small"; + case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock: + return "This mode cannot generate an uncompressed block"; + case 
ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected: + return "pledged buffer stability condition is not respected"; + case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted: + return "Dictionary is corrupted"; + case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong: + return "Dictionary mismatch"; + case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed: + return "Cannot create Dictionary from provided samples"; + case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall: + return "Destination buffer is too small"; + case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong: + return "Src size is incorrect"; + case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null: + return "Operation on NULL destination buffer"; + case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull: + return "Operation made no progress over multiple calls, due to output buffer being full"; + case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty: + return "Operation made no progress over multiple calls, due to input being empty"; + case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge: + return "Frame index is too large"; + case ZSTD_ErrorCode.ZSTD_error_seekableIO: + return "An I/O error occurred when reading/seeking"; + case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong: + return "Destination buffer is wrong"; + case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong: + return "Source buffer is wrong"; + case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed: + return "Block-level external sequence producer returned an error code"; + case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid: + return "External sequences are not valid"; + case ZSTD_ErrorCode.ZSTD_error_maxCode: + default: + return notErrorCode; + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs new file mode 100644 index 000000000..5e77ffd5d --- /dev/null +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct EstimatedBlockSize + { + public nuint estLitSize; + public nuint estBlockSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs new file mode 100644 index 000000000..73a307fbd --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs @@ -0,0 +1,18 @@ +namespace ZstdSharp.Unsafe +{ + /*-************************************* + * Acceleration + ***************************************/ + public struct FASTCOVER_accel_t + { + /* Percentage of training samples used for ZDICT_finalizeDictionary */ + public uint finalize; + /* Number of dmer skipped between each dmer counted in computeFrequency */ + public uint skip; + public FASTCOVER_accel_t(uint finalize, uint skip) + { + this.finalize = finalize; + this.skip = skip; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs new file mode 100644 index 000000000..718132baf --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs @@ -0,0 +1,20 @@ +namespace ZstdSharp.Unsafe +{ + /*-************************************* + * Context + ***************************************/ + public unsafe struct FASTCOVER_ctx_t + { + public byte* samples; + public nuint* offsets; + public nuint* samplesSizes; + public nuint nbSamples; + public nuint nbTrainSamples; + public nuint nbTestSamples; + public nuint nbDmers; + public uint* freqs; + public uint d; + public uint f; + public FASTCOVER_accel_t accelParams; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs new file mode 100644 index 000000000..c80a4625e --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Parameters for FASTCOVER_tryParameters(). + */ + public unsafe struct FASTCOVER_tryParameters_data_s + { + public FASTCOVER_ctx_t* ctx; + public COVER_best_s* best; + public nuint dictBufferCapacity; + public ZDICT_cover_params_t parameters; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs new file mode 100644 index 000000000..d97e84828 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct FPStats + { + public Fingerprint pastEvents; + public Fingerprint newEvents; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs new file mode 100644 index 000000000..a45fabd17 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs @@ -0,0 +1,17 @@ +namespace ZstdSharp.Unsafe +{ + /* ***************************************** + * FSE symbol compression API + *******************************************/ + /*! + This API consists of small unitary functions, which highly benefit from being inlined. + Hence their body are included in next section. 
+ */ + public unsafe struct FSE_CState_t + { + public nint value; + public void* stateTable; + public void* symbolTT; + public uint stateLog; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs new file mode 100644 index 000000000..14d967ab1 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + /* ***************************************** + * FSE symbol decompression API + *******************************************/ + public unsafe struct FSE_DState_t + { + public nuint state; + /* precise table may vary, depending on U16 */ + public void* table; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs new file mode 100644 index 000000000..f4c638962 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + /* ====== Decompression ====== */ + public struct FSE_DTableHeader + { + public ushort tableLog; + public ushort fastMode; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs new file mode 100644 index 000000000..1a3e02289 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs @@ -0,0 +1,7 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct FSE_DecompressWksp + { + public fixed short ncount[256]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs new file mode 100644 index 000000000..49f2ab6d8 --- /dev/null +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct FSE_decode_t + { + public ushort newState; + public byte symbol; + public byte nbBits; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs new file mode 100644 index 000000000..157431722 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public enum FSE_repeat + { + /**< Cannot use the previous table */ + FSE_repeat_none, + /**< Can use the previous table but it must be checked */ + FSE_repeat_check, + /**< Can use the previous table and it is assumed to be valid */ + FSE_repeat_valid + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs new file mode 100644 index 000000000..5172daaaf --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /* ***************************************** + * Implementation of inlined functions + *******************************************/ + public struct FSE_symbolCompressionTransform + { + public int deltaFindState; + public uint deltaNbBits; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs new file mode 100644 index 000000000..5d7753448 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs @@ -0,0 +1,601 @@ +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*-************************************* + * Hash Functions + 
***************************************/ + /** + * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector + */ + private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) + { + if (d == 6) + { + return ZSTD_hash6Ptr(p, f); + } + + return ZSTD_hash8Ptr(p, f); + } + + private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(new FASTCOVER_accel_t[11] { new FASTCOVER_accel_t(finalize: 100, skip: 0), new FASTCOVER_accel_t(finalize: 100, skip: 0), new FASTCOVER_accel_t(finalize: 50, skip: 1), new FASTCOVER_accel_t(finalize: 34, skip: 2), new FASTCOVER_accel_t(finalize: 25, skip: 3), new FASTCOVER_accel_t(finalize: 20, skip: 4), new FASTCOVER_accel_t(finalize: 17, skip: 5), new FASTCOVER_accel_t(finalize: 14, skip: 6), new FASTCOVER_accel_t(finalize: 13, skip: 7), new FASTCOVER_accel_t(finalize: 11, skip: 8), new FASTCOVER_accel_t(finalize: 10, skip: 9) }); + /*-************************************* + * Helper functions + ***************************************/ + /** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of all dmers with hash value d. + * Let S_i be hash value of the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) + * + * Once the dmer with hash value d is in the dictionary we set F(d) = 0. 
+ */ + private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uint* freqs, uint begin, uint end, ZDICT_cover_params_t parameters, ushort* segmentFreqs) + { + /* Constants */ + uint k = parameters.k; + uint d = parameters.d; + uint f = ctx->f; + uint dmersInK = k - d + 1; + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = new COVER_segment_t + { + begin = 0, + end = 0, + score = 0 + }; + COVER_segment_t activeSegment; + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + while (activeSegment.end < end) + { + /* Get hash value of current dmer */ + nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); + if (segmentFreqs[idx] == 0) + { + activeSegment.score += freqs[idx]; + } + + activeSegment.end += 1; + segmentFreqs[idx] += 1; + if (activeSegment.end - activeSegment.begin == dmersInK + 1) + { + /* Get hash value of the dmer to be eliminated from active segment */ + nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + segmentFreqs[delIndex] -= 1; + if (segmentFreqs[delIndex] == 0) + { + activeSegment.score -= freqs[delIndex]; + } + + activeSegment.begin += 1; + } + + if (activeSegment.score > bestSegment.score) + { + bestSegment = activeSegment; + } + } + + while (activeSegment.begin < end) + { + nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + segmentFreqs[delIndex] -= 1; + activeSegment.begin += 1; + } + + { + /* Zero the frequency of hash value of each dmer covered by the chosen segment. 
*/ + uint pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) + { + nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); + freqs[i] = 0; + } + } + + return bestSegment; + } + + private static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, nuint maxDictSize, uint f, uint accel) + { + if (parameters.d == 0 || parameters.k == 0) + { + return 0; + } + + if (parameters.d != 6 && parameters.d != 8) + { + return 0; + } + + if (parameters.k > maxDictSize) + { + return 0; + } + + if (parameters.d > parameters.k) + { + return 0; + } + + if (f > 31 || f == 0) + { + return 0; + } + + if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) + { + return 0; + } + + if (accel > 10 || accel == 0) + { + return 0; + } + + return 1; + } + + /** + * Clean up a context initialized with `FASTCOVER_ctx_init()`. + */ + private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) + { + if (ctx == null) + return; + free(ctx->freqs); + ctx->freqs = null; + free(ctx->offsets); + ctx->offsets = null; + } + + /** + * Calculate for frequency of hash value of each dmer in ctx->samples + */ + private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx) + { + uint f = ctx->f; + uint d = ctx->d; + uint skip = ctx->accelParams.skip; + uint readLength = d > 8 ? d : 8; + nuint i; + assert(ctx->nbTrainSamples >= 5); + assert(ctx->nbTrainSamples <= ctx->nbSamples); + for (i = 0; i < ctx->nbTrainSamples; i++) + { + /* start of current dmer */ + nuint start = ctx->offsets[i]; + nuint currSampleEnd = ctx->offsets[i + 1]; + while (start + readLength <= currSampleEnd) + { + nuint dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); + freqs[dmerIndex]++; + start = start + skip + 1; + } + } + } + + /** + * Prepare a context for dictionary building. + * The context is only dependent on the parameter `d` and can be used multiple + * times. + * Returns 0 on success or error code on error. 
+ * The context must be destroyed with `FASTCOVER_ctx_destroy()`. + */ + private static nuint FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, uint d, double splitPoint, uint f, FASTCOVER_accel_t accelParams) + { + byte* samples = (byte*)samplesBuffer; + nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Split samples into testing and training sets */ + uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples; + uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples; + nuint trainingSamplesSize = splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; + nuint testSamplesSize = splitPoint < 1 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + if (totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong)) || totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1 * (1U << 30))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (nbTrainSamples < 5) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (nbTestSamples < 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + *ctx = new FASTCOVER_ctx_t + { + samples = samples, + samplesSizes = samplesSizes, + nbSamples = nbSamples, + nbTrainSamples = nbTrainSamples, + nbTestSamples = nbTestSamples, + nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? 
d : sizeof(ulong)) + 1, + d = d, + f = f, + accelParams = accelParams, + offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)) + }; + if (ctx->offsets == null) + { + FASTCOVER_ctx_destroy(ctx); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + { + uint i; + ctx->offsets[0] = 0; + assert(nbSamples >= 5); + for (i = 1; i <= nbSamples; ++i) + { + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + } + } + + ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint)); + if (ctx->freqs == null) + { + FASTCOVER_ctx_destroy(ctx); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + FASTCOVER_computeFrequency(ctx->freqs, ctx); + return 0; + } + + /** + * Given the prepared context build the dictionary. + */ + private static nuint FASTCOVER_buildDictionary(FASTCOVER_ctx_t* ctx, uint* freqs, void* dictBuffer, nuint dictBufferCapacity, ZDICT_cover_params_t parameters, ushort* segmentFreqs) + { + byte* dict = (byte*)dictBuffer; + nuint tail = dictBufferCapacity; + /* Divide the data into epochs. We will select one segment from each epoch. */ + COVER_epoch_info_t epochs = COVER_computeEpochs((uint)dictBufferCapacity, (uint)ctx->nbDmers, parameters.k, 1); + const nuint maxZeroScoreRun = 10; + nuint zeroScoreRun = 0; + nuint epoch; + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) + { + uint epochBegin = (uint)(epoch * epochs.size); + uint epochEnd = epochBegin + epochs.size; + nuint segmentSize; + /* Select a segment */ + COVER_segment_t segment = FASTCOVER_selectSegment(ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs); + if (segment.score == 0) + { + if (++zeroScoreRun >= maxZeroScoreRun) + { + break; + } + + continue; + } + + zeroScoreRun = 0; + segmentSize = segment.end - segment.begin + parameters.d - 1 < tail ? 
segment.end - segment.begin + parameters.d - 1 : tail; + if (segmentSize < parameters.d) + { + break; + } + + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize); + } + + return tail; + } + + /** + * Tries a set of parameters and updates the COVER_best_t with the results. + * This function is thread safe if zstd is compiled with multithreaded support. + * It takes its parameters as an *OWNING* opaque pointer to support threading. + */ + private static void FASTCOVER_tryParameters(void* opaque) + { + /* Save parameters as local variables */ + FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque; + FASTCOVER_ctx_t* ctx = data->ctx; + ZDICT_cover_params_t parameters = data->parameters; + nuint dictBufferCapacity = data->dictBufferCapacity; + nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + /* Initialize array to keep track of frequency of dmer within activeSegment */ + ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort)); + /* Allocate space for hash table, dict, and freqs */ + byte* dict = (byte*)malloc(dictBufferCapacity); + COVER_dictSelection selection = COVER_dictSelectionError(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))); + uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint)); + if (segmentFreqs == null || dict == null || freqs == null) + { + goto _cleanup; + } + + memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint))); + { + nuint tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, parameters, segmentFreqs); + uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); + selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); + if 
(COVER_dictSelectionIsError(selection) != 0) + { + goto _cleanup; + } + } + + _cleanup: + free(dict); + COVER_best_finish(data->best, parameters, selection); + free(data); + free(segmentFreqs); + COVER_dictSelectionFree(selection); + free(freqs); + } + + private static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t* coverParams) + { + coverParams->k = fastCoverParams.k; + coverParams->d = fastCoverParams.d; + coverParams->steps = fastCoverParams.steps; + coverParams->nbThreads = fastCoverParams.nbThreads; + coverParams->splitPoint = fastCoverParams.splitPoint; + coverParams->zParams = fastCoverParams.zParams; + coverParams->shrinkDict = fastCoverParams.shrinkDict; + } + + private static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t* fastCoverParams, uint f, uint accel) + { + fastCoverParams->k = coverParams.k; + fastCoverParams->d = coverParams.d; + fastCoverParams->steps = coverParams.steps; + fastCoverParams->nbThreads = coverParams.nbThreads; + fastCoverParams->splitPoint = coverParams.splitPoint; + fastCoverParams->f = f; + fastCoverParams->accel = accel; + fastCoverParams->zParams = coverParams.zParams; + fastCoverParams->shrinkDict = coverParams.shrinkDict; + } + + /*! ZDICT_trainFromBuffer_fastCover(): + * Train a dictionary from an array of samples using a modified version of COVER algorithm. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * d and k are required. + * All other parameters are optional, will use default values if not provided + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * See ZDICT_trainFromBuffer() for details on failure modes. 
+ * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ + public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t parameters) + { + byte* dict = (byte*)dictBuffer; + FASTCOVER_ctx_t ctx; + ZDICT_cover_params_t coverParams; + FASTCOVER_accel_t accelParams; + g_displayLevel = (int)parameters.zParams.notificationLevel; + parameters.splitPoint = 1; + parameters.f = parameters.f == 0 ? 20 : parameters.f; + parameters.accel = parameters.accel == 0 ? 1 : parameters.accel; + coverParams = new ZDICT_cover_params_t(); + FASTCOVER_convertToCoverParams(parameters, &coverParams); + if (FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, parameters.accel) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (nbSamples == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (dictBufferCapacity < 256) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; + { + nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, accelParams); + if (ERR_isError(initVal)) + { + return initVal; + } + } + + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); + { + /* Initialize array to keep track of frequency of dmer within activeSegment */ + ushort* segmentFreqs = 
(ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort)); + nuint tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, dictBufferCapacity, coverParams, segmentFreqs); + uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); + nuint dictionarySize = ZDICT_finalizeDictionary(dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); + if (!ERR_isError(dictionarySize)) + { + } + + FASTCOVER_ctx_destroy(&ctx); + free(segmentFreqs); + return dictionarySize; + } + } + + /*! ZDICT_optimizeTrainFromBuffer_fastCover(): + * The same requirements as above hold for all the parameters except `parameters`. + * This function tries many parameter combinations (specifically, k and d combinations) + * and picks the best parameters. `*parameters` is filled with the best parameters found, + * dictionary constructed with those parameters is stored in `dictBuffer`. + * All of the parameters d, k, steps, f, and accel are optional. + * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. + * if steps is zero it defaults to its default value. + * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. + * If f is zero, default value of 20 is used. + * If accel is zero, default value of 1 is used. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * On success `*parameters` contains the parameters selected. + * See ZDICT_trainFromBuffer() for details on failure modes. + * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. 
 */
        public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t* parameters)
        {
            ZDICT_cover_params_t coverParams;
            FASTCOVER_accel_t accelParams;
            /* constants */
            uint nbThreads = parameters->nbThreads;
            double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint;
            uint kMinD = parameters->d == 0 ? 6 : parameters->d;
            uint kMaxD = parameters->d == 0 ? 8 : parameters->d;
            uint kMinK = parameters->k == 0 ? 50 : parameters->k;
            uint kMaxK = parameters->k == 0 ? 2000 : parameters->k;
            uint kSteps = parameters->steps == 0 ? 40 : parameters->steps;
            uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1;
            // kIterations is only used for progress display upstream; unused here.
            uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
            uint f = parameters->f == 0 ? 20 : parameters->f;
            uint accel = parameters->accel == 0 ? 1 : parameters->accel;
            const uint shrinkDict = 0;
            /* Local variables */
            int displayLevel = (int)parameters->zParams.notificationLevel;
            uint iteration = 1;
            uint d;
            uint k;
            COVER_best_s best;
            void* pool = null;
            int warned = 0;
            /* Checks */
            if (splitPoint <= 0 || splitPoint > 1)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            if (accel == 0 || accel > 10)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            // NOTE(review): `kMinK < kMaxD` mirrors the upstream zstd check verbatim,
            // even though comparing a k bound against a d bound looks odd — keep as is.
            if (kMinK < kMaxD || kMaxK < kMinK)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            if (nbSamples == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
            }

            if (dictBufferCapacity < 256)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            }

            if (nbThreads > 1)
            {
                pool = POOL_create(nbThreads, 1);
                if (pool == null)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                }
            }

            /* Initialization */
            COVER_best_init(&best);
            coverParams = new ZDICT_cover_params_t();
            FASTCOVER_convertToCoverParams(*parameters, &coverParams);
            accelParams = FASTCOVER_defaultAccelParameters[accel];
            /* Turn down global display level to clean up display at level 2 and below */
            g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
            /* Loop through d first because each new value needs a new context */
            for (d = kMinD; d <= kMaxD; d += 2)
            {
                /* Initialize the context for this value of d */
                FASTCOVER_ctx_t ctx;
                {
                    nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
                    if (ERR_isError(initVal))
                    {
                        COVER_best_destroy(&best);
                        POOL_free(pool);
                        return initVal;
                    }
                }

                if (warned == 0)
                {
                    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
                    warned = 1;
                }

                /* Loop through k reusing the same context */
                for (k = kMinK; k <= kMaxK; k += kStepSize)
                {
                    /* Prepare the arguments */
                    // Ownership of `data` passes to FASTCOVER_tryParameters, which frees it.
                    FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc((ulong)sizeof(FASTCOVER_tryParameters_data_s));
                    if (data == null)
                    {
                        COVER_best_destroy(&best);
                        FASTCOVER_ctx_destroy(&ctx);
                        POOL_free(pool);
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                    }

                    data->ctx = &ctx;
                    data->best = &best;
                    data->dictBufferCapacity = dictBufferCapacity;
                    data->parameters = coverParams;
                    data->parameters.k = k;
                    data->parameters.d = d;
                    data->parameters.splitPoint = splitPoint;
                    data->parameters.steps = kSteps;
                    data->parameters.shrinkDict = shrinkDict;
                    data->parameters.zParams.notificationLevel = (uint)g_displayLevel;
                    /* Check the parameters */
                    if (FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel) == 0)
                    {
                        free(data);
                        continue;
                    }

                    /* Call the function and pass ownership of data to it */
                    COVER_best_start(&best);
                    if (pool != null)
                    {
                        // NOTE(review): the function-pointer type arguments appear to have been
                        // lost in this patch hunk — expected `(delegate* managed<void*, void>)`;
                        // verify against the original ZstdSharp source.
                        POOL_add(pool, (delegate* managed)(&FASTCOVER_tryParameters), data);
                    }
                    else
                    {
                        FASTCOVER_tryParameters(data);
                    }

                    ++iteration;
                }

                COVER_best_wait(&best);
                FASTCOVER_ctx_destroy(&ctx);
            }

            /* Fill the output buffer and parameters with output of the best parameters */
            {
                nuint dictSize = best.dictSize;
                if (ERR_isError(best.compressedSize))
                {
                    nuint compressedSize = best.compressedSize;
                    COVER_best_destroy(&best);
                    POOL_free(pool);
                    return compressedSize;
                }

                FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
                memcpy(dictBuffer, best.dict, (uint)dictSize);
                COVER_best_destroy(&best);
                POOL_free(pool);
                return dictSize;
            }
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs
new file mode 100644
index 000000000..6ef7760f7
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs
@@ -0,0 +1,8 @@
namespace ZstdSharp.Unsafe
{
    // Fixed-size event-count histogram (1024 buckets) plus a running event total.
    // NOTE(review): consumers are not visible in this hunk — presumably used by
    // zstd's block-splitting heuristics; confirm before documenting further.
    // NOTE(review): this struct is `public` in a namespace (`ZstdSharp.Unsafe`) that
    // does not match the SharpCompress folder layout — consider making it internal.
    public unsafe struct Fingerprint
    {
        public fixed uint events[1024];
        public nuint nbEvents;
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs
new file mode 100644
index 000000000..dc2174b90
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs
@@ -0,0 +1,157 @@
using System.Runtime.CompilerServices;
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        // Initializes an FSE compression state from a compression table `ct`.
        // Layout of `ct`: [0] = header (tableLog in low 16 bits), followed by the
        // state table (u16) and the per-symbol transform table.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct)
        {
            void* ptr = ct;
            ushort* u16ptr = (ushort*)ptr;
            uint tableLog = MEM_read16(ptr);
            statePtr->value = (nint)1 << (int)tableLog;
            statePtr->stateTable = u16ptr + 2;
            statePtr->symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
            statePtr->stateLog = tableLog;
        }

        /*!
FSE_initCState2() :
         * Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
         * uses the smallest state value possible, saving the cost of this symbol */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint symbol)
        {
            FSE_initCState(ref statePtr, ct);
            {
                FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)statePtr.symbolTT)[symbol];
                ushort* stateTable = (ushort*)statePtr.stateTable;
                // Round deltaNbBits up to the next integer bit count (fixed-point >>16).
                uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16;
                statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits);
                statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState];
            }
        }

        // Encodes one symbol: emits the low bits of the current state, then jumps
        // to the next state via the per-symbol transform. Bit container/pos are
        // passed by ref (flattened BIT_CStream_t fields).
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_encodeSymbol(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref FSE_CState_t statePtr, uint symbol)
        {
            FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)statePtr.symbolTT)[symbol];
            ushort* stateTable = (ushort*)statePtr.stateTable;
            uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16;
            BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut);
            statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState];
        }

        // Flushes the final state value (tableLog bits) into the bitstream.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_flushCState(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr, ref FSE_CState_t statePtr)
        {
            BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, statePtr.stateLog);
            BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
        }

        /* FSE_getMaxNbBits() :
         * Approximate maximum cost of a symbol, in bits.
         * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
         * note 1 : assume symbolValue is valid (<= maxSymbolValue)
         * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue)
        {
            FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
            return symbolTT[symbolValue].deltaNbBits + ((1 << 16) - 1) >> 16;
        }

        /* FSE_bitCost() :
         * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
         * note 1 : assume symbolValue is valid (<= maxSymbolValue)
         * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static uint FSE_bitCost(void* symbolTTPtr, uint tableLog, uint symbolValue, uint accuracyLog)
        {
            FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr;
            uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
            uint threshold = minNbBits + 1 << 16;
            assert(tableLog < 16);
            assert(accuracyLog < 31 - tableLog);
            {
                uint tableSize = (uint)(1 << (int)tableLog);
                uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
                /* linear interpolation (very approximate) */
                uint normalizedDeltaFromThreshold = deltaFromThreshold << (int)accuracyLog >> (int)tableLog;
                uint bitMultiplier = (uint)(1 << (int)accuracyLog);
                assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
                assert(normalizedDeltaFromThreshold <= bitMultiplier);
                return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold;
            }
        }

        // Initializes an FSE decode state: reads tableLog bits as the initial state
        // and reloads the input bit stream.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_initDState(ref FSE_DState_t DStatePtr, ref BIT_DStream_t bitD, uint* dt)
        {
            void* ptr = dt;
            FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr;
            DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog);
            BIT_reloadDStream(ref bitD.bitContainer, ref bitD.bitsConsumed, ref bitD.ptr, bitD.start, bitD.limitPtr);
            DStatePtr.table = dt + 1;
        }

        // Returns the symbol for the current state without consuming any bits.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr)
        {
            FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
            return DInfo.symbol;
        }

        // Advances the decode state without returning the symbol.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
        {
            FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state];
            uint nbBits = DInfo.nbBits;
            nuint lowBits = BIT_readBits(bitD, nbBits);
            DStatePtr->state = DInfo.newState + lowBits;
        }

        // Decodes one symbol and advances the state (safe bit read).
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static byte FSE_decodeSymbol(ref FSE_DState_t DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed)
        {
            FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
            uint nbBits = DInfo.nbBits;
            byte symbol = DInfo.symbol;
            nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
            DStatePtr.state = DInfo.newState + lowBits;
            return symbol;
        }

        /*! FSE_decodeSymbolFast() :
            unsafe, only works if no symbol has a probability > 50% */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static byte FSE_decodeSymbolFast(ref FSE_DState_t DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed)
        {
            FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state];
            uint nbBits = DInfo.nbBits;
            byte symbol = DInfo.symbol;
            nuint lowBits = BIT_readBitsFast(bitD_bitContainer, ref bitD_bitsConsumed, nbBits);
            DStatePtr.state = DInfo.newState + lowBits;
            return symbol;
        }

        // Returns 1 once the decode state has been fully consumed (state == 0).
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static uint FSE_endOfDState(FSE_DState_t* DStatePtr)
        {
            return DStatePtr->state == 0 ? 1U : 0U;
        }

        // `ref` overload of FSE_initCState (see pointer overload above); same table layout.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void FSE_initCState(ref FSE_CState_t statePtr, uint* ct)
        {
            void* ptr = ct;
            ushort* u16ptr = (ushort*)ptr;
            uint tableLog = MEM_read16(ptr);
            statePtr.value = (nint)1 << (int)tableLog;
            statePtr.stateTable = u16ptr + 2;
            statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1);
            statePtr.stateLog = tableLog;
        }
    }
}
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs
new file mode 100644
index 000000000..a218439f1
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs
@@ -0,0 +1,660 @@
using static ZstdSharp.UnsafeHelper;
using System;
using System.Runtime.InteropServices;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* FSE_buildCTable_wksp() :
         * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle worst case situation, which is `1<> 1 : 1);
        // NOTE(review): the span above is corrupted in this patch hunk — the remainder
        // of the doc comment, the FSE_buildCTable_wksp() signature and the leading
        // local declarations (ct/tableLog/tableSize/tableMask/tableU16/FSCT) appear to
        // have been swallowed by `1<<tableLog` angle-bracket markup. Restore from the
        // original ZstdSharp/zstd source before merging; the visible tail
        // "> 1 : 1);" matches `void* FSCT = (uint*)ptr + 1 + (tableLog != 0 ? tableSize >> 1 : 1);`.
            FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCT;
            uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
            uint maxSV1 = maxSymbolValue + 1;
            /* size = maxSV1 */
            ushort* cumul = (ushort*)workSpace;
            /* size = tableSize */
            byte* tableSymbol = (byte*)(cumul + (maxSV1 + 1));
            uint highThreshold = tableSize - 1;
            assert(((nuint)workSpace & 1) == 0);
            if (sizeof(uint) * ((maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + sizeof(ulong) / sizeof(uint)) > wkspSize)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
            // The header (tableLog, maxSymbolValue) lives just below the state table.
            tableU16[-2] = (ushort)tableLog;
            tableU16[-1] = (ushort)maxSymbolValue;
            assert(tableLog < 16);
            /* symbol start positions */
            {
                uint u;
                cumul[0] = 0;
                for (u = 1; u <= maxSV1; u++)
                {
                    if (normalizedCounter[u - 1] == -1)
                    {
                        /* Low proba symbol: mapped to the table's tail region. */
                        cumul[u] = (ushort)(cumul[u - 1] + 1);
                        tableSymbol[highThreshold--] = (byte)(u - 1);
                    }
                    else
                    {
                        assert(normalizedCounter[u - 1] >= 0);
                        cumul[u] = (ushort)(cumul[u - 1] + (ushort)normalizedCounter[u - 1]);
                        assert(cumul[u] >= cumul[u - 1]);
                    }
                }

                cumul[maxSV1] = (ushort)(tableSize + 1);
            }

            /* Spread symbols */
            if (highThreshold == tableSize - 1)
            {
                /* Case without low prob symbols: fast 8-byte-at-a-time spread. */
                /* size = tableSize + 8 (may write beyond tableSize) */
                byte* spread = tableSymbol + tableSize;
                {
                    const ulong add = 0x0101010101010101UL;
                    nuint pos = 0;
                    ulong sv = 0;
                    uint s;
                    for (s = 0; s < maxSV1; ++s, sv += add)
                    {
                        int i;
                        int n = normalizedCounter[s];
                        MEM_write64(spread + pos, sv);
                        for (i = 8; i < n; i += 8)
                        {
                            MEM_write64(spread + pos + i, sv);
                        }

                        assert(n >= 0);
                        pos += (nuint)n;
                    }
                }

                {
                    nuint position = 0;
                    nuint s;
                    /* Experimentally determined optimal unroll */
                    const nuint unroll = 2;
                    assert(tableSize % unroll == 0);
                    for (s = 0; s < tableSize; s += unroll)
                    {
                        nuint u;
                        for (u = 0; u < unroll; ++u)
                        {
                            nuint uPosition = position + u * step & tableMask;
                            tableSymbol[uPosition] = spread[s + u];
                        }

                        position = position + unroll * step & tableMask;
                    }

                    assert(position == 0);
                }
            }
            else
            {
                /* General case: step-and-skip spread honoring the low-prob tail region. */
                uint position = 0;
                uint symbol;
                for (symbol = 0; symbol < maxSV1; symbol++)
                {
                    int nbOccurrences;
                    int freq = normalizedCounter[symbol];
                    for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++)
                    {
                        tableSymbol[position] = (byte)symbol;
                        position = position + step & tableMask;
                        while (position > highThreshold)
                            position = position + step & tableMask;
                    }
                }

                assert(position == 0);
            }

            /* Build table */
            {
                uint u;
                for (u = 0; u < tableSize; u++)
                {
                    /* note : static analyzer may not understand tableSymbol is properly initialized */
                    byte s = tableSymbol[u];
                    tableU16[cumul[s]++] = (ushort)(tableSize + u);
                }
            }

            /* Build Symbol Transformation Table */
            {
                uint total = 0;
                uint s;
                for (s = 0; s <= maxSymbolValue; s++)
                {
                    switch (normalizedCounter[s])
                    {
                        case 0:
                            /* Unused symbol: fake cost of tableLog+1 bits (see FSE_getMaxNbBits). */
                            symbolTT[s].deltaNbBits = (tableLog + 1 << 16) - (uint)(1 << (int)tableLog);
                            break;
                        case -1:
                        case 1:
                            symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)(1 << (int)tableLog);
                            assert(total <= 2147483647);
                            symbolTT[s].deltaFindState = (int)(total - 1);
                            total++;
                            break;
                        default:
                            assert(normalizedCounter[s] > 1);
                            {
                                uint maxBitsOut = tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1);
                                uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut;
                                symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                                symbolTT[s].deltaFindState = (int)(total - (uint)normalizedCounter[s]);
                                total += (uint)normalizedCounter[s];
                            }

                            break;
                    }
                }
            }

            return 0;
        }

        /*-**************************************************************
         * FSE NCount encoding
         ****************************************************************/
        // Worst-case size of the serialized normalized-count header; 512 when
        // maxSymbolValue is 0 (unknown alphabet size).
        private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog)
        {
            nuint maxHeaderSize = ((maxSymbolValue + 1) * tableLog + 4 + 2) / 8 + 1 + 2;
            return maxSymbolValue != 0 ? maxHeaderSize : 512;
        }

        // Serializes the normalized counter table; `writeIsSafe != 0` means the output
        // buffer is known to be large enough so bounds checks can be skipped.
        private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog, uint writeIsSafe)
        {
            byte* ostart = (byte*)header;
            byte* @out = ostart;
            byte* oend = ostart + headerBufferSize;
            int nbBits;
            int tableSize = 1 << (int)tableLog;
            int remaining;
            int threshold;
            uint bitStream = 0;
            int bitCount = 0;
            uint symbol = 0;
            uint alphabetSize = maxSymbolValue + 1;
            int previousIs0 = 0;
            /* Table Size: tableLog is encoded as (tableLog - FSE_MIN_TABLELOG) on 4 bits. */
            bitStream += tableLog - 5 << bitCount;
            bitCount += 4;
            /* Init */
            remaining = tableSize + 1;
            threshold = tableSize;
            nbBits = (int)tableLog + 1;
            while (symbol < alphabetSize && remaining > 1)
            {
                if (previousIs0 != 0)
                {
                    /* Run of zero-probability symbols: encoded as repeat flags. */
                    uint start = symbol;
                    while (symbol < alphabetSize && normalizedCounter[symbol] == 0)
                        symbol++;
                    if (symbol == alphabetSize)
                        break;
                    while (symbol >= start + 24)
                    {
                        start += 24;
                        bitStream += 0xFFFFU << bitCount;
                        if (writeIsSafe == 0 && @out > oend - 2)
                            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
                        @out[0] = (byte)bitStream;
                        @out[1] = (byte)(bitStream >> 8);
                        @out += 2;
                        bitStream >>= 16;
                    }

                    while (symbol >= start + 3)
                    {
                        start += 3;
                        bitStream += 3U << bitCount;
                        bitCount += 2;
                    }

                    bitStream += symbol - start << bitCount;
                    bitCount += 2;
                    if (bitCount > 16)
                    {
                        if (writeIsSafe == 0 && @out > oend - 2)
                            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
                        @out[0] = (byte)bitStream;
                        @out[1] = (byte)(bitStream >> 8);
                        @out += 2;
                        bitStream >>= 16;
                        bitCount -= 16;
                    }
                }

                {
                    int count = normalizedCounter[symbol++];
                    int max = 2 * threshold - 1 - remaining;
                    remaining -= count < 0 ? -count : count;
                    count++;
                    if (count >= threshold)
                        count += max;
                    bitStream += (uint)count << bitCount;
                    bitCount += nbBits;
                    bitCount -= count < max ? 1 : 0;
                    previousIs0 = count == 1 ? 1 : 0;
                    if (remaining < 1)
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
                    while (remaining < threshold)
                    {
                        nbBits--;
                        threshold >>= 1;
                    }
                }

                if (bitCount > 16)
                {
                    if (writeIsSafe == 0 && @out > oend - 2)
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
                    @out[0] = (byte)bitStream;
                    @out[1] = (byte)(bitStream >> 8);
                    @out += 2;
                    bitStream >>= 16;
                    bitCount -= 16;
                }
            }

            if (remaining != 1)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            assert(symbol <= alphabetSize);
            /* flush remaining bitStream */
            if (writeIsSafe == 0 && @out > oend - 2)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            @out[0] = (byte)bitStream;
            @out[1] = (byte)(bitStream >> 8);
            @out += (bitCount + 7) / 8;
            assert(@out >= ostart);
            return (nuint)(@out - ostart);
        }

        /*! FSE_writeNCount():
            Compactly save 'normalizedCounter' into 'buffer'.
            @return : size of the compressed table,
            or an errorCode, which can be tested using FSE_isError(). */
        private static nuint FSE_writeNCount(void* buffer, nuint bufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog)
        {
            if (tableLog > 14 - 2)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
            if (tableLog < 5)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
                return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
            return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
        }

        /* provides the minimum logSize to safely represent a distribution */
        private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue)
        {
            uint minBitsSrc = ZSTD_highbit32((uint)srcSize) + 1;
            uint minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
            uint minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
            assert(srcSize > 1);
            return minBits;
        }

        /* *****************************************
         * FSE advanced API
         ***************************************** */
        // Picks a table log clamped to [5, 12] and bounded by both the source size
        // (minus `minus` bits) and the minimum required by the alphabet.
        private static uint FSE_optimalTableLog_internal(uint maxTableLog, nuint srcSize, uint maxSymbolValue, uint minus)
        {
            uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus;
            uint tableLog = maxTableLog;
            uint minBits = FSE_minTableLog(srcSize, maxSymbolValue);
            assert(srcSize > 1);
            if (tableLog == 0)
                tableLog = 13 - 2;
            if (maxBitsSrc < tableLog)
                tableLog = maxBitsSrc;
            if (minBits > tableLog)
                tableLog = minBits;
            if (tableLog < 5)
                tableLog = 5;
            if (tableLog > 14 - 2)
                tableLog = 14 - 2;
            return tableLog;
        }

        /*! FSE_optimalTableLog():
            dynamically downsize 'tableLog' when conditions are met.
            It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
            @return : recommended tableLog (necessarily <= 'maxTableLog') */
        private static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue)
        {
            return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
        }

        /* Secondary normalization method.
           To be used when primary method fails.
*/
        private static nuint FSE_normalizeM2(short* norm, uint tableLog, uint* count, nuint total, uint maxSymbolValue, short lowProbCount)
        {
            const short NOT_YET_ASSIGNED = -2;
            uint s;
            uint distributed = 0;
            uint ToDistribute;
            /* Init */
            uint lowThreshold = (uint)(total >> (int)tableLog);
            uint lowOne = (uint)(total * 3 >> (int)(tableLog + 1));
            for (s = 0; s <= maxSymbolValue; s++)
            {
                if (count[s] == 0)
                {
                    norm[s] = 0;
                    continue;
                }

                if (count[s] <= lowThreshold)
                {
                    norm[s] = lowProbCount;
                    distributed++;
                    total -= count[s];
                    continue;
                }

                if (count[s] <= lowOne)
                {
                    norm[s] = 1;
                    distributed++;
                    total -= count[s];
                    continue;
                }

                norm[s] = NOT_YET_ASSIGNED;
            }

            ToDistribute = (uint)(1 << (int)tableLog) - distributed;
            if (ToDistribute == 0)
                return 0;
            if (total / ToDistribute > lowOne)
            {
                /* risk of rounding to zero: lower the low-one threshold and retry. */
                lowOne = (uint)(total * 3 / (ToDistribute * 2));
                for (s = 0; s <= maxSymbolValue; s++)
                {
                    if (norm[s] == NOT_YET_ASSIGNED && count[s] <= lowOne)
                    {
                        norm[s] = 1;
                        distributed++;
                        total -= count[s];
                        continue;
                    }
                }

                ToDistribute = (uint)(1 << (int)tableLog) - distributed;
            }

            if (distributed == maxSymbolValue + 1)
            {
                /* all values are pretty poor;
                   probably incompressible data (should have already been detected);
                   find max, then give all remaining points to max */
                uint maxV = 0, maxC = 0;
                for (s = 0; s <= maxSymbolValue; s++)
                    if (count[s] > maxC)
                    {
                        maxV = s;
                        maxC = count[s];
                    }

                norm[maxV] += (short)ToDistribute;
                return 0;
            }

            if (total == 0)
            {
                /* all of the symbols were low enough for the lowOne or lowThreshold:
                   round-robin the leftover points over already-assigned symbols. */
                for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
                    if (norm[s] > 0)
                    {
                        ToDistribute--;
                        norm[s]++;
                    }

                return 0;
            }

            {
                /* Distribute the remaining weight proportionally in 62-bit fixed point. */
                ulong vStepLog = 62 - tableLog;
                ulong mid = (1UL << (int)(vStepLog - 1)) - 1;
                /* scale on remaining */
                ulong rStep = (((ulong)1 << (int)vStepLog) * ToDistribute + mid) / (uint)total;
                ulong tmpTotal = mid;
                for (s = 0; s <= maxSymbolValue; s++)
                {
                    if (norm[s] == NOT_YET_ASSIGNED)
                    {
                        ulong end = tmpTotal + count[s] * rStep;
                        uint sStart = (uint)(tmpTotal >> (int)vStepLog);
                        uint sEnd = (uint)(end >> (int)vStepLog);
                        uint weight = sEnd - sStart;
                        if (weight < 1)
                            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
                        norm[s] = (short)weight;
                        tmpTotal = end;
                    }
                }
            }

            return 0;
        }

#if NET7_0_OR_GREATER
        // "Rest to beat" rounding thresholds used by FSE_normalizeCount, exposed as a
        // constant blob pointer on modern runtimes.
        // NOTE(review): the element type argument appears to have been lost in this
        // patch hunk — expected `ReadOnlySpan<uint>`; verify against the original source.
        private static ReadOnlySpan Span_rtbTable => new uint[8]
        {
            0,
            473195,
            504333,
            520860,
            550000,
            700000,
            750000,
            830000
        };
        private static uint* rtbTable => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_rtbTable));
#else

        // Fallback for older targets: pin the table once via GetArrayPointer.
        private static readonly uint* rtbTable = GetArrayPointer(new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 });
#endif
        /*! FSE_normalizeCount():
            normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
            'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
            useLowProbCount is a boolean parameter which trades off compressed size for
            faster header decoding. When it is set to 1, the compressed data will be slightly
            smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
            faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
            is a good default, since header deserialization makes a big speed difference.
            Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
            @return : tableLog,
            or an errorCode, which can be tested using FSE_isError() */
        private static nuint FSE_normalizeCount(short* normalizedCounter, uint tableLog, uint* count, nuint total, uint maxSymbolValue, uint useLowProbCount)
        {
            if (tableLog == 0)
                tableLog = 13 - 2;
            if (tableLog < 5)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (tableLog > 14 - 2)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
            if (tableLog < FSE_minTableLog(total, maxSymbolValue))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            {
                short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1);
                ulong scale = 62 - tableLog;
                /* <== here, one division ! */
                ulong step = ((ulong)1 << 62) / (uint)total;
                ulong vStep = 1UL << (int)(scale - 20);
                int stillToDistribute = 1 << (int)tableLog;
                uint s;
                uint largest = 0;
                short largestP = 0;
                uint lowThreshold = (uint)(total >> (int)tableLog);
                for (s = 0; s <= maxSymbolValue; s++)
                {
                    /* rle special case: one symbol holds everything */
                    if (count[s] == total)
                        return 0;
                    if (count[s] == 0)
                    {
                        normalizedCounter[s] = 0;
                        continue;
                    }

                    if (count[s] <= lowThreshold)
                    {
                        normalizedCounter[s] = lowProbCount;
                        stillToDistribute--;
                    }
                    else
                    {
                        short proba = (short)(count[s] * step >> (int)scale);
                        if (proba < 8)
                        {
                            /* rtbTable holds rounding thresholds for small probabilities. */
                            ulong restToBeat = vStep * rtbTable[proba];
                            proba += (short)(count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 1 : 0);
                        }

                        if (proba > largestP)
                        {
                            largestP = proba;
                            largest = s;
                        }

                        normalizedCounter[s] = proba;
                        stillToDistribute -= proba;
                    }
                }

                if (-stillToDistribute >= normalizedCounter[largest] >> 1)
                {
                    /* corner case, need another normalization method */
                    nuint errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
                    if (ERR_isError(errorCode))
                        return errorCode;
                }
                else
                    normalizedCounter[largest] += (short)stillToDistribute;
            }

            return tableLog;
        }

        /* fake FSE_CTable, for rle input (always same symbol) */
        private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue)
        {
            void* ptr = ct;
            ushort* tableU16 = (ushort*)ptr + 2;
            void* FSCTptr = (uint*)ptr + 2;
            FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCTptr;
            /* header: tableLog = 0, so the single state never emits bits */
            tableU16[-2] = 0;
            tableU16[-1] = symbolValue;
            /* Build table */
            tableU16[0] = 0;
            tableU16[1] = 0;
            /* Build Symbol Transformation Table */
            symbolTT[symbolValue].deltaNbBits = 0;
            symbolTT[symbolValue].deltaFindState = 0;
            return 0;
        }

        // Core FSE compression loop; `fast != 0` selects unchecked bit flushes when
        // the destination is known large enough. Input is consumed back-to-front.
        private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct, uint fast)
        {
            byte* istart = (byte*)src;
            byte* iend = istart + srcSize;
            byte* ip = iend;
            BIT_CStream_t bitC;
            System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC);
            FSE_CState_t CState1, CState2;
            System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1);
            System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2);
            /* init */
            if (srcSize <= 2)
                return 0;
            {
                nuint initError = BIT_initCStream(ref bitC, dst, dstSize);
                /* not enough space available to write a bitstream */
                if (ERR_isError(initError))
                    return 0;
            }

            nuint bitC_bitContainer = bitC.bitContainer;
            uint bitC_bitPos = bitC.bitPos;
            sbyte* bitC_ptr = bitC.ptr;
            sbyte* bitC_endPtr = bitC.endPtr;
            /* Align the number of remaining symbols so the main loop consumes 2 (or 4). */
            if ((srcSize & 1) != 0)
            {
                FSE_initCState2(ref CState1, ct, *--ip);
                FSE_initCState2(ref CState2, ct, *--ip);
                FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
                if (fast != 0)
                    BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
                else
                    BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
            }
            else
            {
                FSE_initCState2(ref CState2, ct, *--ip);
                FSE_initCState2(ref CState1, ct, *--ip);
            }

            /* join to mod 4 (on 64-bit containers, 2 more symbols fit before a flush) */
            srcSize -= 2;
            if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7 && (srcSize & 2) != 0)
            {
                FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
                FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
                if (fast != 0)
                    BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
                else
                    BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
            }

            /* 2 or 4 encoding per loop */
            while (ip > istart)
            {
                FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
                /* this test must be static: evaluated at compile time on the container width */
                if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7)
                    if (fast != 0)
                        BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
                    else
                        BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
                FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
                if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7)
                {
                    FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip);
                    FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip);
                }

                if (fast != 0)
                    BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
                else
                    BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr);
            }

            FSE_flushCState(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr, ref CState2);
            FSE_flushCState(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr, ref CState1);
            return BIT_closeCStream(ref bitC_bitContainer, ref bitC_bitPos, bitC_ptr, bitC_endPtr, bitC.startPtr);
        }

        /*! FSE_compress_usingCTable():
            Compress `src` using `ct` into `dst` which must be already allocated.
            @return : size of compressed data (<= `dstCapacity`),
            or 0 if compressed data could not fit into `dst`,
            or an errorCode, which can be tested using FSE_isError() */
        private static nuint FSE_compress_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct)
        {
            /* fast mode is usable when dst is comfortably larger than the worst case */
            uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 1U : 0U;
            if (fast != 0)
                return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
            else
                return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
        }

        /*-*****************************************
         * Tool functions
         ******************************************/
        // Worst-case compressed size for `size` input bytes (includes header margin).
        private static nuint FSE_compressBound(nuint size)
        {
            return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint));
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs
new file mode 100644
index 000000000..91ff90d62
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs
@@ -0,0 +1,269 @@
using static ZstdSharp.UnsafeHelper;
using System.Runtime.CompilerServices;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        // --- FSE_buildDTable_internal continues past the end of this hunk; the
        // fragment below is reproduced unchanged. ---
        private static nuint FSE_buildDTable_internal(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize)
        {
            /* because *dt is unsigned, 32-bits aligned on 32-bits */
            void* tdPtr = dt + 1;
            FSE_decode_t* tableDecode = (FSE_decode_t*)tdPtr;
            ushort* symbolNext = (ushort*)workSpace;
            byte* spread = (byte*)(symbolNext + maxSymbolValue + 1);
            uint maxSV1 = maxSymbolValue + 1;
            uint tableSize = (uint)(1 << (int)tableLog);
            uint highThreshold = tableSize - 1;
            if (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 > wkspSize)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
            if (maxSymbolValue > 255)
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + if (tableLog > 14 - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + { + FSE_DTableHeader DTableH; + DTableH.tableLog = (ushort)tableLog; + DTableH.fastMode = 1; + { + short largeLimit = (short)(1 << (int)(tableLog - 1)); + uint s; + for (s = 0; s < maxSV1; s++) + { + if (normalizedCounter[s] == -1) + { + tableDecode[highThreshold--].symbol = (byte)s; + symbolNext[s] = 1; + } + else + { + if (normalizedCounter[s] >= largeLimit) + DTableH.fastMode = 0; + symbolNext[s] = (ushort)normalizedCounter[s]; + } + } + } + + memcpy(dt, &DTableH, (uint)sizeof(FSE_DTableHeader)); + } + + if (highThreshold == tableSize - 1) + { + nuint tableMask = tableSize - 1; + nuint step = (tableSize >> 1) + (tableSize >> 3) + 3; + { + const ulong add = 0x0101010101010101UL; + nuint pos = 0; + ulong sv = 0; + uint s; + for (s = 0; s < maxSV1; ++s, sv += add) + { + int i; + int n = normalizedCounter[s]; + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) + { + MEM_write64(spread + pos + i, sv); + } + + pos += (nuint)n; + } + } + + { + nuint position = 0; + nuint s; + const nuint unroll = 2; + assert(tableSize % unroll == 0); + for (s = 0; s < tableSize; s += unroll) + { + nuint u; + for (u = 0; u < unroll; ++u) + { + nuint uPosition = position + u * step & tableMask; + tableDecode[uPosition].symbol = spread[s + u]; + } + + position = position + unroll * step & tableMask; + } + + assert(position == 0); + } + } + else + { + uint tableMask = tableSize - 1; + uint step = (tableSize >> 1) + (tableSize >> 3) + 3; + uint s, position = 0; + for (s = 0; s < maxSV1; s++) + { + int i; + for (i = 0; i < normalizedCounter[s]; i++) + { + tableDecode[position].symbol = (byte)s; + position = position + step & tableMask; + while (position > highThreshold) + position = position + step & tableMask; + } + } + + if (position != 0) + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + { + uint u; + for (u = 0; u < tableSize; u++) + { + byte symbol = tableDecode[u].symbol; + uint nextState = symbolNext[symbol]++; + tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); + tableDecode[u].newState = (ushort)((nextState << tableDecode[u].nbBits) - tableSize); + } + } + + return 0; + } + + private static nuint FSE_buildDTable_wksp(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize) + { + return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize); + } + + /*-******************************************************* + * Decompression (Byte symbols) + *********************************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint FSE_decompress_usingDTable_generic(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* dt, uint fast) + { + byte* ostart = (byte*)dst; + byte* op = ostart; + byte* omax = op + maxDstSize; + byte* olimit = omax - 3; + BIT_DStream_t bitD; + System.Runtime.CompilerServices.Unsafe.SkipInit(out bitD); + FSE_DState_t state1; + System.Runtime.CompilerServices.Unsafe.SkipInit(out state1); + FSE_DState_t state2; + System.Runtime.CompilerServices.Unsafe.SkipInit(out state2); + { + /* Init */ + nuint _var_err__ = BIT_initDStream(ref bitD, cSrc, cSrcSize); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + FSE_initDState(ref state1, ref bitD, dt); + FSE_initDState(ref state2, ref bitD, dt); + nuint bitD_bitContainer = bitD.bitContainer; + uint bitD_bitsConsumed = bitD.bitsConsumed; + sbyte* bitD_ptr = bitD.ptr; + sbyte* bitD_start = bitD.start; + sbyte* bitD_limitPtr = bitD.limitPtr; + if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + { + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + for (; BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_unfinished && op < olimit; op += 4) + { + op[0] = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) + BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr); + op[1] = fast != 0 ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8) + { + if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) > BIT_DStream_status.BIT_DStream_unfinished) + { + op += 2; + break; + } + } + + op[2] = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) + BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr); + op[3] = fast != 0 ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + } + + while (true) + { + if (op > omax - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + { + *op++ = fast != 0 ? 
FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + break; + } + + if (op > omax - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + { + *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + break; + } + } + + assert(op >= ostart); + return (nuint)(op - ostart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2) + { + byte* istart = (byte*)cSrc; + byte* ip = istart; + uint tableLog; + uint maxSymbolValue = 255; + FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)workSpace; + nuint dtablePos = (nuint)(sizeof(FSE_DecompressWksp) / sizeof(uint)); + uint* dtable = (uint*)workSpace + dtablePos; + if (wkspSize < (nuint)sizeof(FSE_DecompressWksp)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + { + nuint NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); + if (ERR_isError(NCountLength)) + return NCountLength; + if (tableLog > maxLog) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + assert(NCountLength <= cSrcSize); + ip += NCountLength; + cSrcSize -= NCountLength; + } + + if (((ulong)(1 + (1 << (int)tableLog) + 1) + (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 + sizeof(uint) - 1) / sizeof(uint) + (255 + 1) / 2 + 1) 
* sizeof(uint) > wkspSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + assert((nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) <= wkspSize); + workSpace = (byte*)workSpace + sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint); + wkspSize -= (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)); + { + nuint _var_err__ = FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + { + void* ptr = dtable; + FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; + uint fastMode = DTableH->fastMode; + if (fastMode != 0) + return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1); + return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); + } + } + + /* Avoids the FORCE_INLINE of the _body() function. */ + private static nuint FSE_decompress_wksp_body_default(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize) + { + return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0); + } + + private static nuint FSE_decompress_wksp_bmi2(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2) + { + return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs new file mode 100644 index 000000000..4d6c1081d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum HIST_checkInput_e + { + trustInput, + checkMaxSymbolValue + } +} \ No newline at end of file diff 
--git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs new file mode 100644 index 000000000..c28390d57 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs @@ -0,0 +1,22 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_CStream_t + { + public _bitContainer_e__FixedBuffer bitContainer; + public _bitPos_e__FixedBuffer bitPos; + public byte* startPtr; + public byte* ptr; + public byte* endPtr; + public unsafe struct _bitContainer_e__FixedBuffer + { + public nuint e0; + public nuint e1; + } + + public unsafe struct _bitPos_e__FixedBuffer + { + public nuint e0; + public nuint e1; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs new file mode 100644 index 000000000..b2d934c7f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_CTableHeader + { + public byte tableLog; + public byte maxSymbolValue; + public fixed byte unused[6]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs new file mode 100644 index 000000000..0a1a294bc --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_CompressWeightsWksp + { + public fixed uint CTable[59]; + public fixed uint scratchBuffer[41]; + public fixed uint count[13]; + public fixed short norm[13]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs new file mode 100644 index 000000000..6d907c47b 
--- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + /*-***************************/ + /* single-symbol decoding */ + /*-***************************/ + public struct HUF_DEltX1 + { + /* single-symbol decoding */ + public byte nbBits; + public byte @byte; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs new file mode 100644 index 000000000..51accf239 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /* *************************/ + /* double-symbols decoding */ + /* *************************/ + public struct HUF_DEltX2 + { + /* double-symbols decoding */ + public ushort sequence; + public byte nbBits; + public byte length; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs new file mode 100644 index 000000000..0cebbd524 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs @@ -0,0 +1,49 @@ +namespace ZstdSharp.Unsafe +{ + /** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilowest [in] - The beginning of the valid range of the input. Decoders may read + * down to this pointer. It may be below iend[0]. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. 
ip[i] may cross iend[i], + * as long as it is above ilowest, but that indicates corruption. + */ + public unsafe struct HUF_DecompressFastArgs + { + public _ip_e__FixedBuffer ip; + public _op_e__FixedBuffer op; + public fixed ulong bits[4]; + public void* dt; + public byte* ilowest; + public byte* oend; + public _iend_e__FixedBuffer iend; + public unsafe struct _ip_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; + } + + public unsafe struct _op_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; + } + + public unsafe struct _iend_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs new file mode 100644 index 000000000..e1377ed49 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_ReadDTableX1_Workspace + { + public fixed uint rankVal[13]; + public fixed uint rankStart[13]; + public fixed uint statsWksp[219]; + public fixed byte symbols[256]; + public fixed byte huffWeight[256]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs new file mode 100644 index 000000000..9bc3d35ff --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs @@ -0,0 +1,307 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_ReadDTableX2_Workspace + { + public _rankVal_e__FixedBuffer rankVal; + public fixed uint rankStats[13]; + public fixed uint rankStart0[15]; + public _sortedSymbol_e__FixedBuffer 
sortedSymbol; + public fixed byte weightList[256]; + public fixed uint calleeWksp[219]; +#if NET8_0_OR_GREATER + [InlineArray(12)] + public unsafe struct _rankVal_e__FixedBuffer + { + public rankValCol_t e0; + } + +#else + public unsafe struct _rankVal_e__FixedBuffer + { + public rankValCol_t e0; + public rankValCol_t e1; + public rankValCol_t e2; + public rankValCol_t e3; + public rankValCol_t e4; + public rankValCol_t e5; + public rankValCol_t e6; + public rankValCol_t e7; + public rankValCol_t e8; + public rankValCol_t e9; + public rankValCol_t e10; + public rankValCol_t e11; + } +#endif + +#if NET8_0_OR_GREATER + [InlineArray(256)] + public unsafe struct _sortedSymbol_e__FixedBuffer + { + public sortedSymbol_t e0; + } + +#else + public unsafe struct _sortedSymbol_e__FixedBuffer + { + public sortedSymbol_t e0; + public sortedSymbol_t e1; + public sortedSymbol_t e2; + public sortedSymbol_t e3; + public sortedSymbol_t e4; + public sortedSymbol_t e5; + public sortedSymbol_t e6; + public sortedSymbol_t e7; + public sortedSymbol_t e8; + public sortedSymbol_t e9; + public sortedSymbol_t e10; + public sortedSymbol_t e11; + public sortedSymbol_t e12; + public sortedSymbol_t e13; + public sortedSymbol_t e14; + public sortedSymbol_t e15; + public sortedSymbol_t e16; + public sortedSymbol_t e17; + public sortedSymbol_t e18; + public sortedSymbol_t e19; + public sortedSymbol_t e20; + public sortedSymbol_t e21; + public sortedSymbol_t e22; + public sortedSymbol_t e23; + public sortedSymbol_t e24; + public sortedSymbol_t e25; + public sortedSymbol_t e26; + public sortedSymbol_t e27; + public sortedSymbol_t e28; + public sortedSymbol_t e29; + public sortedSymbol_t e30; + public sortedSymbol_t e31; + public sortedSymbol_t e32; + public sortedSymbol_t e33; + public sortedSymbol_t e34; + public sortedSymbol_t e35; + public sortedSymbol_t e36; + public sortedSymbol_t e37; + public sortedSymbol_t e38; + public sortedSymbol_t e39; + public sortedSymbol_t e40; + public sortedSymbol_t 
e41; + public sortedSymbol_t e42; + public sortedSymbol_t e43; + public sortedSymbol_t e44; + public sortedSymbol_t e45; + public sortedSymbol_t e46; + public sortedSymbol_t e47; + public sortedSymbol_t e48; + public sortedSymbol_t e49; + public sortedSymbol_t e50; + public sortedSymbol_t e51; + public sortedSymbol_t e52; + public sortedSymbol_t e53; + public sortedSymbol_t e54; + public sortedSymbol_t e55; + public sortedSymbol_t e56; + public sortedSymbol_t e57; + public sortedSymbol_t e58; + public sortedSymbol_t e59; + public sortedSymbol_t e60; + public sortedSymbol_t e61; + public sortedSymbol_t e62; + public sortedSymbol_t e63; + public sortedSymbol_t e64; + public sortedSymbol_t e65; + public sortedSymbol_t e66; + public sortedSymbol_t e67; + public sortedSymbol_t e68; + public sortedSymbol_t e69; + public sortedSymbol_t e70; + public sortedSymbol_t e71; + public sortedSymbol_t e72; + public sortedSymbol_t e73; + public sortedSymbol_t e74; + public sortedSymbol_t e75; + public sortedSymbol_t e76; + public sortedSymbol_t e77; + public sortedSymbol_t e78; + public sortedSymbol_t e79; + public sortedSymbol_t e80; + public sortedSymbol_t e81; + public sortedSymbol_t e82; + public sortedSymbol_t e83; + public sortedSymbol_t e84; + public sortedSymbol_t e85; + public sortedSymbol_t e86; + public sortedSymbol_t e87; + public sortedSymbol_t e88; + public sortedSymbol_t e89; + public sortedSymbol_t e90; + public sortedSymbol_t e91; + public sortedSymbol_t e92; + public sortedSymbol_t e93; + public sortedSymbol_t e94; + public sortedSymbol_t e95; + public sortedSymbol_t e96; + public sortedSymbol_t e97; + public sortedSymbol_t e98; + public sortedSymbol_t e99; + public sortedSymbol_t e100; + public sortedSymbol_t e101; + public sortedSymbol_t e102; + public sortedSymbol_t e103; + public sortedSymbol_t e104; + public sortedSymbol_t e105; + public sortedSymbol_t e106; + public sortedSymbol_t e107; + public sortedSymbol_t e108; + public sortedSymbol_t e109; + public 
sortedSymbol_t e110; + public sortedSymbol_t e111; + public sortedSymbol_t e112; + public sortedSymbol_t e113; + public sortedSymbol_t e114; + public sortedSymbol_t e115; + public sortedSymbol_t e116; + public sortedSymbol_t e117; + public sortedSymbol_t e118; + public sortedSymbol_t e119; + public sortedSymbol_t e120; + public sortedSymbol_t e121; + public sortedSymbol_t e122; + public sortedSymbol_t e123; + public sortedSymbol_t e124; + public sortedSymbol_t e125; + public sortedSymbol_t e126; + public sortedSymbol_t e127; + public sortedSymbol_t e128; + public sortedSymbol_t e129; + public sortedSymbol_t e130; + public sortedSymbol_t e131; + public sortedSymbol_t e132; + public sortedSymbol_t e133; + public sortedSymbol_t e134; + public sortedSymbol_t e135; + public sortedSymbol_t e136; + public sortedSymbol_t e137; + public sortedSymbol_t e138; + public sortedSymbol_t e139; + public sortedSymbol_t e140; + public sortedSymbol_t e141; + public sortedSymbol_t e142; + public sortedSymbol_t e143; + public sortedSymbol_t e144; + public sortedSymbol_t e145; + public sortedSymbol_t e146; + public sortedSymbol_t e147; + public sortedSymbol_t e148; + public sortedSymbol_t e149; + public sortedSymbol_t e150; + public sortedSymbol_t e151; + public sortedSymbol_t e152; + public sortedSymbol_t e153; + public sortedSymbol_t e154; + public sortedSymbol_t e155; + public sortedSymbol_t e156; + public sortedSymbol_t e157; + public sortedSymbol_t e158; + public sortedSymbol_t e159; + public sortedSymbol_t e160; + public sortedSymbol_t e161; + public sortedSymbol_t e162; + public sortedSymbol_t e163; + public sortedSymbol_t e164; + public sortedSymbol_t e165; + public sortedSymbol_t e166; + public sortedSymbol_t e167; + public sortedSymbol_t e168; + public sortedSymbol_t e169; + public sortedSymbol_t e170; + public sortedSymbol_t e171; + public sortedSymbol_t e172; + public sortedSymbol_t e173; + public sortedSymbol_t e174; + public sortedSymbol_t e175; + public sortedSymbol_t 
e176; + public sortedSymbol_t e177; + public sortedSymbol_t e178; + public sortedSymbol_t e179; + public sortedSymbol_t e180; + public sortedSymbol_t e181; + public sortedSymbol_t e182; + public sortedSymbol_t e183; + public sortedSymbol_t e184; + public sortedSymbol_t e185; + public sortedSymbol_t e186; + public sortedSymbol_t e187; + public sortedSymbol_t e188; + public sortedSymbol_t e189; + public sortedSymbol_t e190; + public sortedSymbol_t e191; + public sortedSymbol_t e192; + public sortedSymbol_t e193; + public sortedSymbol_t e194; + public sortedSymbol_t e195; + public sortedSymbol_t e196; + public sortedSymbol_t e197; + public sortedSymbol_t e198; + public sortedSymbol_t e199; + public sortedSymbol_t e200; + public sortedSymbol_t e201; + public sortedSymbol_t e202; + public sortedSymbol_t e203; + public sortedSymbol_t e204; + public sortedSymbol_t e205; + public sortedSymbol_t e206; + public sortedSymbol_t e207; + public sortedSymbol_t e208; + public sortedSymbol_t e209; + public sortedSymbol_t e210; + public sortedSymbol_t e211; + public sortedSymbol_t e212; + public sortedSymbol_t e213; + public sortedSymbol_t e214; + public sortedSymbol_t e215; + public sortedSymbol_t e216; + public sortedSymbol_t e217; + public sortedSymbol_t e218; + public sortedSymbol_t e219; + public sortedSymbol_t e220; + public sortedSymbol_t e221; + public sortedSymbol_t e222; + public sortedSymbol_t e223; + public sortedSymbol_t e224; + public sortedSymbol_t e225; + public sortedSymbol_t e226; + public sortedSymbol_t e227; + public sortedSymbol_t e228; + public sortedSymbol_t e229; + public sortedSymbol_t e230; + public sortedSymbol_t e231; + public sortedSymbol_t e232; + public sortedSymbol_t e233; + public sortedSymbol_t e234; + public sortedSymbol_t e235; + public sortedSymbol_t e236; + public sortedSymbol_t e237; + public sortedSymbol_t e238; + public sortedSymbol_t e239; + public sortedSymbol_t e240; + public sortedSymbol_t e241; + public sortedSymbol_t e242; + public 
sortedSymbol_t e243; + public sortedSymbol_t e244; + public sortedSymbol_t e245; + public sortedSymbol_t e246; + public sortedSymbol_t e247; + public sortedSymbol_t e248; + public sortedSymbol_t e249; + public sortedSymbol_t e250; + public sortedSymbol_t e251; + public sortedSymbol_t e252; + public sortedSymbol_t e253; + public sortedSymbol_t e254; + public sortedSymbol_t e255; + } +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs new file mode 100644 index 000000000..4e70d5dc7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_WriteCTableWksp + { + public HUF_CompressWeightsWksp wksp; + /* precomputed conversion table */ + public fixed byte bitsToWeight[13]; + public fixed byte huffWeight[255]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs new file mode 100644 index 000000000..c1394b01d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs @@ -0,0 +1,739 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public struct HUF_buildCTable_wksp_tables + { + public _huffNodeTbl_e__FixedBuffer huffNodeTbl; + public _rankPosition_e__FixedBuffer rankPosition; +#if NET8_0_OR_GREATER + [InlineArray(512)] + public unsafe struct _huffNodeTbl_e__FixedBuffer + { + public nodeElt_s e0; + } + +#else + public unsafe struct _huffNodeTbl_e__FixedBuffer + { + public nodeElt_s e0; + public nodeElt_s e1; + public nodeElt_s e2; + public nodeElt_s e3; + public nodeElt_s e4; + public nodeElt_s e5; + public nodeElt_s e6; + public nodeElt_s e7; + public nodeElt_s e8; + public nodeElt_s e9; + public nodeElt_s 
e10; + public nodeElt_s e11; + public nodeElt_s e12; + public nodeElt_s e13; + public nodeElt_s e14; + public nodeElt_s e15; + public nodeElt_s e16; + public nodeElt_s e17; + public nodeElt_s e18; + public nodeElt_s e19; + public nodeElt_s e20; + public nodeElt_s e21; + public nodeElt_s e22; + public nodeElt_s e23; + public nodeElt_s e24; + public nodeElt_s e25; + public nodeElt_s e26; + public nodeElt_s e27; + public nodeElt_s e28; + public nodeElt_s e29; + public nodeElt_s e30; + public nodeElt_s e31; + public nodeElt_s e32; + public nodeElt_s e33; + public nodeElt_s e34; + public nodeElt_s e35; + public nodeElt_s e36; + public nodeElt_s e37; + public nodeElt_s e38; + public nodeElt_s e39; + public nodeElt_s e40; + public nodeElt_s e41; + public nodeElt_s e42; + public nodeElt_s e43; + public nodeElt_s e44; + public nodeElt_s e45; + public nodeElt_s e46; + public nodeElt_s e47; + public nodeElt_s e48; + public nodeElt_s e49; + public nodeElt_s e50; + public nodeElt_s e51; + public nodeElt_s e52; + public nodeElt_s e53; + public nodeElt_s e54; + public nodeElt_s e55; + public nodeElt_s e56; + public nodeElt_s e57; + public nodeElt_s e58; + public nodeElt_s e59; + public nodeElt_s e60; + public nodeElt_s e61; + public nodeElt_s e62; + public nodeElt_s e63; + public nodeElt_s e64; + public nodeElt_s e65; + public nodeElt_s e66; + public nodeElt_s e67; + public nodeElt_s e68; + public nodeElt_s e69; + public nodeElt_s e70; + public nodeElt_s e71; + public nodeElt_s e72; + public nodeElt_s e73; + public nodeElt_s e74; + public nodeElt_s e75; + public nodeElt_s e76; + public nodeElt_s e77; + public nodeElt_s e78; + public nodeElt_s e79; + public nodeElt_s e80; + public nodeElt_s e81; + public nodeElt_s e82; + public nodeElt_s e83; + public nodeElt_s e84; + public nodeElt_s e85; + public nodeElt_s e86; + public nodeElt_s e87; + public nodeElt_s e88; + public nodeElt_s e89; + public nodeElt_s e90; + public nodeElt_s e91; + public nodeElt_s e92; + public nodeElt_s e93; + 
public nodeElt_s e94; + public nodeElt_s e95; + public nodeElt_s e96; + public nodeElt_s e97; + public nodeElt_s e98; + public nodeElt_s e99; + public nodeElt_s e100; + public nodeElt_s e101; + public nodeElt_s e102; + public nodeElt_s e103; + public nodeElt_s e104; + public nodeElt_s e105; + public nodeElt_s e106; + public nodeElt_s e107; + public nodeElt_s e108; + public nodeElt_s e109; + public nodeElt_s e110; + public nodeElt_s e111; + public nodeElt_s e112; + public nodeElt_s e113; + public nodeElt_s e114; + public nodeElt_s e115; + public nodeElt_s e116; + public nodeElt_s e117; + public nodeElt_s e118; + public nodeElt_s e119; + public nodeElt_s e120; + public nodeElt_s e121; + public nodeElt_s e122; + public nodeElt_s e123; + public nodeElt_s e124; + public nodeElt_s e125; + public nodeElt_s e126; + public nodeElt_s e127; + public nodeElt_s e128; + public nodeElt_s e129; + public nodeElt_s e130; + public nodeElt_s e131; + public nodeElt_s e132; + public nodeElt_s e133; + public nodeElt_s e134; + public nodeElt_s e135; + public nodeElt_s e136; + public nodeElt_s e137; + public nodeElt_s e138; + public nodeElt_s e139; + public nodeElt_s e140; + public nodeElt_s e141; + public nodeElt_s e142; + public nodeElt_s e143; + public nodeElt_s e144; + public nodeElt_s e145; + public nodeElt_s e146; + public nodeElt_s e147; + public nodeElt_s e148; + public nodeElt_s e149; + public nodeElt_s e150; + public nodeElt_s e151; + public nodeElt_s e152; + public nodeElt_s e153; + public nodeElt_s e154; + public nodeElt_s e155; + public nodeElt_s e156; + public nodeElt_s e157; + public nodeElt_s e158; + public nodeElt_s e159; + public nodeElt_s e160; + public nodeElt_s e161; + public nodeElt_s e162; + public nodeElt_s e163; + public nodeElt_s e164; + public nodeElt_s e165; + public nodeElt_s e166; + public nodeElt_s e167; + public nodeElt_s e168; + public nodeElt_s e169; + public nodeElt_s e170; + public nodeElt_s e171; + public nodeElt_s e172; + public nodeElt_s e173; + 
public nodeElt_s e174; + public nodeElt_s e175; + public nodeElt_s e176; + public nodeElt_s e177; + public nodeElt_s e178; + public nodeElt_s e179; + public nodeElt_s e180; + public nodeElt_s e181; + public nodeElt_s e182; + public nodeElt_s e183; + public nodeElt_s e184; + public nodeElt_s e185; + public nodeElt_s e186; + public nodeElt_s e187; + public nodeElt_s e188; + public nodeElt_s e189; + public nodeElt_s e190; + public nodeElt_s e191; + public nodeElt_s e192; + public nodeElt_s e193; + public nodeElt_s e194; + public nodeElt_s e195; + public nodeElt_s e196; + public nodeElt_s e197; + public nodeElt_s e198; + public nodeElt_s e199; + public nodeElt_s e200; + public nodeElt_s e201; + public nodeElt_s e202; + public nodeElt_s e203; + public nodeElt_s e204; + public nodeElt_s e205; + public nodeElt_s e206; + public nodeElt_s e207; + public nodeElt_s e208; + public nodeElt_s e209; + public nodeElt_s e210; + public nodeElt_s e211; + public nodeElt_s e212; + public nodeElt_s e213; + public nodeElt_s e214; + public nodeElt_s e215; + public nodeElt_s e216; + public nodeElt_s e217; + public nodeElt_s e218; + public nodeElt_s e219; + public nodeElt_s e220; + public nodeElt_s e221; + public nodeElt_s e222; + public nodeElt_s e223; + public nodeElt_s e224; + public nodeElt_s e225; + public nodeElt_s e226; + public nodeElt_s e227; + public nodeElt_s e228; + public nodeElt_s e229; + public nodeElt_s e230; + public nodeElt_s e231; + public nodeElt_s e232; + public nodeElt_s e233; + public nodeElt_s e234; + public nodeElt_s e235; + public nodeElt_s e236; + public nodeElt_s e237; + public nodeElt_s e238; + public nodeElt_s e239; + public nodeElt_s e240; + public nodeElt_s e241; + public nodeElt_s e242; + public nodeElt_s e243; + public nodeElt_s e244; + public nodeElt_s e245; + public nodeElt_s e246; + public nodeElt_s e247; + public nodeElt_s e248; + public nodeElt_s e249; + public nodeElt_s e250; + public nodeElt_s e251; + public nodeElt_s e252; + public nodeElt_s e253; + 
public nodeElt_s e254; + public nodeElt_s e255; + public nodeElt_s e256; + public nodeElt_s e257; + public nodeElt_s e258; + public nodeElt_s e259; + public nodeElt_s e260; + public nodeElt_s e261; + public nodeElt_s e262; + public nodeElt_s e263; + public nodeElt_s e264; + public nodeElt_s e265; + public nodeElt_s e266; + public nodeElt_s e267; + public nodeElt_s e268; + public nodeElt_s e269; + public nodeElt_s e270; + public nodeElt_s e271; + public nodeElt_s e272; + public nodeElt_s e273; + public nodeElt_s e274; + public nodeElt_s e275; + public nodeElt_s e276; + public nodeElt_s e277; + public nodeElt_s e278; + public nodeElt_s e279; + public nodeElt_s e280; + public nodeElt_s e281; + public nodeElt_s e282; + public nodeElt_s e283; + public nodeElt_s e284; + public nodeElt_s e285; + public nodeElt_s e286; + public nodeElt_s e287; + public nodeElt_s e288; + public nodeElt_s e289; + public nodeElt_s e290; + public nodeElt_s e291; + public nodeElt_s e292; + public nodeElt_s e293; + public nodeElt_s e294; + public nodeElt_s e295; + public nodeElt_s e296; + public nodeElt_s e297; + public nodeElt_s e298; + public nodeElt_s e299; + public nodeElt_s e300; + public nodeElt_s e301; + public nodeElt_s e302; + public nodeElt_s e303; + public nodeElt_s e304; + public nodeElt_s e305; + public nodeElt_s e306; + public nodeElt_s e307; + public nodeElt_s e308; + public nodeElt_s e309; + public nodeElt_s e310; + public nodeElt_s e311; + public nodeElt_s e312; + public nodeElt_s e313; + public nodeElt_s e314; + public nodeElt_s e315; + public nodeElt_s e316; + public nodeElt_s e317; + public nodeElt_s e318; + public nodeElt_s e319; + public nodeElt_s e320; + public nodeElt_s e321; + public nodeElt_s e322; + public nodeElt_s e323; + public nodeElt_s e324; + public nodeElt_s e325; + public nodeElt_s e326; + public nodeElt_s e327; + public nodeElt_s e328; + public nodeElt_s e329; + public nodeElt_s e330; + public nodeElt_s e331; + public nodeElt_s e332; + public nodeElt_s e333; + 
public nodeElt_s e334; + public nodeElt_s e335; + public nodeElt_s e336; + public nodeElt_s e337; + public nodeElt_s e338; + public nodeElt_s e339; + public nodeElt_s e340; + public nodeElt_s e341; + public nodeElt_s e342; + public nodeElt_s e343; + public nodeElt_s e344; + public nodeElt_s e345; + public nodeElt_s e346; + public nodeElt_s e347; + public nodeElt_s e348; + public nodeElt_s e349; + public nodeElt_s e350; + public nodeElt_s e351; + public nodeElt_s e352; + public nodeElt_s e353; + public nodeElt_s e354; + public nodeElt_s e355; + public nodeElt_s e356; + public nodeElt_s e357; + public nodeElt_s e358; + public nodeElt_s e359; + public nodeElt_s e360; + public nodeElt_s e361; + public nodeElt_s e362; + public nodeElt_s e363; + public nodeElt_s e364; + public nodeElt_s e365; + public nodeElt_s e366; + public nodeElt_s e367; + public nodeElt_s e368; + public nodeElt_s e369; + public nodeElt_s e370; + public nodeElt_s e371; + public nodeElt_s e372; + public nodeElt_s e373; + public nodeElt_s e374; + public nodeElt_s e375; + public nodeElt_s e376; + public nodeElt_s e377; + public nodeElt_s e378; + public nodeElt_s e379; + public nodeElt_s e380; + public nodeElt_s e381; + public nodeElt_s e382; + public nodeElt_s e383; + public nodeElt_s e384; + public nodeElt_s e385; + public nodeElt_s e386; + public nodeElt_s e387; + public nodeElt_s e388; + public nodeElt_s e389; + public nodeElt_s e390; + public nodeElt_s e391; + public nodeElt_s e392; + public nodeElt_s e393; + public nodeElt_s e394; + public nodeElt_s e395; + public nodeElt_s e396; + public nodeElt_s e397; + public nodeElt_s e398; + public nodeElt_s e399; + public nodeElt_s e400; + public nodeElt_s e401; + public nodeElt_s e402; + public nodeElt_s e403; + public nodeElt_s e404; + public nodeElt_s e405; + public nodeElt_s e406; + public nodeElt_s e407; + public nodeElt_s e408; + public nodeElt_s e409; + public nodeElt_s e410; + public nodeElt_s e411; + public nodeElt_s e412; + public nodeElt_s e413; + 
public nodeElt_s e414; + public nodeElt_s e415; + public nodeElt_s e416; + public nodeElt_s e417; + public nodeElt_s e418; + public nodeElt_s e419; + public nodeElt_s e420; + public nodeElt_s e421; + public nodeElt_s e422; + public nodeElt_s e423; + public nodeElt_s e424; + public nodeElt_s e425; + public nodeElt_s e426; + public nodeElt_s e427; + public nodeElt_s e428; + public nodeElt_s e429; + public nodeElt_s e430; + public nodeElt_s e431; + public nodeElt_s e432; + public nodeElt_s e433; + public nodeElt_s e434; + public nodeElt_s e435; + public nodeElt_s e436; + public nodeElt_s e437; + public nodeElt_s e438; + public nodeElt_s e439; + public nodeElt_s e440; + public nodeElt_s e441; + public nodeElt_s e442; + public nodeElt_s e443; + public nodeElt_s e444; + public nodeElt_s e445; + public nodeElt_s e446; + public nodeElt_s e447; + public nodeElt_s e448; + public nodeElt_s e449; + public nodeElt_s e450; + public nodeElt_s e451; + public nodeElt_s e452; + public nodeElt_s e453; + public nodeElt_s e454; + public nodeElt_s e455; + public nodeElt_s e456; + public nodeElt_s e457; + public nodeElt_s e458; + public nodeElt_s e459; + public nodeElt_s e460; + public nodeElt_s e461; + public nodeElt_s e462; + public nodeElt_s e463; + public nodeElt_s e464; + public nodeElt_s e465; + public nodeElt_s e466; + public nodeElt_s e467; + public nodeElt_s e468; + public nodeElt_s e469; + public nodeElt_s e470; + public nodeElt_s e471; + public nodeElt_s e472; + public nodeElt_s e473; + public nodeElt_s e474; + public nodeElt_s e475; + public nodeElt_s e476; + public nodeElt_s e477; + public nodeElt_s e478; + public nodeElt_s e479; + public nodeElt_s e480; + public nodeElt_s e481; + public nodeElt_s e482; + public nodeElt_s e483; + public nodeElt_s e484; + public nodeElt_s e485; + public nodeElt_s e486; + public nodeElt_s e487; + public nodeElt_s e488; + public nodeElt_s e489; + public nodeElt_s e490; + public nodeElt_s e491; + public nodeElt_s e492; + public nodeElt_s e493; + 
public nodeElt_s e494; + public nodeElt_s e495; + public nodeElt_s e496; + public nodeElt_s e497; + public nodeElt_s e498; + public nodeElt_s e499; + public nodeElt_s e500; + public nodeElt_s e501; + public nodeElt_s e502; + public nodeElt_s e503; + public nodeElt_s e504; + public nodeElt_s e505; + public nodeElt_s e506; + public nodeElt_s e507; + public nodeElt_s e508; + public nodeElt_s e509; + public nodeElt_s e510; + public nodeElt_s e511; + } +#endif + +#if NET8_0_OR_GREATER + [InlineArray(192)] + public unsafe struct _rankPosition_e__FixedBuffer + { + public rankPos e0; + } + +#else + public unsafe struct _rankPosition_e__FixedBuffer + { + public rankPos e0; + public rankPos e1; + public rankPos e2; + public rankPos e3; + public rankPos e4; + public rankPos e5; + public rankPos e6; + public rankPos e7; + public rankPos e8; + public rankPos e9; + public rankPos e10; + public rankPos e11; + public rankPos e12; + public rankPos e13; + public rankPos e14; + public rankPos e15; + public rankPos e16; + public rankPos e17; + public rankPos e18; + public rankPos e19; + public rankPos e20; + public rankPos e21; + public rankPos e22; + public rankPos e23; + public rankPos e24; + public rankPos e25; + public rankPos e26; + public rankPos e27; + public rankPos e28; + public rankPos e29; + public rankPos e30; + public rankPos e31; + public rankPos e32; + public rankPos e33; + public rankPos e34; + public rankPos e35; + public rankPos e36; + public rankPos e37; + public rankPos e38; + public rankPos e39; + public rankPos e40; + public rankPos e41; + public rankPos e42; + public rankPos e43; + public rankPos e44; + public rankPos e45; + public rankPos e46; + public rankPos e47; + public rankPos e48; + public rankPos e49; + public rankPos e50; + public rankPos e51; + public rankPos e52; + public rankPos e53; + public rankPos e54; + public rankPos e55; + public rankPos e56; + public rankPos e57; + public rankPos e58; + public rankPos e59; + public rankPos e60; + public 
rankPos e61; + public rankPos e62; + public rankPos e63; + public rankPos e64; + public rankPos e65; + public rankPos e66; + public rankPos e67; + public rankPos e68; + public rankPos e69; + public rankPos e70; + public rankPos e71; + public rankPos e72; + public rankPos e73; + public rankPos e74; + public rankPos e75; + public rankPos e76; + public rankPos e77; + public rankPos e78; + public rankPos e79; + public rankPos e80; + public rankPos e81; + public rankPos e82; + public rankPos e83; + public rankPos e84; + public rankPos e85; + public rankPos e86; + public rankPos e87; + public rankPos e88; + public rankPos e89; + public rankPos e90; + public rankPos e91; + public rankPos e92; + public rankPos e93; + public rankPos e94; + public rankPos e95; + public rankPos e96; + public rankPos e97; + public rankPos e98; + public rankPos e99; + public rankPos e100; + public rankPos e101; + public rankPos e102; + public rankPos e103; + public rankPos e104; + public rankPos e105; + public rankPos e106; + public rankPos e107; + public rankPos e108; + public rankPos e109; + public rankPos e110; + public rankPos e111; + public rankPos e112; + public rankPos e113; + public rankPos e114; + public rankPos e115; + public rankPos e116; + public rankPos e117; + public rankPos e118; + public rankPos e119; + public rankPos e120; + public rankPos e121; + public rankPos e122; + public rankPos e123; + public rankPos e124; + public rankPos e125; + public rankPos e126; + public rankPos e127; + public rankPos e128; + public rankPos e129; + public rankPos e130; + public rankPos e131; + public rankPos e132; + public rankPos e133; + public rankPos e134; + public rankPos e135; + public rankPos e136; + public rankPos e137; + public rankPos e138; + public rankPos e139; + public rankPos e140; + public rankPos e141; + public rankPos e142; + public rankPos e143; + public rankPos e144; + public rankPos e145; + public rankPos e146; + public rankPos e147; + public rankPos e148; + public rankPos e149; 
+ public rankPos e150; + public rankPos e151; + public rankPos e152; + public rankPos e153; + public rankPos e154; + public rankPos e155; + public rankPos e156; + public rankPos e157; + public rankPos e158; + public rankPos e159; + public rankPos e160; + public rankPos e161; + public rankPos e162; + public rankPos e163; + public rankPos e164; + public rankPos e165; + public rankPos e166; + public rankPos e167; + public rankPos e168; + public rankPos e169; + public rankPos e170; + public rankPos e171; + public rankPos e172; + public rankPos e173; + public rankPos e174; + public rankPos e175; + public rankPos e176; + public rankPos e177; + public rankPos e178; + public rankPos e179; + public rankPos e180; + public rankPos e181; + public rankPos e182; + public rankPos e183; + public rankPos e184; + public rankPos e185; + public rankPos e186; + public rankPos e187; + public rankPos e188; + public rankPos e189; + public rankPos e190; + public rankPos e191; + } +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs new file mode 100644 index 000000000..a13ec931c --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs @@ -0,0 +1,280 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public unsafe struct HUF_compress_tables_t + { + public fixed uint count[256]; + public _CTable_e__FixedBuffer CTable; + public _wksps_e__Union wksps; +#if NET8_0_OR_GREATER + [InlineArray(257)] + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + } + +#else + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; + public nuint e10; + public nuint e11; + public nuint e12; + public nuint 
e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public nuint e62; + public nuint e63; + public nuint e64; + public nuint e65; + public nuint e66; + public nuint e67; + public nuint e68; + public nuint e69; + public nuint e70; + public nuint e71; + public nuint e72; + public nuint e73; + public nuint e74; + public nuint e75; + public nuint e76; + public nuint e77; + public nuint e78; + public nuint e79; + public nuint e80; + public nuint e81; + public nuint e82; + public nuint e83; + public nuint e84; + public nuint e85; + public nuint e86; + public nuint e87; + public nuint e88; + public nuint e89; + public nuint e90; + public nuint e91; + public nuint e92; + public nuint e93; + public nuint e94; + public nuint e95; + public nuint e96; + public nuint e97; + public nuint e98; + public nuint e99; + public nuint e100; + public nuint e101; + public nuint e102; + public nuint e103; + public nuint e104; + public nuint e105; + public nuint e106; + public nuint e107; + public nuint e108; + public nuint e109; + public nuint e110; + public nuint e111; + public nuint e112; + 
public nuint e113; + public nuint e114; + public nuint e115; + public nuint e116; + public nuint e117; + public nuint e118; + public nuint e119; + public nuint e120; + public nuint e121; + public nuint e122; + public nuint e123; + public nuint e124; + public nuint e125; + public nuint e126; + public nuint e127; + public nuint e128; + public nuint e129; + public nuint e130; + public nuint e131; + public nuint e132; + public nuint e133; + public nuint e134; + public nuint e135; + public nuint e136; + public nuint e137; + public nuint e138; + public nuint e139; + public nuint e140; + public nuint e141; + public nuint e142; + public nuint e143; + public nuint e144; + public nuint e145; + public nuint e146; + public nuint e147; + public nuint e148; + public nuint e149; + public nuint e150; + public nuint e151; + public nuint e152; + public nuint e153; + public nuint e154; + public nuint e155; + public nuint e156; + public nuint e157; + public nuint e158; + public nuint e159; + public nuint e160; + public nuint e161; + public nuint e162; + public nuint e163; + public nuint e164; + public nuint e165; + public nuint e166; + public nuint e167; + public nuint e168; + public nuint e169; + public nuint e170; + public nuint e171; + public nuint e172; + public nuint e173; + public nuint e174; + public nuint e175; + public nuint e176; + public nuint e177; + public nuint e178; + public nuint e179; + public nuint e180; + public nuint e181; + public nuint e182; + public nuint e183; + public nuint e184; + public nuint e185; + public nuint e186; + public nuint e187; + public nuint e188; + public nuint e189; + public nuint e190; + public nuint e191; + public nuint e192; + public nuint e193; + public nuint e194; + public nuint e195; + public nuint e196; + public nuint e197; + public nuint e198; + public nuint e199; + public nuint e200; + public nuint e201; + public nuint e202; + public nuint e203; + public nuint e204; + public nuint e205; + public nuint e206; + public nuint e207; + 
            /* Remainder of the generated pre-NET8 fallback buffer: together with e0..e207
             * this provides 257 nuint slots, mirroring the [InlineArray(257)] CTable
             * layout used on NET8_0_OR_GREATER (1 header element + 256 table elements). */
            public nuint e208;
            public nuint e209;
            public nuint e210;
            public nuint e211;
            public nuint e212;
            public nuint e213;
            public nuint e214;
            public nuint e215;
            public nuint e216;
            public nuint e217;
            public nuint e218;
            public nuint e219;
            public nuint e220;
            public nuint e221;
            public nuint e222;
            public nuint e223;
            public nuint e224;
            public nuint e225;
            public nuint e226;
            public nuint e227;
            public nuint e228;
            public nuint e229;
            public nuint e230;
            public nuint e231;
            public nuint e232;
            public nuint e233;
            public nuint e234;
            public nuint e235;
            public nuint e236;
            public nuint e237;
            public nuint e238;
            public nuint e239;
            public nuint e240;
            public nuint e241;
            public nuint e242;
            public nuint e243;
            public nuint e244;
            public nuint e245;
            public nuint e246;
            public nuint e247;
            public nuint e248;
            public nuint e249;
            public nuint e250;
            public nuint e251;
            public nuint e252;
            public nuint e253;
            public nuint e254;
            public nuint e255;
            public nuint e256;
        }
#endif
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs
new file mode 100644
index 000000000..9a8903776
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs
@@ -0,0 +1,40 @@
namespace ZstdSharp.Unsafe
{
    /**
     * Huffman flags bitset.
     * For all flags, 0 is the default value.
     */
    public enum HUF_flags_e
    {
        /**
         * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
         * Otherwise: Ignored.
         */
        HUF_flags_bmi2 = 1 << 0,
        /**
         * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
         * If unset: Use heuristic to find the table depth.
         */
        HUF_flags_optimalDepth = 1 << 1,
        /**
         * If set: If the previous table can encode the input, always reuse the previous table.
         * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
         */
        HUF_flags_preferRepeat = 1 << 2,
        /**
         * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
         * If unset: Always histogram the entire input.
         */
        HUF_flags_suspectUncompressible = 1 << 3,
        /**
         * If set: Don't use assembly implementations
         * If unset: Allow using assembly implementations
         */
        HUF_flags_disableAsm = 1 << 4,
        /**
         * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
         * If unset: Use the fast decoding loop when possible.
         */
        HUF_flags_disableFast = 1 << 5
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs
new file mode 100644
index 000000000..e4d5a86f1
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs
@@ -0,0 +1,8 @@
namespace ZstdSharp.Unsafe
{
    /* Selects how many Huffman bitstreams are emitted for a block:
     * a single stream, or four (names follow upstream zstd's huf.h). */
    public enum HUF_nbStreams_e
    {
        HUF_singleStream,
        HUF_fourStreams
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs
new file mode 100644
index 000000000..39ed82d8b
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs
@@ -0,0 +1,12 @@
namespace ZstdSharp.Unsafe
{
    /* Tri-state describing whether a previously built Huffman CTable may be reused. */
    public enum HUF_repeat
    {
        /**< Cannot use the previous table */
        HUF_repeat_none,
        /**< Can use the previous table but it must be checked.
             Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
        HUF_repeat_check,
        /**< Can use the previous table and it is assumed to be valid */
        HUF_repeat_valid
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs
new file mode 100644
index 000000000..7dce78ffd
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs
@@ -0,0 +1,208 @@
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* --- Error management --- */
        /* Thin alias over ERR_isError so HIST_* callers can use the upstream zstd name. */
        private static bool HIST_isError(nuint code)
        {
            return ERR_isError(code);
        }

        /*-**************************************************************
         * Histogram functions
         ****************************************************************/
        /* Accumulates byte frequencies of src[0..srcSize) into count[] without
         * zeroing it first (callers are expected to have initialized count). */
        private static void HIST_add(uint* count, void* src, nuint srcSize)
        {
            byte* ip = (byte*)src;
            byte* end = ip + srcSize;
            while (ip < end)
            {
                count[*ip++]++;
            }
        }

        /*! HIST_count_simple() :
         * Same as HIST_countFast(), this function is unsafe,
         * and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
         * It is also a bit slower for large inputs.
         * However, it does not need any additional memory (not even on stack).
         * @return : count of the most frequent symbol.
         * Note this function doesn't produce any error (i.e. it must succeed).
         */
        private static uint HIST_count_simple(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize)
        {
            byte* ip = (byte*)src;
            byte* end = ip + srcSize;
            uint maxSymbolValue = *maxSymbolValuePtr;
            uint largestCount = 0;
            // Zero the (maxSymbolValue + 1)-entry table before accumulating.
            memset(count, 0, (maxSymbolValue + 1) * sizeof(uint));
            if (srcSize == 0)
            {
                *maxSymbolValuePtr = 0;
                return 0;
            }

            while (ip < end)
            {
                assert(*ip <= maxSymbolValue);
                count[*ip++]++;
            }

            // Shrink maxSymbolValue down to the largest symbol actually present
            // (safe: srcSize > 0 guarantees at least one non-zero count).
            while (count[maxSymbolValue] == 0)
                maxSymbolValue--;
            *maxSymbolValuePtr = maxSymbolValue;
            {
                uint s;
                for (s = 0; s <= maxSymbolValue; s++)
                    if (count[s] > largestCount)
                        largestCount = count[s];
            }

            return largestCount;
        }

        /* HIST_count_parallel_wksp() :
         * store histogram into 4 intermediate tables, recombined at the end.
         * this design makes better use of OoO cpus,
         * and is noticeably faster when some values are heavily repeated.
         * But it needs some additional workspace for intermediate tables.
         * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
         * @return : largest histogram frequency,
         * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
        private static nuint HIST_count_parallel_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, HIST_checkInput_e check, uint* workSpace)
        {
            byte* ip = (byte*)source;
            byte* iend = ip + sourceSize;
            nuint countSize = (*maxSymbolValuePtr + 1) * sizeof(uint);
            uint max = 0;
            // Four 256-entry sub-histograms laid out consecutively in workSpace.
            uint* Counting1 = workSpace;
            uint* Counting2 = Counting1 + 256;
            uint* Counting3 = Counting2 + 256;
            uint* Counting4 = Counting3 + 256;
            assert(*maxSymbolValuePtr <= 255);
            if (sourceSize == 0)
            {
                memset(count, 0, (uint)countSize);
                *maxSymbolValuePtr = 0;
                return 0;
            }

            memset(workSpace, 0, 4 * 256 * sizeof(uint));
            {
                // Main loop: reads 32 bits ahead (`cached`) while distributing the
                // 4 bytes of the previous read into the 4 sub-histograms; unrolled 4x,
                // so each iteration consumes 16 bytes (hence the `iend - 15` bound).
                uint cached = MEM_read32(ip);
                ip += 4;
                while (ip < iend - 15)
                {
                    uint c = cached;
                    cached = MEM_read32(ip);
                    ip += 4;
                    Counting1[(byte)c]++;
                    Counting2[(byte)(c >> 8)]++;
                    Counting3[(byte)(c >> 16)]++;
                    Counting4[c >> 24]++;
                    c = cached;
                    cached = MEM_read32(ip);
                    ip += 4;
                    Counting1[(byte)c]++;
                    Counting2[(byte)(c >> 8)]++;
                    Counting3[(byte)(c >> 16)]++;
                    Counting4[c >> 24]++;
                    c = cached;
                    cached = MEM_read32(ip);
                    ip += 4;
                    Counting1[(byte)c]++;
                    Counting2[(byte)(c >> 8)]++;
                    Counting3[(byte)(c >> 16)]++;
                    Counting4[c >> 24]++;
                    c = cached;
                    cached = MEM_read32(ip);
                    ip += 4;
                    Counting1[(byte)c]++;
                    Counting2[(byte)(c >> 8)]++;
                    Counting3[(byte)(c >> 16)]++;
                    Counting4[c >> 24]++;
                }

                // The last prefetched word was not consumed; step back so the
                // scalar tail loop re-reads those bytes.
                ip -= 4;
            }

            // Scalar tail: remaining bytes go into the first sub-histogram only.
            while (ip < iend)
                Counting1[*ip++]++;
            {
                uint s;
                for (s = 0; s < 256; s++)
                {
                    Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
                    if (Counting1[s] > max)
                        max = Counting1[s];
                }
            }

            {
                uint maxSymbolValue = 255;
                while (Counting1[maxSymbolValue] == 0)
                    maxSymbolValue--;
                // In checkMaxSymbolValue mode, a symbol beyond the caller's alphabet is an error.
                if (check != default && maxSymbolValue > *maxSymbolValuePtr)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
                *maxSymbolValuePtr = maxSymbolValue;
                memmove(count, Counting1, countSize);
            }

            return max;
        }

        /* HIST_countFast_wksp() :
         * Same as HIST_countFast(), but using an externally provided scratch buffer.
         * `workSpace` is a writable buffer which must be 4-bytes aligned,
         * `workSpaceSize` must be >= HIST_WKSP_SIZE
         */
        private static nuint HIST_countFast_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, void* workSpace, nuint workSpaceSize)
        {
            // Small inputs: the simple single-table loop is cheaper than the parallel setup.
            if (sourceSize < 1500)
                return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
            if (((nuint)workSpace & 3) != 0)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (workSpaceSize < 1024 * sizeof(uint))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
            return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, HIST_checkInput_e.trustInput, (uint*)workSpace);
        }

        /* HIST_count_wksp() :
         * Same as HIST_count(), but using an externally provided scratch buffer.
         * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
        private static nuint HIST_count_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, void* workSpace, nuint workSpaceSize)
        {
            if (((nuint)workSpace & 3) != 0)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (workSpaceSize < 1024 * sizeof(uint))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
            // A restricted alphabet requires the checked variant; a full byte
            // alphabet can take the fast (unchecked) path.
            if (*maxSymbolValuePtr < 255)
                return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, HIST_checkInput_e.checkMaxSymbolValue, (uint*)workSpace);
            *maxSymbolValuePtr = 255;
            return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
        }

        /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
        private static nuint HIST_countFast(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize)
        {
            // 1024 uints = HIST_WKSP_SIZE_U32 scratch, allocated on the stack.
            uint* tmpCounters = stackalloc uint[1024];
            return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(uint) * 1024);
        }

        /*! HIST_count():
         * Provides the precise count of each byte within a table 'count'.
         * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
         * Updates *maxSymbolValuePtr with actual largest symbol value detected.
         * @return : count of the most frequent symbol (which isn't identified).
         * or an error code, which can be tested using HIST_isError().
         * note : if return == srcSize, there is only one symbol.
         */
        private static nuint HIST_count(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize)
        {
            // Stack scratch sized to HIST_WKSP requirements (1024 uints).
            uint* tmpCounters = stackalloc uint[1024];
            return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(uint) * 1024);
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs
new file mode 100644
index 000000000..a41274cdb
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs
@@ -0,0 +1,1288 @@
using static ZstdSharp.UnsafeHelper;
using System.Runtime.CompilerServices;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* Rounds `workspace` up to an `align`-byte boundary (align must be a power
         * of two, <= 8) and deducts the padding from *workspaceSizePtr.
         * Returns null (and zeroes the remaining size) if the padding alone
         * exceeds the workspace. */
        private static void* HUF_alignUpWorkspace(void* workspace, nuint* workspaceSizePtr, nuint align)
        {
            nuint mask = align - 1;
            nuint rem = (nuint)workspace & mask;
            // Bytes needed to reach the next aligned address (0 if already aligned).
            nuint add = align - rem & mask;
            byte* aligned = (byte*)workspace + add;
            assert((align & align - 1) == 0);
            assert(align <= 8);
            if (*workspaceSizePtr >= add)
            {
                assert(add < align);
                assert(((nuint)aligned & mask) == 0);
                *workspaceSizePtr -= add;
                return aligned;
            }
            else
            {
                *workspaceSizePtr = 0;
                return null;
            }
        }

        /* Compresses the Huffman weight table (`weightTable`, wtSize entries) into
         * dst using FSE. Returns the compressed size written, 0 when the weights
         * are not compressible (caller falls back to raw encoding), 1 when a
         * single weight value repeats (caller can RLE), or an error code. */
        private static nuint HUF_compressWeights(void* dst, nuint dstSize, void* weightTable, nuint wtSize, void* workspace, nuint workspaceSize)
        {
            byte* ostart = (byte*)dst;
            byte* op = ostart;
            byte* oend = ostart + dstSize;
            // Huffman weights fit in [0, 12], so a 6-bit FSE table log suffices.
            uint maxSymbolValue = 12;
            uint tableLog = 6;
            HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(uint));
            if (workspaceSize < (nuint)sizeof(HUF_CompressWeightsWksp))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (wtSize <= 1)
                return 0;
            {
                /* never fails */
                uint maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);
                // maxCount == wtSize: a single weight value repeats throughout.
                if (maxCount == wtSize)
                    return 1;
                // maxCount == 1: every weight distinct once — not compressible.
                if (maxCount == 1)
                    return 0;
            }

            tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
            {
                /* useLowProbCount */
                nuint _var_err__ = FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, 0);
                if (ERR_isError(_var_err__))
                    return _var_err__;
            }

            {
                // Write the FSE table description header.
                nuint hSize = FSE_writeNCount(op, (nuint)(oend - op), wksp->norm, maxSymbolValue, tableLog);
                if (ERR_isError(hSize))
                    return hSize;
                op += hSize;
            }

            {
                /* Compress */
                nuint _var_err__ = FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(uint) * 41);
                if (ERR_isError(_var_err__))
                    return _var_err__;
            }

            {
                nuint cSize = FSE_compress_usingCTable(op, (nuint)(oend - op), weightTable, wtSize, wksp->CTable);
                if (ERR_isError(cSize))
                    return cSize;
                // cSize == 0 means FSE judged the data incompressible.
                if (cSize == 0)
                    return 0;
                op += cSize;
            }

            return (nuint)(op - ostart);
        }

        // CTable element layout: the low 8 bits hold the code length (nbBits),
        // the remaining high bits hold the code value (see HUF_setValue below).
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint HUF_getNbBits(nuint elt)
        {
            return elt & 0xFF;
        }

        // "Fast" variant: returns the raw element; only valid when the caller
        // knows the value bits are zero.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint HUF_getNbBitsFast(nuint elt)
        {
            return elt;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint HUF_getValue(nuint elt)
        {
            return elt & ~(nuint)0xFF;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint HUF_getValueFast(nuint elt)
        {
            return elt;
        }

        // Overwrites the whole element with nbBits (value bits become 0),
        // so it must be called before HUF_setValue.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_setNbBits(nuint* elt, nuint nbBits)
        {
            assert(nbBits <= 12);
            *elt = nbBits;
        }

        // Packs `value` left-justified into the high bits of the element,
        // leaving the nbBits byte intact.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_setValue(nuint* elt, nuint value)
        {
            nuint nbBits = HUF_getNbBits(*elt);
            if (nbBits > 0)
            {
                assert(value >> (int)nbBits == 0);
                *elt |= value << (int)((nuint)(sizeof(nuint) * 8) - nbBits);
            }
        }

        /** HUF_readCTableHeader() :
         * @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
 */
        private static HUF_CTableHeader HUF_readCTableHeader(nuint* ctable)
        {
            HUF_CTableHeader header;
            memcpy(&header, ctable, (uint)sizeof(nuint));
            return header;
        }

        /* Stores (tableLog, maxSymbolValue) into the CTable's header slot (CTable[0]). */
        private static void HUF_writeCTableHeader(nuint* ctable, uint tableLog, uint maxSymbolValue)
        {
            HUF_CTableHeader header;
            memset(&header, 0, (uint)sizeof(nuint));
            assert(tableLog < 256);
            header.tableLog = (byte)tableLog;
            assert(maxSymbolValue < 256);
            header.maxSymbolValue = (byte)maxSymbolValue;
            memcpy(ctable, &header, (uint)sizeof(nuint));
        }

        /* HUF_writeCTable_wksp() :
         * Serializes @CTable as a weight table into @dst : first tries the FSE-compressed
         * representation, then falls back to raw 4-bit weights (2 per byte).
         * @return the number of bytes written, or an error code. */
        private static nuint HUF_writeCTable_wksp(void* dst, nuint maxDstSize, nuint* CTable, uint maxSymbolValue, uint huffLog, void* workspace, nuint workspaceSize)
        {
            nuint* ct = CTable + 1;
            byte* op = (byte*)dst;
            uint n;
            HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(uint));
            assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
            assert(HUF_readCTableHeader(CTable).tableLog == huffLog);
            if (workspaceSize < (nuint)sizeof(HUF_WriteCTableWksp))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if (maxSymbolValue > 255)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
            /* convert nbBits to weights : weight = huffLog + 1 - nbBits (0 stays 0 for unused) */
            wksp->bitsToWeight[0] = 0;
            for (n = 1; n < huffLog + 1; n++)
                wksp->bitsToWeight[n] = (byte)(huffLog + 1 - n);
            for (n = 0; n < maxSymbolValue; n++)
                wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
            if (maxDstSize < 1)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            {
                /* attempt FSE compression of the weights; op[0] holds the compressed size */
                nuint hSize = HUF_compressWeights(op + 1, maxDstSize - 1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, (nuint)sizeof(HUF_CompressWeightsWksp));
                if (ERR_isError(hSize))
                    return hSize;
                if (hSize > 1 && hSize < maxSymbolValue / 2)
                {
                    op[0] = (byte)hSize;
                    return hSize + 1;
                }
            }

            /* raw fallback : header byte >= 128 encodes the symbol count directly */
            if (maxSymbolValue > 256 - 128)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            if ((maxSymbolValue + 1) / 2 + 1 > maxDstSize)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            op[0] = (byte)(128 + (maxSymbolValue - 1));
            wksp->huffWeight[maxSymbolValue] = 0;
            for (n = 0; n < maxSymbolValue; n += 2)
                op[n / 2 + 1] = (byte)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n + 1]);
            return (maxSymbolValue + 1) / 2 + 1;
        }

        /** HUF_readCTable() :
         * Loading a CTable saved with HUF_writeCTable() */
        private static nuint HUF_readCTable(nuint* CTable, uint* maxSymbolValuePtr, void* src, nuint srcSize, uint* hasZeroWeights)
        {
            /* init not required, even though some static analyzer may complain */
            byte* huffWeight = stackalloc byte[256];
            /* large enough for values from 0 to 16 */
            uint* rankVal = stackalloc uint[13];
            uint tableLog = 0;
            uint nbSymbols = 0;
            nuint* ct = CTable + 1;
            /* get symbol weights */
            nuint readSize = HUF_readStats(huffWeight, 255 + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
            if (ERR_isError(readSize))
                return readSize;
            *hasZeroWeights = rankVal[0] > 0 ? 1U : 0U;
            if (tableLog > 12)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
            if (nbSymbols > *maxSymbolValuePtr + 1)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall));
            *maxSymbolValuePtr = nbSymbols - 1;
            HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);
            {
                /* turn per-rank counts into per-rank starting offsets */
                uint n, nextRankStart = 0;
                for (n = 1; n <= tableLog; n++)
                {
                    uint curr = nextRankStart;
                    nextRankStart += rankVal[n] << (int)(n - 1);
                    rankVal[n] = curr;
                }
            }

            {
                /* fill nbBits : nbBits = tableLog + 1 - weight, forced to 0 when weight == 0 */
                uint n;
                for (n = 0; n < nbSymbols; n++)
                {
                    uint w = huffWeight[n];
                    HUF_setNbBits(ct + n, (nuint)((byte)(tableLog + 1 - w) & -(w != 0 ? 1 : 0)));
                }
            }

            {
                ushort* nbPerRank = stackalloc ushort[14];
                /* support w=0=>n=tableLog+1 */
                memset(nbPerRank, 0, sizeof(ushort) * 14);
                ushort* valPerRank = stackalloc ushort[14];
                memset(valPerRank, 0, sizeof(ushort) * 14);
                {
                    uint n;
                    for (n = 0; n < nbSymbols; n++)
                        nbPerRank[HUF_getNbBits(ct[n])]++;
                }

                /* determine starting value per rank */
                valPerRank[tableLog + 1] = 0;
                {
                    ushort min = 0;
                    /* start at n=tablelog <-> w=1 */
                    uint n;
                    for (n = tableLog; n > 0; n--)
                    {
                        valPerRank[n] = min;
                        min += nbPerRank[n];
                        min >>= 1;
                    }
                }

                {
                    /* assign value within rank, symbol order */
                    uint n;
                    for (n = 0; n < nbSymbols; n++)
                        HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++);
                }
            }

            return readSize;
        }

        /** HUF_getNbBitsFromCTable() :
         * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
         * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
         * Note 2 : is not inlined, as HUF_CElt definition is private
         */
        private static uint HUF_getNbBitsFromCTable(nuint* CTable, uint symbolValue)
        {
            nuint* ct = CTable + 1;
            assert(symbolValue <= 255);
            if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
                return 0;
            return (uint)HUF_getNbBits(ct[symbolValue]);
        }

        /**
         * HUF_setMaxHeight():
         * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
         *
         * It attempts to convert all nodes with nbBits > @targetNbBits
         * to employ @targetNbBits instead. Then it adjusts the tree
         * so that it remains a valid canonical Huffman tree.
         *
         * @pre               The sum of the ranks of each symbol == 2^largestBits,
         *                    where largestBits == huffNode[lastNonNull].nbBits.
         * @post              The sum of the ranks of each symbol == 2^largestBits,
         *                    where largestBits is the return value (expected <= targetNbBits).
         * @param huffNode    The Huffman tree modified in place to enforce targetNbBits.
         *                    It's presumed sorted, from most frequent to rarest symbol.
         * @param lastNonNull The symbol with the lowest count in the Huffman tree.
         * @param targetNbBits  The allowed number of bits, which the Huffman tree
         *                    may not respect. After this function the Huffman tree will
         *                    respect targetNbBits.
         * @return            The maximum number of bits of the Huffman tree after adjustment.
         */
        private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint targetNbBits)
        {
            uint largestBits = huffNode[lastNonNull].nbBits;
            /* early exit : all nodes are already <= targetNbBits */
            if (largestBits <= targetNbBits)
                return largestBits;
            {
                int totalCost = 0;
                uint baseCost = (uint)(1 << (int)(largestBits - targetNbBits));
                int n = (int)lastNonNull;
                /* clamp every over-deep node to targetNbBits, accumulating the "cost"
                 * (extra rank weight created) that must be repaid below */
                while (huffNode[n].nbBits > targetNbBits)
                {
                    totalCost += (int)(baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits)));
                    huffNode[n].nbBits = (byte)targetNbBits;
                    n--;
                }

                assert(huffNode[n].nbBits <= targetNbBits);
                /* n stops at the last symbol whose depth was already < targetNbBits */
                while (huffNode[n].nbBits == targetNbBits)
                    --n;
                assert(((uint)totalCost & baseCost - 1) == 0);
                /* renormalize totalCost into units of targetNbBits-depth slots */
                totalCost >>= (int)(largestBits - targetNbBits);
                assert(totalCost > 0);
                {
                    const uint noSymbol = 0xF0F0F0F0;
                    /* rankLast[k] : index of the last (rarest) symbol at depth targetNbBits - k */
                    uint* rankLast = stackalloc uint[14];
                    memset(rankLast, 0xF0, sizeof(uint) * 14);
                    {
                        uint currentNbBits = targetNbBits;
                        int pos;
                        for (pos = n; pos >= 0; pos--)
                        {
                            if (huffNode[pos].nbBits >= currentNbBits)
                                continue;
                            currentNbBits = huffNode[pos].nbBits;
                            rankLast[targetNbBits - currentNbBits] = (uint)pos;
                        }
                    }

                    /* repay the cost by demoting (deepening by 1) selected shallow symbols */
                    while (totalCost > 0)
                    {
                        /* Try to reduce the next power of 2 above totalCost because we
                         * gain back half the rank.
                         */
                        uint nBitsToDecrease = ZSTD_highbit32((uint)totalCost) + 1;
                        for (; nBitsToDecrease > 1; nBitsToDecrease--)
                        {
                            uint highPos = rankLast[nBitsToDecrease];
                            uint lowPos = rankLast[nBitsToDecrease - 1];
                            if (highPos == noSymbol)
                                continue;
                            if (lowPos == noSymbol)
                                break;
                            {
                                /* prefer demoting the cheaper (rarer) candidate */
                                uint highTotal = huffNode[highPos].count;
                                uint lowTotal = 2 * huffNode[lowPos].count;
                                if (highTotal <= lowTotal)
                                    break;
                            }
                        }

                        assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
                        while (nBitsToDecrease <= 12 && rankLast[nBitsToDecrease] == noSymbol)
                            nBitsToDecrease++;
                        assert(rankLast[nBitsToDecrease] != noSymbol);
                        totalCost -= 1 << (int)(nBitsToDecrease - 1);
                        huffNode[rankLast[nBitsToDecrease]].nbBits++;
                        /* the demoted symbol becomes the new "last" of the shallower rank */
                        if (rankLast[nBitsToDecrease - 1] == noSymbol)
                            rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease];
                        if (rankLast[nBitsToDecrease] == 0)
                            rankLast[nBitsToDecrease] = noSymbol;
                        else
                        {
                            rankLast[nBitsToDecrease]--;
                            if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits - nBitsToDecrease)
                                rankLast[nBitsToDecrease] = noSymbol;
                        }
                    }

                    /* over-repaid : promote (shallow by 1) symbols at depth targetNbBits - 1 */
                    while (totalCost < 0)
                    {
                        if (rankLast[1] == noSymbol)
                        {
                            while (huffNode[n].nbBits == targetNbBits)
                                n--;
                            huffNode[n + 1].nbBits--;
                            assert(n >= 0);
                            rankLast[1] = (uint)(n + 1);
                            totalCost++;
                            continue;
                        }

                        huffNode[rankLast[1] + 1].nbBits--;
                        rankLast[1]++;
                        totalCost++;
                    }
                }
            }

            return targetNbBits;
        }

        /* Return the appropriate bucket index for a given count. See definition of
         * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static uint HUF_getIndex(uint count)
        {
            /* small counts get distinct buckets; larger counts share logarithmic buckets */
            return count < 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1) ? count : ZSTD_highbit32(count) + (192 - 1 - 32 - 1);
        }

        /* Helper swap function for HUF_quickSortPartition() */
        private static void HUF_swapNodes(nodeElt_s* a, nodeElt_s* b)
        {
            nodeElt_s tmp = *a;
            *a = *b;
            *b = tmp;
        }

        /* Returns 0 if the huffNode array is not sorted by descending count */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int HUF_isSorted(nodeElt_s* huffNode, uint maxSymbolValue1)
        {
            uint i;
            for (i = 1; i < maxSymbolValue1; ++i)
            {
                if (huffNode[i].count > huffNode[i - 1].count)
                {
                    return 0;
                }
            }

            return 1;
        }

        /* Insertion sort by descending order */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_insertionSort(nodeElt_s* huffNode, int low, int high)
        {
            int i;
            int size = high - low + 1;
            huffNode += low;
            for (i = 1; i < size; ++i)
            {
                nodeElt_s key = huffNode[i];
                int j = i - 1;
                /* strict '<' keeps equal-count symbols in their original order */
                while (j >= 0 && huffNode[j].count < key.count)
                {
                    huffNode[j + 1] = huffNode[j];
                    j--;
                }

                huffNode[j + 1] = key;
            }
        }

        /* Pivot helper function for quicksort. */
        private static int HUF_quickSortPartition(nodeElt_s* arr, int low, int high)
        {
            /* Simply select rightmost element as pivot. "Better" selectors like
             * median-of-three don't experimentally appear to have any benefit.
             */
            uint pivot = arr[high].count;
            int i = low - 1;
            int j = low;
            for (; j < high; j++)
            {
                if (arr[j].count > pivot)
                {
                    i++;
                    HUF_swapNodes(&arr[i], &arr[j]);
                }
            }

            HUF_swapNodes(&arr[i + 1], &arr[high]);
            return i + 1;
        }

        /* Classic quicksort by descending with partially iterative calls
         * to reduce worst case callstack size.
+ */ + private static void HUF_simpleQuickSort(nodeElt_s* arr, int low, int high) + { + const int kInsertionSortThreshold = 8; + if (high - low < kInsertionSortThreshold) + { + HUF_insertionSort(arr, low, high); + return; + } + + while (low < high) + { + int idx = HUF_quickSortPartition(arr, low, high); + if (idx - low < high - idx) + { + HUF_simpleQuickSort(arr, low, idx - 1); + low = idx + 1; + } + else + { + HUF_simpleQuickSort(arr, idx + 1, high); + high = idx - 1; + } + } + } + + /** + * HUF_sort(): + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. + * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. + * + * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. + * Must have (maxSymbolValue + 1) entries. + * @param[in] count Histogram of the symbols. + * @param[in] maxSymbolValue Maximum symbol value. + * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. 
         */
        private static void HUF_sort(nodeElt_s* huffNode, uint* count, uint maxSymbolValue, rankPos* rankPosition)
        {
            uint n;
            uint maxSymbolValue1 = maxSymbolValue + 1;
            /* Histogram symbols into their buckets (192 == RANK_POSITION_TABLE_SIZE). */
            memset(rankPosition, 0, (uint)(sizeof(rankPos) * 192));
            for (n = 0; n < maxSymbolValue1; ++n)
            {
                uint lowerRank = HUF_getIndex(count[n]);
                assert(lowerRank < 192 - 1);
                rankPosition[lowerRank].@base++;
            }

            assert(rankPosition[192 - 1].@base == 0);
            /* Suffix-sum the bucket sizes so each bucket knows its start position
             * (higher-index buckets hold larger counts and come first). */
            for (n = 192 - 1; n > 0; --n)
            {
                rankPosition[n - 1].@base += rankPosition[n].@base;
                rankPosition[n - 1].curr = rankPosition[n - 1].@base;
            }

            /* Scatter each symbol into its bucket. */
            for (n = 0; n < maxSymbolValue1; ++n)
            {
                uint c = count[n];
                uint r = HUF_getIndex(c) + 1;
                uint pos = rankPosition[r].curr++;
                assert(pos < maxSymbolValue1);
                huffNode[pos].count = c;
                huffNode[pos].@byte = (byte)n;
            }

            /* Logarithmic buckets hold mixed counts and need an explicit sort;
             * the one-count-per-bucket range below the cutoff is already ordered. */
            for (n = 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1); n < 192 - 1; ++n)
            {
                int bucketSize = rankPosition[n].curr - rankPosition[n].@base;
                uint bucketStartIdx = rankPosition[n].@base;
                if (bucketSize > 1)
                {
                    assert(bucketStartIdx < maxSymbolValue1);
                    HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize - 1);
                }
            }

            assert(HUF_isSorted(huffNode, maxSymbolValue1) != 0);
        }

        /** HUF_buildTree():
         * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
         *
         * @param huffNode       The array sorted by HUF_sort(). Builds the Huffman tree in this array.
         * @param maxSymbolValue The maximum symbol value.
         * @return               The smallest node in the Huffman tree (by count).
         */
        private static int HUF_buildTree(nodeElt_s* huffNode, uint maxSymbolValue)
        {
            /* huffNode0[0] (i.e. huffNode[-1]) is a sentinel slot; internal nodes start at index 256 */
            nodeElt_s* huffNode0 = huffNode - 1;
            int nonNullRank;
            int lowS, lowN;
            int nodeNb = 255 + 1;
            int n, nodeRoot;
            /* skip trailing zero-count symbols */
            nonNullRank = (int)maxSymbolValue;
            while (huffNode[nonNullRank].count == 0)
                nonNullRank--;
            lowS = nonNullRank;
            nodeRoot = nodeNb + lowS - 1;
            lowN = nodeNb;
            /* merge the two rarest leaves into the first internal node */
            huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
            huffNode[lowS].parent = huffNode[lowS - 1].parent = (ushort)nodeNb;
            nodeNb++;
            lowS -= 2;
            /* fill unused internal slots with a huge count so they are never picked early */
            for (n = nodeNb; n <= nodeRoot; n++)
                huffNode[n].count = 1U << 30;
            /* sentinel so the leaf scan can never underrun the array */
            huffNode0[0].count = 1U << 31;
            /* repeatedly merge the two smallest available nodes (leaf or internal) */
            while (nodeNb <= nodeRoot)
            {
                int n1 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++;
                int n2 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++;
                huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
                huffNode[n1].parent = huffNode[n2].parent = (ushort)nodeNb;
                nodeNb++;
            }

            /* distribute depths top-down : internal nodes first, then the leaves */
            huffNode[nodeRoot].nbBits = 0;
            for (n = nodeRoot - 1; n >= 255 + 1; n--)
                huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1);
            for (n = 0; n <= nonNullRank; n++)
                huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1);
            return nonNullRank;
        }

        /**
         * HUF_buildCTableFromTree():
         * Build the CTable given the Huffman tree in huffNode.
         *
         * @param[out] CTable         The output Huffman CTable.
         * @param      huffNode       The Huffman tree.
         * @param      nonNullRank    The last and smallest node in the Huffman tree.
         * @param      maxSymbolValue The maximum symbol value.
         * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
         */
        private static void HUF_buildCTableFromTree(nuint* CTable, nodeElt_s* huffNode, int nonNullRank, uint maxSymbolValue, uint maxNbBits)
        {
            nuint* ct = CTable + 1;
            /* fill result into ctable (val, nbBits) */
            int n;
            ushort* nbPerRank = stackalloc ushort[13];
            memset(nbPerRank, 0, sizeof(ushort) * 13);
            ushort* valPerRank = stackalloc ushort[13];
            memset(valPerRank, 0, sizeof(ushort) * 13);
            int alphabetSize = (int)(maxSymbolValue + 1);
            for (n = 0; n <= nonNullRank; n++)
                nbPerRank[huffNode[n].nbBits]++;
            {
                /* determine starting value per rank (deepest rank first) */
                ushort min = 0;
                for (n = (int)maxNbBits; n > 0; n--)
                {
                    valPerRank[n] = min;
                    min += nbPerRank[n];
                    min >>= 1;
                }
            }

            /* huffNode is sorted by count, so index back via .@byte to symbol order */
            for (n = 0; n < alphabetSize; n++)
                HUF_setNbBits(ct + huffNode[n].@byte, huffNode[n].nbBits);
            for (n = 0; n < alphabetSize; n++)
                HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++);
            HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
        }

        /* HUF_buildCTable_wksp() :
         * Builds a depth-limited Huffman CTable from the histogram @count :
         * sort symbols, build the unlimited tree, enforce maxNbBits, emit the table.
         * @return the tableLog actually used, or an error code. */
        private static nuint HUF_buildCTable_wksp(nuint* CTable, uint* count, uint maxSymbolValue, uint maxNbBits, void* workSpace, nuint wkspSize)
        {
            HUF_buildCTable_wksp_tables* wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(uint));
            nodeElt_s* huffNode0 = &wksp_tables->huffNodeTbl.e0;
            /* slot 0 is reserved as HUF_buildTree's sentinel */
            nodeElt_s* huffNode = huffNode0 + 1;
            int nonNullRank;
            if (wkspSize < (nuint)sizeof(HUF_buildCTable_wksp_tables))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
            if (maxNbBits == 0)
                maxNbBits = 11;
            if (maxSymbolValue > 255)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge));
            memset(huffNode0, 0, (uint)(sizeof(nodeElt_s) * 512));
            HUF_sort(huffNode, count, maxSymbolValue, &wksp_tables->rankPosition.e0);
            nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
            maxNbBits = HUF_setMaxHeight(huffNode, (uint)nonNullRank, maxNbBits);
            if (maxNbBits > 12)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
            return maxNbBits;
        }

        /* Estimated compressed payload size (in bytes, floor) of @count under @CTable. */
        private static nuint HUF_estimateCompressedSize(nuint* CTable, uint* count, uint maxSymbolValue)
        {
            nuint* ct = CTable + 1;
            nuint nbBits = 0;
            int s;
            for (s = 0; s <= (int)maxSymbolValue; ++s)
            {
                nbBits += HUF_getNbBits(ct[s]) * count[s];
            }

            return nbBits >> 3;
        }

        /* Returns 1 when @CTable can encode every symbol present in @count, 0 otherwise. */
        private static int HUF_validateCTable(nuint* CTable, uint* count, uint maxSymbolValue)
        {
            HUF_CTableHeader header = HUF_readCTableHeader(CTable);
            nuint* ct = CTable + 1;
            int bad = 0;
            int s;
            assert(header.tableLog <= 12);
            if (header.maxSymbolValue < maxSymbolValue)
                return 0;
            /* a used symbol with 0 bits has no code and cannot be encoded */
            for (s = 0; s <= (int)maxSymbolValue; ++s)
            {
                bad |= count[s] != 0 && HUF_getNbBits(ct[s]) == 0 ? 1 : 0;
            }

            return bad == 0 ? 1 : 0;
        }

        /* Worst-case compressed size for a @size input (HUF_COMPRESSBOUND equivalent). */
        private static nuint HUF_compressBound(nuint size)
        {
            return 129 + (size + (size >> 8) + 8);
        }

        /**! HUF_initCStream():
         * Initializes the bitstream.
         * @returns 0 or an error code.
         */
        private static nuint HUF_initCStream(ref HUF_CStream_t bitC, void* startPtr, nuint dstCapacity)
        {
            /* endPtr leaves sizeof(nuint) bytes of slack for the flush over-write */
            bitC = new HUF_CStream_t
            {
                startPtr = (byte*)startPtr,
                ptr = (byte*)startPtr,
                endPtr = (byte*)startPtr + dstCapacity - sizeof(nuint)
            };
            if (dstCapacity <= (nuint)sizeof(nuint))
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            return 0;
        }

        /*! HUF_addBits():
         * Adds the symbol stored in HUF_CElt elt to the bitstream.
         *
         * @param elt   The element we're adding. This is a (nbBits, value) pair.
         *              See the HUF_CStream_t docs for the format.
         * @param idx   Insert into the bitstream at this idx.
         * @param kFast This is a template parameter. If the bitstream is guaranteed
         *              to have at least 4 unused bits after this call it may be 1,
         *              otherwise it must be 0. HUF_addBits() is faster when fast is set.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_addBits(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, nuint elt, int kFast)
        {
            assert(HUF_getNbBits(elt) <= 12);
            /* shift the container right, then OR the left-aligned code value in at the top */
            bitC_bitContainer_e0 >>= (int)HUF_getNbBits(elt);
            bitC_bitContainer_e0 |= kFast != 0 ? HUF_getValueFast(elt) : HUF_getValue(elt);
            bitC_bitPos_e0 += HUF_getNbBitsFast(elt);
            assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8));
        }

        /* Resets the secondary (index 1) bit container. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_zeroIndex1(ref nuint bitC_bitContainer_e1, ref nuint bitC_bitPos_e1)
        {
            bitC_bitContainer_e1 = 0;
            bitC_bitPos_e1 = 0;
        }

        /*! HUF_mergeIndex1() :
         * Merges the bit container @ index 1 into the bit container @ index 0
         * and zeros the bit container @ index 1.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_mergeIndex1(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, ref nuint bitC_bitContainer_e1, ref nuint bitC_bitPos_e1)
        {
            assert((bitC_bitPos_e1 & 0xFF) < (nuint)(sizeof(nuint) * 8));
            bitC_bitContainer_e0 >>= (int)(bitC_bitPos_e1 & 0xFF);
            bitC_bitContainer_e0 |= bitC_bitContainer_e1;
            bitC_bitPos_e0 += bitC_bitPos_e1;
            assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8));
        }

        /*! HUF_flushBits() :
         * Flushes the bits in the bit container @ index 0.
         *
         * @post bitPos will be < 8.
         * @param kFast If kFast is set then we must know a-priori that
         *              the bit container will not overflow.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_flushBits(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, ref byte* bitC_ptr, byte* bitC_endPtr, int kFast)
        {
            /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
            nuint nbBits = bitC_bitPos_e0 & 0xFF;
            nuint nbBytes = nbBits >> 3;
            /* The top nbBits bits of bitContainer are the ones we need. */
            nuint bitContainer = bitC_bitContainer_e0 >> (int)((nuint)(sizeof(nuint) * 8) - nbBits);
            bitC_bitPos_e0 &= 7;
            assert(nbBits > 0);
            assert(nbBits <= (nuint)(sizeof(nuint) * 8));
            assert(bitC_ptr <= bitC_endPtr);
            /* whole-word write; only nbBytes of it are committed via the pointer bump */
            MEM_writeLEST(bitC_ptr, bitContainer);
            bitC_ptr += nbBytes;
            assert(kFast == 0 || bitC_ptr <= bitC_endPtr);
            /* slow path clamps so subsequent over-writes stay inside the buffer slack */
            if (kFast == 0 && bitC_ptr > bitC_endPtr)
                bitC_ptr = bitC_endPtr;
        }

        /*! HUF_endMark()
         * @returns The Huffman stream end mark: A 1-bit value = 1.
         */
        private static nuint HUF_endMark()
        {
            nuint endMark;
            HUF_setNbBits(&endMark, 1);
            HUF_setValue(&endMark, 1);
            return endMark;
        }

        /*! HUF_closeCStream() :
         * @return Size of CStream, in bytes,
         * or 0 if it could not fit into dstBuffer */
        private static nuint HUF_closeCStream(ref HUF_CStream_t bitC)
        {
            HUF_addBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, HUF_endMark(), 0);
            HUF_flushBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, ref bitC.ptr, bitC.endPtr, 0);
            {
                nuint nbBits = bitC.bitPos.e0 & 0xFF;
                /* ptr reaching endPtr means flushes were clamped : output did not fit */
                if (bitC.ptr >= bitC.endPtr)
                    return 0;
                /* +1 byte when a partial byte is still buffered */
                return (nuint)(bitC.ptr - bitC.startPtr) + (nuint)(nbBits > 0 ? 1 : 0);
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_encodeSymbol(ref nuint bitCPtr_bitContainer_e0, ref nuint bitCPtr_bitPos_e0, uint symbol, nuint* CTable, int fast)
        {
            HUF_addBits(ref bitCPtr_bitContainer_e0, ref bitCPtr_bitPos_e0, CTable[symbol], fast);
        }

        /* Encodes @srcSize symbols from @ip back-to-front, kUnroll symbols per flush,
         * alternating between the two bit containers in the steady-state loop.
         * kUnroll/kFastFlush/kLastFast act as template parameters (constants per call site). */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void HUF_compress1X_usingCTable_internal_body_loop(ref HUF_CStream_t bitC, byte* ip, nuint srcSize, nuint* ct, int kUnroll, int kFastFlush, int kLastFast)
        {
            /* hoist stream state into locals for the duration of the loop */
            byte* bitC_ptr = bitC.ptr;
            byte* bitC_endPtr = bitC.endPtr;
            nuint bitC_bitContainer_e0 = bitC.bitContainer.e0;
            nuint bitC_bitPos_e0 = bitC.bitPos.e0;
            nuint bitC_bitContainer_e1 = bitC.bitContainer.e1;
            nuint bitC_bitPos_e1 = bitC.bitPos.e1;
            /* Join to kUnroll */
            int n = (int)srcSize;
            int rem = n % kUnroll;
            if (rem > 0)
            {
                for (; rem > 0; --rem)
                {
                    HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[--n], ct, 0);
                }

                HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush);
            }

            assert(n % kUnroll == 0);
            /* Join to 2 * kUnroll */
            if (n % (2 * kUnroll) != 0)
            {
                int u;
                for (u = 1; u < kUnroll; ++u)
                {
                    HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1);
                }

                HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - kUnroll], ct, kLastFast);
                HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush);
                n -= kUnroll;
            }

            assert(n % (2 * kUnroll) == 0);
            for (; n > 0; n -= 2 * kUnroll)
            {
                /* Encode kUnroll symbols into the bitstream @ index 0. */
                int u;
                for (u = 1; u < kUnroll; ++u)
                {
                    HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1);
                }

                HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - kUnroll], ct, kLastFast);
                HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush);
                /* Encode the next kUnroll symbols into the bitstream @ index 1, then merge. */
                HUF_zeroIndex1(ref bitC_bitContainer_e1, ref bitC_bitPos_e1);
                for (u = 1; u < kUnroll; ++u)
                {
                    HUF_encodeSymbol(ref bitC_bitContainer_e1, ref bitC_bitPos_e1, ip[n - kUnroll - u], ct, 1);
                }

                HUF_encodeSymbol(ref bitC_bitContainer_e1, ref bitC_bitPos_e1, ip[n - kUnroll - kUnroll], ct, kLastFast);
                HUF_mergeIndex1(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_bitContainer_e1, ref bitC_bitPos_e1);
                HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush);
            }

            assert(n == 0);
            /* write the locals back to the stream struct */
            bitC.ptr = bitC_ptr;
            bitC.endPtr = bitC_endPtr;
            bitC.bitContainer.e0 = bitC_bitContainer_e0;
            bitC.bitPos.e0 = bitC_bitPos_e0;
            bitC.bitContainer.e1 = bitC_bitContainer_e1;
            bitC.bitPos.e1 = bitC_bitPos_e1;
        }

        /**
         * Returns a tight upper bound on the output space needed by Huffman
         * with 8 bytes buffer to handle over-writes. If the output is at least
         * this large we don't need to do bounds checks during Huffman encoding.
         */
        private static nuint HUF_tightCompressBound(nuint srcSize, nuint tableLog)
        {
            return (srcSize * tableLog >> 3) + 8;
        }

        /* Single-stream Huffman encoding of @src using @CTable.
         * @return the compressed size, or 0 when the output does not fit. */
        private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable)
        {
            uint tableLog = HUF_readCTableHeader(CTable).tableLog;
            nuint* ct = CTable + 1;
            byte* ip = (byte*)src;
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstSize;
            HUF_CStream_t bitC;
            System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC);
            if (dstSize < 8)
                return 0;
            {
                byte* op = ostart;
                nuint initErr = HUF_initCStream(ref bitC, op, (nuint)(oend - op));
                /* init failure is reported as "not compressible" rather than an error */
                if (ERR_isError(initErr))
                    return 0;
            }

            /* output may be tight (or tableLog too deep for fast flushes) : take the checked path */
            if (dstSize < HUF_tightCompressBound(srcSize, tableLog) || tableLog > 11)
                HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, MEM_32bits ? 2 : 4, 0, 0);
            else
            {
                /* unchecked fast path : unroll factor chosen so the bit container cannot overflow */
                if (MEM_32bits)
                {
                    switch (tableLog)
                    {
                        case 11:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 2, 1, 0);
                            break;
                        case 10:
                        case 9:
                        case 8:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 2, 1, 1);
                            break;
                        case 7:
                        default:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 3, 1, 1);
                            break;
                    }
                }
                else
                {
                    switch (tableLog)
                    {
                        case 11:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 5, 1, 0);
                            break;
                        case 10:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 5, 1, 1);
                            break;
                        case 9:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 6, 1, 0);
                            break;
                        case 8:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 7, 1, 0);
                            break;
                        case 7:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 8, 1, 0);
                            break;
                        case 6:
                        default:
                            HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 9, 1, 1);
                            break;
                    }
                }
            }

            assert(bitC.ptr <= bitC.endPtr);
            return HUF_closeCStream(ref bitC);
        }

        private static nuint HUF_compress1X_usingCTable_internal(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags)
        {
            return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
        }

        /* ====================== */
        /* single stream variants */
        /* ====================== */
        private static nuint HUF_compress1X_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags)
        {
            return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
        }

        /* Four-stream variant : splits @src into 4 segments, encodes each one with
         * HUF_compress1X, and prefixes the first 3 compressed sizes as 16-bit LE jump table.
         * @return the total compressed size, 0 when not compressible, or an error code. */
        private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags)
        {
            /* first 3 segments */
            nuint segmentSize = (srcSize + 3) / 4;
            byte* ip = (byte*)src;
            byte* iend = ip + srcSize;
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstSize;
            byte* op = ostart;
            /* minimum : jump table (6) + 1 byte per stream + 8 bytes slack */
            if (dstSize < 6 + 1 + 1 + 1 + 8)
                return 0;
            if (srcSize < 12)
                return 0;
            op += 6;
            assert(op <= oend);
            {
                nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags);
                if (ERR_isError(cSize))
                    return cSize;
                if (cSize == 0 || cSize > 65535)
                    return 0;
                MEM_writeLE16(ostart, (ushort)cSize);
                op += cSize;
            }

            ip += segmentSize;
            assert(op <= oend);
            {
                nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags);
                if (ERR_isError(cSize))
                    return cSize;
                if (cSize == 0 || cSize > 65535)
                    return 0;
                MEM_writeLE16(ostart + 2, (ushort)cSize);
                op += cSize;
            }

            ip += segmentSize;
            assert(op <= oend);
            {
                nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags);
                if (ERR_isError(cSize))
                    return cSize;
                if (cSize == 0 || cSize > 65535)
                    return 0;
                MEM_writeLE16(ostart + 4, (ushort)cSize);
                op += cSize;
            }

            ip += segmentSize;
            assert(op <= oend);
            assert(ip <= iend);
            {
                /* last segment takes whatever remains; its size is implied, not stored */
                nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, (nuint)(iend - ip), CTable, flags);
                if (ERR_isError(cSize))
                    return cSize;
                if (cSize == 0 || cSize > 65535)
                    return 0;
                op += cSize;
            }

            return (nuint)(op - ostart);
        }

        private static nuint HUF_compress4X_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags)
        {
            return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
        }

        /* Dispatches to the 1-stream or 4-stream encoder and applies the
         * "must actually save space" rule : returns 0 when output >= srcSize - 1. */
        private static nuint HUF_compressCTable_internal(byte* ostart, byte* op, byte* oend, void* src, nuint srcSize, HUF_nbStreams_e nbStreams, nuint* CTable, int flags)
        {
            nuint cSize = nbStreams == HUF_nbStreams_e.HUF_singleStream ? HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), src, srcSize, CTable, flags) : HUF_compress4X_usingCTable_internal(op, (nuint)(oend - op), src, srcSize, CTable, flags);
            if (ERR_isError(cSize))
            {
                return cSize;
            }

            if (cSize == 0)
            {
                return 0;
            }

            op += cSize;
            assert(op >= ostart);
            /* not compressible enough : caller falls back to raw storage */
            if ((nuint)(op - ostart) >= srcSize - 1)
            {
                return 0;
            }

            return (nuint)(op - ostart);
        }

        /* Number of symbols in [0, maxSymbolValue] with a non-zero count. */
        private static uint HUF_cardinality(uint* count, uint maxSymbolValue)
        {
            uint cardinality = 0;
            uint i;
            for (i = 0; i < maxSymbolValue + 1; i++)
            {
                if (count[i] != 0)
                    cardinality += 1;
            }

            return cardinality;
        }

        /*! HUF_compress() does the following:
         * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
         * 2. (optional) refine tableLog using HUF_optimalTableLog()
         * 3. build Huffman table from count using HUF_buildCTable()
         * 4. save Huffman table to memory buffer using HUF_writeCTable()
         * 5. encode the data stream using HUF_compress4X_usingCTable()
         *
         * The following API allows targeting specific sub-functions for advanced tasks.
         * For example, it's possible to compress several blocks using the same 'CTable',
         * or to save and regenerate 'CTable' using external methods.
         */
        private static uint HUF_minTableLog(uint symbolCardinality)
        {
            /* minimum depth that can give every present symbol a distinct code */
            uint minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
            return minBitsSymbols;
        }

        /* HUF_optimalTableLog() :
         * Picks the tableLog to use. Without HUF_flags_optimalDepth it defers to the
         * FSE heuristic; otherwise it builds candidate tables for every depth in
         * [minTableLog, maxTableLog] and keeps the depth whose estimated total output
         * (weight-table header + payload) is smallest. */
        private static uint HUF_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue, void* workSpace, nuint wkspSize, nuint* table, uint* count, int flags)
        {
            assert(srcSize > 1);
            assert(wkspSize >= (nuint)sizeof(HUF_buildCTable_wksp_tables));
            if ((flags & (int)HUF_flags_e.HUF_flags_optimalDepth) == 0)
            {
                return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
            }

            {
                /* scratch area for the trial header writes lives past the CTable workspace */
                byte* dst = (byte*)workSpace + sizeof(HUF_WriteCTableWksp);
                nuint dstSize = wkspSize - (nuint)sizeof(HUF_WriteCTableWksp);
                nuint hSize, newSize;
                uint symbolCardinality = HUF_cardinality(count, maxSymbolValue);
                uint minTableLog = HUF_minTableLog(symbolCardinality);
                nuint optSize = unchecked((nuint)~0) - 1;
                uint optLog = maxTableLog, optLogGuess;
                for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++)
                {
                    {
                        nuint maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
                        if (ERR_isError(maxBits))
                            continue;
                        /* the tree stopped growing : deeper guesses cannot differ */
                        if (maxBits < optLogGuess && optLogGuess > minTableLog)
                            break;
                        hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (uint)maxBits, workSpace, wkspSize);
                    }

                    if (ERR_isError(hSize))
                        continue;
                    newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
                    /* estimates stopped improving : give up early */
                    if (newSize > optSize + 1)
                    {
                        break;
                    }

                    if (newSize < optSize)
                    {
                        optSize = newSize;
                        optLog = optLogGuess;
                    }
                }

                assert(optLog <= 12);
                return optLog;
            }
        }

        /* HUF_compress_internal() :
         * `workSpace_align4` must be aligned on 4-bytes boundaries,
         * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
        private static nuint
HUF_compress_internal(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, HUF_nbStreams_e nbStreams, void* workSpace, nuint wkspSize, nuint* oldHufTable, HUF_repeat* repeat, int flags) + { + HUF_compress_tables_t* table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(ulong)); + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* op = ostart; + if (wkspSize < (nuint)sizeof(HUF_compress_tables_t)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + if (srcSize == 0) + return 0; + if (dstSize == 0) + return 0; + if (srcSize > 128 * 1024) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + if (huffLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (maxSymbolValue > 255) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + if (maxSymbolValue == 0) + maxSymbolValue = 255; + if (huffLog == 0) + huffLog = 11; + if ((flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 && repeat != null && *repeat == HUF_repeat.HUF_repeat_valid) + { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + } + + if ((flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 && srcSize >= 4096 * 10) + { + nuint largestTotal = 0; + { + uint maxSymbolValueBegin = maxSymbolValue; + nuint largestBegin = HIST_count_simple(table->count, &maxSymbolValueBegin, (byte*)src, 4096); + if (ERR_isError(largestBegin)) + return largestBegin; + largestTotal += largestBegin; + } + + { + uint maxSymbolValueEnd = maxSymbolValue; + nuint largestEnd = HIST_count_simple(table->count, &maxSymbolValueEnd, (byte*)src + srcSize - 4096, 4096); + if (ERR_isError(largestEnd)) + return largestEnd; + largestTotal += largestEnd; + } + + if (largestTotal <= (2 * 4096 >> 7) + 4) + return 0; + } + + { + nuint largest = HIST_count_wksp(table->count, &maxSymbolValue, 
(byte*)src, srcSize, table->wksps.hist_wksp, sizeof(uint) * 1024); + if (ERR_isError(largest)) + return largest; + if (largest == srcSize) + { + *ostart = ((byte*)src)[0]; + return 1; + } + + if (largest <= (srcSize >> 7) + 4) + return 0; + } + + if (repeat != null && *repeat == HUF_repeat.HUF_repeat_check && HUF_validateCTable(oldHufTable, table->count, maxSymbolValue) == 0) + { + *repeat = HUF_repeat.HUF_repeat_none; + } + + if ((flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 && repeat != null && *repeat != HUF_repeat.HUF_repeat_none) + { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + } + + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, (nuint)sizeof(_wksps_e__Union), &table->CTable.e0, table->count, flags); + { + nuint maxBits = HUF_buildCTable_wksp(&table->CTable.e0, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, (nuint)sizeof(HUF_buildCTable_wksp_tables)); + { + nuint _var_err__ = maxBits; + if (ERR_isError(_var_err__)) + return _var_err__; + } + + huffLog = (uint)maxBits; + } + + { + nuint hSize = HUF_writeCTable_wksp(op, dstSize, &table->CTable.e0, maxSymbolValue, huffLog, &table->wksps.writeCTable_wksp, (nuint)sizeof(HUF_WriteCTableWksp)); + if (ERR_isError(hSize)) + return hSize; + if (repeat != null && *repeat != HUF_repeat.HUF_repeat_none) + { + nuint oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); + nuint newSize = HUF_estimateCompressedSize(&table->CTable.e0, table->count, maxSymbolValue); + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) + { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + } + } + + if (hSize + 12U >= srcSize) + { + return 0; + } + + op += hSize; + if (repeat != null) + { + *repeat = HUF_repeat.HUF_repeat_none; + } + + if (oldHufTable != null) + memcpy(oldHufTable, &table->CTable.e0, sizeof(ulong) * 257); + } + + return 
HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, &table->CTable.e0, flags); + } + + /** HUF_compress1X_repeat() : + * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + * If preferRepeat then the old table will always be used if valid. + * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ + private static nuint HUF_compress1X_repeat(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, void* workSpace, nuint wkspSize, nuint* hufTable, HUF_repeat* repeat, int flags) + { + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_nbStreams_e.HUF_singleStream, workSpace, wkspSize, hufTable, repeat, flags); + } + + /* HUF_compress4X_repeat(): + * compress input using 4 streams. 
+ * consider skipping quickly + * reuse an existing huffman compression table */ + private static nuint HUF_compress4X_repeat(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, void* workSpace, nuint wkspSize, nuint* hufTable, HUF_repeat* repeat, int flags) + { + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_nbStreams_e.HUF_fourStreams, workSpace, wkspSize, hufTable, repeat, flags); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs new file mode 100644 index 000000000..4b131a8f3 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs @@ -0,0 +1,2043 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static DTableDesc HUF_getDTableDesc(uint* table) + { + DTableDesc dtd; + memcpy(&dtd, table, (uint)sizeof(DTableDesc)); + return dtd; + } + + private static nuint HUF_initFastDStream(byte* ip) + { + byte lastByte = ip[7]; + nuint bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + nuint value = MEM_readLEST(ip) | 1; + assert(bitsConsumed <= 8); + assert(sizeof(nuint) == 8); + return value << (int)bitsConsumed; + } + + /** + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. + * Or an error code on failure. 
+ */ + private static nuint HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, nuint dstSize, void* src, nuint srcSize, uint* DTable) + { + void* dt = DTable + 1; + uint dtLog = HUF_getDTableDesc(DTable).tableLog; + byte* istart = (byte*)src; + byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); + if (!BitConverter.IsLittleEndian || MEM_32bits) + return 0; + if (dstSize == 0) + return 0; + assert(dst != null); + if (srcSize < 10) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (dtLog != 11) + return 0; + { + nuint length1 = MEM_readLE16(istart); + nuint length2 = MEM_readLE16(istart + 2); + nuint length3 = MEM_readLE16(istart + 4); + nuint length4 = srcSize - (length1 + length2 + length3 + 6); + args->iend.e0 = istart + 6; + args->iend.e1 = args->iend.e0 + length1; + args->iend.e2 = args->iend.e1 + length2; + args->iend.e3 = args->iend.e2 + length3; + if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) + return 0; + if (length4 > srcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + args->ip.e0 = args->iend.e1 - sizeof(ulong); + args->ip.e1 = args->iend.e2 - sizeof(ulong); + args->ip.e2 = args->iend.e3 - sizeof(ulong); + args->ip.e3 = (byte*)src + srcSize - sizeof(ulong); + args->op.e0 = (byte*)dst; + args->op.e1 = args->op.e0 + (dstSize + 3) / 4; + args->op.e2 = args->op.e1 + (dstSize + 3) / 4; + args->op.e3 = args->op.e2 + (dstSize + 3) / 4; + if (args->op.e3 >= oend) + return 0; + args->bits[0] = HUF_initFastDStream(args->ip.e0); + args->bits[1] = HUF_initFastDStream(args->ip.e1); + args->bits[2] = HUF_initFastDStream(args->ip.e2); + args->bits[3] = HUF_initFastDStream(args->ip.e3); + args->ilowest = istart; + args->oend = oend; + args->dt = dt; + return 1; + } + + private static nuint HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs* args, int stream, byte* segmentEnd) + { + if ((&args->op.e0)[stream] > segmentEnd) + 
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if ((&args->ip.e0)[stream] < (&args->iend.e0)[stream] - 8) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + assert(sizeof(nuint) == 8); + bit->bitContainer = MEM_readLEST((&args->ip.e0)[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); + bit->start = (sbyte*)args->ilowest; + bit->limitPtr = bit->start + sizeof(nuint); + bit->ptr = (sbyte*)(&args->ip.e0)[stream]; + return 0; + } + + /** + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at + * a time. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong HUF_DEltX1_set4(byte symbol, byte nbBits) + { + ulong D4; + if (BitConverter.IsLittleEndian) + { + D4 = (ulong)((symbol << 8) + nbBits); + } + else + { + D4 = (ulong)(symbol + (nbBits << 8)); + } + + assert(D4 < 1U << 16); + D4 *= 0x0001000100010001UL; + return D4; + } + + /** + * Increase the tableLog to targetTableLog and rescales the stats. + * If tableLog > targetTableLog this is a no-op. + * @returns New tableLog + */ + private static uint HUF_rescaleStats(byte* huffWeight, uint* rankVal, uint nbSymbols, uint tableLog, uint targetTableLog) + { + if (tableLog > targetTableLog) + return tableLog; + if (tableLog < targetTableLog) + { + uint scale = targetTableLog - tableLog; + uint s; + for (s = 0; s < nbSymbols; ++s) + { + huffWeight[s] += (byte)(huffWeight[s] == 0 ? 
0 : scale); + } + + for (s = targetTableLog; s > scale; --s) + { + rankVal[s] = rankVal[s - scale]; + } + + for (s = scale; s > 0; --s) + { + rankVal[s] = 0; + } + } + + return targetTableLog; + } + + private static nuint HUF_readDTableX1_wksp(uint* DTable, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags) + { + uint tableLog = 0; + uint nbSymbols = 0; + nuint iSize; + void* dtPtr = DTable + 1; + HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; + HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; + if ((nuint)sizeof(HUF_ReadDTableX1_Workspace) > wkspSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + iSize = HUF_readStats_wksp(wksp->huffWeight, 255 + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(uint) * 219, flags); + if (ERR_isError(iSize)) + return iSize; + { + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint maxTableLog = (uint)(dtd.maxTableLog + 1); + uint targetTableLog = maxTableLog < 11 ? 
maxTableLog : 11; + tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); + if (tableLog > (uint)(dtd.maxTableLog + 1)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + dtd.tableType = 0; + dtd.tableLog = (byte)tableLog; + memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); + } + + { + int n; + uint nextRankStart = 0; + const int unroll = 4; + int nLimit = (int)nbSymbols - unroll + 1; + for (n = 0; n < (int)tableLog + 1; n++) + { + uint curr = nextRankStart; + nextRankStart += wksp->rankVal[n]; + wksp->rankStart[n] = curr; + } + + for (n = 0; n < nLimit; n += unroll) + { + int u; + for (u = 0; u < unroll; ++u) + { + nuint w = wksp->huffWeight[n + u]; + wksp->symbols[wksp->rankStart[w]++] = (byte)(n + u); + } + } + + for (; n < (int)nbSymbols; ++n) + { + nuint w = wksp->huffWeight[n]; + wksp->symbols[wksp->rankStart[w]++] = (byte)n; + } + } + + { + uint w; + int symbol = (int)wksp->rankVal[0]; + int rankStart = 0; + for (w = 1; w < tableLog + 1; ++w) + { + int symbolCount = (int)wksp->rankVal[w]; + int length = 1 << (int)w >> 1; + int uStart = rankStart; + byte nbBits = (byte)(tableLog + 1 - w); + int s; + int u; + switch (length) + { + case 1: + for (s = 0; s < symbolCount; ++s) + { + HUF_DEltX1 D; + D.@byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart] = D; + uStart += 1; + } + + break; + case 2: + for (s = 0; s < symbolCount; ++s) + { + HUF_DEltX1 D; + D.@byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart + 0] = D; + dt[uStart + 1] = D; + uStart += 2; + } + + break; + case 4: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + uStart += 4; + } + + break; + case 8: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + MEM_write64(dt + uStart + 4, D4); + uStart += 8; + } + + break; + 
default: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + for (u = 0; u < length; u += 16) + { + MEM_write64(dt + uStart + u + 0, D4); + MEM_write64(dt + uStart + u + 4, D4); + MEM_write64(dt + uStart + u + 8, D4); + MEM_write64(dt + uStart + u + 12, D4); + } + + assert(u == length); + uStart += length; + } + + break; + } + + symbol += symbolCount; + rankStart += symbolCount * length; + } + } + + return iSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte HUF_decodeSymbolX1(BIT_DStream_t* Dstream, HUF_DEltX1* dt, uint dtLog) + { + /* note : dtLog >= 1 */ + nuint val = BIT_lookBitsFast(Dstream, dtLog); + byte c = dt[val].@byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decodeStreamX1(byte* p, BIT_DStream_t* bitDPtr, byte* pEnd, HUF_DEltX1* dt, uint dtLog) + { + byte* pStart = p; + if (pEnd - p > 3) + { + while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - 3) + { + if (MEM_64bits) + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + if (MEM_64bits) + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + } + } + else + { + BIT_reloadDStream(bitDPtr); + } + + if (MEM_32bits) + while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd) + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + while (p < pEnd) + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + return (nuint)(pEnd - pStart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress1X1_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + { + byte* op = (byte*)dst; + byte* oend = ZSTD_maybeNullPtrAdd(op, (nint)dstSize); + void* dtPtr = DTable + 1; + HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; 
+ BIT_DStream_t bitD; + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint dtLog = dtd.tableLog; + { + nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); + if (BIT_endOfDStream(&bitD) == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return dstSize; + } + + /* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress4X1_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + { + if (cSrcSize < 10) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (dstSize < 6) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + { + byte* istart = (byte*)cSrc; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* olimit = oend - 3; + void* dtPtr = DTable + 1; + HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + nuint length1 = MEM_readLE16(istart); + nuint length2 = MEM_readLE16(istart + 2); + nuint length3 = MEM_readLE16(istart + 4); + nuint length4 = cSrcSize - (length1 + length2 + length3 + 6); + /* jumpTable */ + byte* istart1 = istart + 6; + byte* istart2 = istart1 + length1; + byte* istart3 = istart2 + length2; + byte* istart4 = istart3 + length3; + nuint segmentSize = (dstSize + 3) / 4; + byte* opStart2 = ostart + segmentSize; + byte* opStart3 = opStart2 + segmentSize; + byte* opStart4 = opStart3 + segmentSize; + byte* op1 = ostart; + byte* op2 = opStart2; + byte* op3 = opStart3; + byte* op4 = opStart4; + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint dtLog = dtd.tableLog; + uint endSignal = 1; + if (length4 > cSrcSize) + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (opStart4 > oend) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + assert(dstSize >= 6); + { + nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + { + nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + { + nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + { + nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) + { + for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0;) + { + if (MEM_64bits) + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + if (MEM_64bits) + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + if (MEM_64bits) + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + if (MEM_64bits) + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + if (MEM_64bits) + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + if (MEM_64bits) + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + if (MEM_64bits) + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + if (MEM_64bits) + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 
1U : 0U; + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + } + } + + if (op1 > opStart2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op2 > opStart3) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op3 > opStart4) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); + { + uint endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (endCheck == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + return dstSize; + } + } + + private static nuint HUF_decompress4X1_usingDTable_internal_default(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); + } + + private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) + { + ulong bits0, bits1, bits2, bits3; + byte* ip0, ip1, ip2, ip3; + byte* op0, op1, op2, op3; + ushort* dtable = (ushort*)args->dt; + byte* oend = args->oend; + byte* ilowest = args->ilowest; + bits0 = args->bits[0]; + bits1 = args->bits[1]; + bits2 = args->bits[2]; + bits3 = args->bits[3]; + ip0 = args->ip.e0; + ip1 = args->ip.e1; + ip2 = args->ip.e2; + ip3 = args->ip.e3; + op0 = args->op.e0; + op1 = args->op.e1; + op2 = args->op.e2; + op3 = args->op.e3; + assert(BitConverter.IsLittleEndian); + assert(!MEM_32bits); + for (; ; ) + { + byte* olimit; + int stream; + { + assert(op0 <= op1); + assert(ip0 >= ilowest); + } + + { + 
assert(op1 <= op2); + assert(ip1 >= ilowest); + } + + { + assert(op2 <= op3); + assert(ip2 >= ilowest); + } + + { + assert(op3 <= oend); + assert(ip3 >= ilowest); + } + + { + /* Each iteration produces 5 output symbols per stream */ + nuint oiters = (nuint)(oend - op3) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. + */ + nuint iiters = (nuint)(ip0 - ilowest) / 7; + /* We can safely run iters iterations before running bounds checks */ + nuint iters = oiters < iiters ? oiters : iiters; + nuint symbols = iters * 5; + olimit = op3 + symbols; + if (op3 == olimit) + break; + { + if (ip1 < ip0) + goto _out; + } + + { + if (ip2 < ip1) + goto _out; + } + + { + if (ip3 < ip2) + goto _out; + } + } + + { + assert(ip1 >= ip0); + } + + { + assert(ip2 >= ip1); + } + + { + assert(ip3 >= ip2); + } + + do + { + { + { + /* Decode 5 symbols in each of the 4 streams */ + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[0] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[0] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[0] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[0] = (byte)(entry >> 8 & 0xFF); + } + } + + { + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[1] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[1] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[1] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[1] = (byte)(entry >> 8 & 0xFF); + 
} + } + + { + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[2] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[2] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[2] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[2] = (byte)(entry >> 8 & 0xFF); + } + } + + { + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[3] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[3] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[3] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[3] = (byte)(entry >> 8 & 0xFF); + } + } + + { + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[4] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[4] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[4] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[4] = (byte)(entry >> 8 & 0xFF); + } + } + + { + { + /* Reload each of the 4 the bitstreams */ + int ctz = (int)ZSTD_countTrailingZeros64(bits0); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op0 += 5; + ip0 -= nbBytes; + bits0 = MEM_read64(ip0) | 1; + bits0 <<= nbBits; + } + + { + int ctz = (int)ZSTD_countTrailingZeros64(bits1); + int nbBits = ctz & 7; + 
int nbBytes = ctz >> 3; + op1 += 5; + ip1 -= nbBytes; + bits1 = MEM_read64(ip1) | 1; + bits1 <<= nbBits; + } + + { + int ctz = (int)ZSTD_countTrailingZeros64(bits2); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op2 += 5; + ip2 -= nbBytes; + bits2 = MEM_read64(ip2) | 1; + bits2 <<= nbBits; + } + + { + int ctz = (int)ZSTD_countTrailingZeros64(bits3); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op3 += 5; + ip3 -= nbBytes; + bits3 = MEM_read64(ip3) | 1; + bits3 <<= nbBits; + } + } + } + while (op3 < olimit); + } + + _out: + args->bits[0] = bits0; + args->bits[1] = bits1; + args->bits[2] = bits2; + args->bits[3] = bits3; + args->ip.e0 = ip0; + args->ip.e1 = ip1; + args->ip.e2 = ip2; + args->ip.e3 = ip3; + args->op.e0 = op0; + args->op.e1 = op1; + args->op.e2 = op2; + args->op.e3 = op3; + } + + /** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ + private static nuint HUF_decompress4X1_usingDTable_internal_fast(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, void* loopFn) + { + void* dt = DTable + 1; + byte* ilowest = (byte*)cSrc; + byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); + HUF_DecompressFastArgs args; + { + nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + { + nuint err_code = ret; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (ret == 0) + return 0; + } + + assert(args.ip.e0 >= args.ilowest); + ((delegate* managed)loopFn)(&args); + assert(args.ip.e0 >= ilowest); + assert(args.ip.e0 >= ilowest); + assert(args.ip.e1 >= ilowest); + assert(args.ip.e2 >= ilowest); + assert(args.ip.e3 >= ilowest); + assert(args.op.e3 <= oend); + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend.e0); + { + nuint segmentSize = (dstSize + 3) / 4; + byte* segmentEnd = (byte*)dst; + int i; + for (i = 0; i < 4; ++i) + { + BIT_DStream_t bit; + if (segmentSize <= (nuint)(oend - 
segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + { + nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + (&args.op.e0)[i] += HUF_decodeStreamX1((&args.op.e0)[i], &bit, segmentEnd, (HUF_DEltX1*)dt, 11); + if ((&args.op.e0)[i] != segmentEnd) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + } + + assert(dstSize != 0); + return dstSize; + } + + private static nuint HUF_decompress1X1_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + { + return HUF_decompress1X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); + } + + private static nuint HUF_decompress4X1_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + { + void* fallbackFn = (delegate* managed)(&HUF_decompress4X1_usingDTable_internal_default); + void* loopFn = (delegate* managed)(&HUF_decompress4X1_usingDTable_internal_fast_c_loop); + if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) + { + nuint ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + + return ((delegate* managed)fallbackFn)(dst, dstSize, cSrc, cSrcSize, DTable); + } + + private static nuint HUF_decompress4X1_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + { + byte* ip = (byte*)cSrc; + nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); + if (ERR_isError(hSize)) + return hSize; + if (hSize >= cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + ip += hSize; + cSrcSize -= hSize; + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); + } + + /** + * Constructs a HUF_DEltX2 in a U32. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint HUF_buildDEltX2U32(uint symbol, uint nbBits, uint baseSeq, int level) + { + uint seq; + if (BitConverter.IsLittleEndian) + { + seq = level == 1 ? symbol : baseSeq + (symbol << 8); + return seq + (nbBits << 16) + ((uint)level << 24); + } + else + { + seq = level == 1 ? symbol << 8 : (baseSeq << 8) + symbol; + return (seq << 16) + (nbBits << 8) + (uint)level; + } + } + + /** + * Constructs a HUF_DEltX2. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static HUF_DEltX2 HUF_buildDEltX2(uint symbol, uint nbBits, uint baseSeq, int level) + { + HUF_DEltX2 DElt; + uint val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + memcpy(&DElt, &val, sizeof(uint)); + return DElt; + } + + /** + * Constructs 2 HUF_DEltX2s and packs them into a U64. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong HUF_buildDEltX2U64(uint symbol, uint nbBits, ushort baseSeq, int level) + { + uint DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + return DElt + ((ulong)DElt << 32); + } + + /** + * Fills the DTable rank with all the symbols from [begin, end) that are each + * nbBits long. + * + * @param DTableRank The start of the rank in the DTable. + * @param begin The first symbol to fill (inclusive). + * @param end The last symbol to fill (exclusive). + * @param nbBits Each symbol is nbBits long. + * @param tableLog The table log. + * @param baseSeq If level == 1 { 0 } else { the first level symbol } + * @param level The level in the table. Must be 1 or 2. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_fillDTableX2ForWeight(HUF_DEltX2* DTableRank, sortedSymbol_t* begin, sortedSymbol_t* end, uint nbBits, uint tableLog, ushort baseSeq, int level) + { + /* quiet static-analyzer */ + uint length = 1U << (int)(tableLog - nbBits & 0x1F); + sortedSymbol_t* ptr; + assert(level >= 1 && level <= 2); + switch (length) + { + case 1: + for (ptr = begin; ptr != end; ++ptr) + { + HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + *DTableRank++ = DElt; + } + + break; + case 2: + for (ptr = begin; ptr != end; ++ptr) + { + HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + DTableRank[0] = DElt; + DTableRank[1] = DElt; + DTableRank += 2; + } + + break; + case 4: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); + DTableRank += 4; + } + + break; + case 8: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 4, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 6, &DEltX2, sizeof(ulong)); + DTableRank += 8; + } + + break; + default: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + HUF_DEltX2* DTableRankEnd = DTableRank + length; + for (; DTableRank != DTableRankEnd; DTableRank += 8) + { + memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 4, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 6, &DEltX2, sizeof(ulong)); + } + } + + break; + } + } + + /* HUF_fillDTableX2Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ + 
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, uint targetLog, uint consumedBits, uint* rankVal, int minWeight, int maxWeight1, sortedSymbol_t* sortedSymbols, uint* rankStart, uint nbBitsBaseline, ushort baseSeq)
{
    /* Fills one second-level sub-table rooted at baseSeq: first pre-fills the slots that
     * cannot hold a second symbol with a level-1 entry for baseSeq alone, then fills the
     * remaining slots with two-symbol (level-2) entries for each eligible weight. */
    if (minWeight > 1)
    {
        /* quiet static-analyzer */
        uint length = 1U << (int)(targetLog - consumedBits & 0x1F);
        /* baseSeq */
        ulong DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, 0, 1);
        int skipSize = (int)rankVal[minWeight];
        assert(length > 1);
        assert((uint)skipSize < length);
        switch (length)
        {
            case 2:
                assert(skipSize == 1);
                memcpy(DTable, &DEltX2, sizeof(ulong));
                break;
            case 4:
                assert(skipSize <= 4);
                memcpy(DTable + 0, &DEltX2, sizeof(ulong));
                memcpy(DTable + 2, &DEltX2, sizeof(ulong));
                break;
            default:
                {
                    int i;
                    /* May overwrite up to 7 entries past skipSize; those slots are
                     * rewritten by the level-2 fill below. */
                    for (i = 0; i < skipSize; i += 8)
                    {
                        memcpy(DTable + i + 0, &DEltX2, sizeof(ulong));
                        memcpy(DTable + i + 2, &DEltX2, sizeof(ulong));
                        memcpy(DTable + i + 4, &DEltX2, sizeof(ulong));
                        memcpy(DTable + i + 6, &DEltX2, sizeof(ulong));
                    }
                }

                break;
        }
    }

    {
        int w;
        for (w = minWeight; w < maxWeight1; ++w)
        {
            int begin = (int)rankStart[w];
            int end = (int)rankStart[w + 1];
            uint nbBits = nbBitsBaseline - (uint)w;
            /* Second symbol consumes nbBits on top of the consumedBits of the first. */
            uint totalBits = nbBits + consumedBits;
            HUF_fillDTableX2ForWeight(DTable + rankVal[w], sortedSymbols + begin, sortedSymbols + end, totalBits, targetLog, baseSeq, 2);
        }
    }
}

/* Builds the complete double-symbol (X2) decode table from the weight-sorted symbol list:
 * symbols whose code leaves room for a second symbol get a level-2 sub-table, the rest
 * get plain level-1 runs. */
private static void HUF_fillDTableX2(HUF_DEltX2* DTable, uint targetLog, sortedSymbol_t* sortedList, uint* rankStart, rankValCol_t* rankValOrigin, uint maxWeight, uint nbBitsBaseline)
{
    uint* rankVal = (uint*)&rankValOrigin[0];
    /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    int scaleLog = (int)(nbBitsBaseline - targetLog);
    uint minBits = nbBitsBaseline - maxWeight;
    int w;
    int wEnd = (int)maxWeight + 1;
    for (w = 1; w < wEnd; ++w)
    {
        int begin = (int)rankStart[w];
        int end = (int)rankStart[w + 1];
        uint nbBits = nbBitsBaseline - (uint)w;
        if (targetLog - nbBits >= minBits)
        {
            /* Enough room for a second symbol. */
            int start = (int)rankVal[w];
            /* quiet static-analyzer */
            uint length = 1U << (int)(targetLog - nbBits & 0x1F);
            int minWeight = (int)(nbBits + (uint)scaleLog);
            int s;
            if (minWeight < 1)
                minWeight = 1;
            for (s = begin; s != end; ++s)
            {
                HUF_fillDTableX2Level2(DTable + start, targetLog, nbBits, (uint*)&rankValOrigin[nbBits], minWeight, wEnd, sortedList, rankStart, nbBitsBaseline, sortedList[s].symbol);
                start += (int)length;
            }
        }
        else
        {
            HUF_fillDTableX2ForWeight(DTable + rankVal[w], sortedList + begin, sortedList + end, nbBits, targetLog, 0, 1);
        }
    }
}

/* Reads the Huffman weight header from src and builds the X2 DTable in-place.
 * Returns the header size consumed, or a zstd error code (negated, as nuint). */
private static nuint HUF_readDTableX2_wksp(uint* DTable, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags)
{
    uint tableLog, maxW, nbSymbols;
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    uint maxTableLog = dtd.maxTableLog;
    nuint iSize;
    /* force compiler to avoid strict-aliasing */
    void* dtPtr = DTable + 1;
    HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr;
    uint* rankStart;
    HUF_ReadDTableX2_Workspace* wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
    if ((nuint)sizeof(HUF_ReadDTableX2_Workspace) > wkspSize)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
    rankStart = wksp->rankStart0 + 1;
    memset(wksp->rankStats, 0, sizeof(uint) * 13);
    memset(wksp->rankStart0, 0, sizeof(uint) * 15);
    if (maxTableLog > 12)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
    iSize = HUF_readStats_wksp(wksp->weightList, 255 + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(uint) * 219, flags);
    if (ERR_isError(iSize))
        return iSize;
    if (tableLog > maxTableLog)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge));
    if (tableLog <= 11 && maxTableLog > 11)
        maxTableLog = 11;
    /* find maxWeight: highest weight with a non-zero symbol count */
    for (maxW = tableLog; wksp->rankStats[maxW] == 0; maxW--)
{
}

{
    /* Prefix-sum the per-weight counts into starting offsets for the sort below. */
    uint w, nextRankStart = 0;
    for (w = 1; w < maxW + 1; w++)
    {
        uint curr = nextRankStart;
        nextRankStart += wksp->rankStats[w];
        rankStart[w] = curr;
    }

    rankStart[0] = nextRankStart;
    rankStart[maxW + 1] = nextRankStart;
}

{
    /* Counting sort: scatter symbols into sortedSymbol by weight. */
    uint s;
    for (s = 0; s < nbSymbols; s++)
    {
        uint w = wksp->weightList[s];
        uint r = rankStart[w]++;
        (&wksp->sortedSymbol.e0)[r].symbol = (byte)s;
    }

    rankStart[0] = 0;
}

{
    uint* rankVal0 = (uint*)&wksp->rankVal.e0;
    {
        /* tableLog <= maxTableLog */
        int rescale = (int)(maxTableLog - tableLog - 1);
        uint nextRankVal = 0;
        uint w;
        for (w = 1; w < maxW + 1; w++)
        {
            uint curr = nextRankVal;
            nextRankVal += wksp->rankStats[w] << (int)(w + (uint)rescale);
            rankVal0[w] = curr;
        }
    }

    {
        /* Pre-scale rankVal for every possible number of already-consumed bits,
         * so second-level fills can index rankValOrigin[consumed] directly. */
        uint minBits = tableLog + 1 - maxW;
        uint consumed;
        for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++)
        {
            uint* rankValPtr = (uint*)&(&wksp->rankVal.e0)[consumed];
            uint w;
            for (w = 1; w < maxW + 1; w++)
            {
                rankValPtr[w] = rankVal0[w] >> (int)consumed;
            }
        }
    }
}

HUF_fillDTableX2(dt, maxTableLog, &wksp->sortedSymbol.e0, wksp->rankStart0, &wksp->rankVal.e0, maxW, tableLog + 1);
dtd.tableLog = (byte)maxTableLog;
dtd.tableType = 1;
memcpy(DTable, &dtd, (uint)sizeof(DTableDesc));
return iSize;
}

/* Decodes one X2 entry: writes up to 2 bytes at op and returns the number written (1 or 2). */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, HUF_DEltX2* dt, uint dtLog)
{
    /* note : dtLog >= 1 */
    nuint val = BIT_lookBitsFast(DStream, dtLog);
    memcpy(op, &dt[val].sequence, 2);
    BIT_skipBits(DStream, dt[val].nbBits);
    return dt[val].length;
}

/* Like HUF_decodeSymbolX2 but writes only 1 byte (the final output byte of a stream);
 * clamps bitsConsumed so an over-read at stream end is not treated as corruption here. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, HUF_DEltX2* dt, uint dtLog)
{
    /* note : dtLog >= 1 */
    nuint val = BIT_lookBitsFast(DStream, dtLog);
    memcpy(op, &dt[val].sequence, 1);
    if (dt[val].length == 1)
    {
        BIT_skipBits(DStream, dt[val].nbBits);
    }
    else
    {
        if (DStream->bitsConsumed < (uint)(sizeof(nuint) * 8))
        {
            BIT_skipBits(DStream, dt[val].nbBits);
            if (DStream->bitsConsumed > (uint)(sizeof(nuint) * 8))
                DStream->bitsConsumed = (uint)(sizeof(nuint) * 8);
        }
    }

    return 1;
}

/* Decodes one bitstream into [p, pEnd): unrolled main loop while the stream reloads
 * cleanly, then a 2-byte-granularity wind-down, then one final 1-byte symbol.
 * Returns the number of bytes written. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_decodeStreamX2(byte* p, BIT_DStream_t* bitDPtr, byte* pEnd, HUF_DEltX2* dt, uint dtLog)
{
    byte* pStart = p;
    if ((nuint)(pEnd - p) >= (nuint)sizeof(nuint))
    {
        if (dtLog <= 11 && MEM_64bits)
        {
            /* 5 symbols (up to 10 bytes) per reload; pEnd - 9 keeps the writes in bounds. */
            while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - 9)
            {
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
            }
        }
        else
        {
            while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - (sizeof(nuint) - 1))
            {
                if (MEM_64bits)
                    p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                if (MEM_64bits)
                    p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
                p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
            }
        }
    }
    else
    {
        BIT_reloadDStream(bitDPtr);
    }

    if ((nuint)(pEnd - p) >= 2)
    {
        while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p <= pEnd - 2)
            p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
        while (p <= pEnd - 2)
            p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog);
    }

    if (p < pEnd)
        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
    return (nuint)(p - pStart);
}

/* Single-stream X2 decompression: one bitstream covering the whole destination. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_decompress1X2_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable)
{
    BIT_DStream_t bitD;
    {
        /* Init */
        nuint _var_err__ =
BIT_initDStream(&bitD, cSrc, cSrcSize);
        if (ERR_isError(_var_err__))
            return _var_err__;
    }

    {
        byte* ostart = (byte*)dst;
        byte* oend = ZSTD_maybeNullPtrAdd(ostart, (nint)dstSize);
        /* force compiler to not use strict-aliasing */
        void* dtPtr = DTable + 1;
        HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr;
        DTableDesc dtd = HUF_getDTableDesc(DTable);
        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
    }

    /* The bitstream must be fully and exactly consumed, otherwise the input was corrupt. */
    if (BIT_endOfDStream(&bitD) == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    return dstSize;
}

/* HUF_decompress4X2_usingDTable_internal_body():
 * Conditions:
 * @dstSize >= 6
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable)
{
    /* 4-stream layout: a 6-byte jump table (3 little-endian lengths) followed by
     * 4 independent bitstreams, each decoding one quarter of the output. */
    if (cSrcSize < 10)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    if (dstSize < 6)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    {
        byte* istart = (byte*)cSrc;
        byte* ostart = (byte*)dst;
        byte* oend = ostart + dstSize;
        byte* olimit = oend - (sizeof(nuint) - 1);
        void* dtPtr = DTable + 1;
        HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr;
        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        nuint length1 = MEM_readLE16(istart);
        nuint length2 = MEM_readLE16(istart + 2);
        nuint length3 = MEM_readLE16(istart + 4);
        /* length of the 4th stream is implied by the total */
        nuint length4 = cSrcSize - (length1 + length2 + length3 + 6);
        /* jumpTable */
        byte* istart1 = istart + 6;
        byte* istart2 = istart1 + length1;
        byte* istart3 = istart2 + length2;
        byte* istart4 = istart3 + length3;
        nuint segmentSize = (dstSize + 3) / 4;
        byte* opStart2 = ostart + segmentSize;
        byte* opStart3 = opStart2 + segmentSize;
        byte* opStart4 = opStart3 + segmentSize;
        byte* op1 = ostart;
        byte* op2 = opStart2;
        byte* op3 = opStart3;
        byte* op4 = opStart4;
        uint endSignal = 1;
        DTableDesc dtd = HUF_getDTableDesc(DTable);
        uint dtLog = dtd.tableLog;
        if (length4 > cSrcSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        if (opStart4 > oend)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        assert(dstSize >= 6);
        {
            nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1);
            if (ERR_isError(_var_err__))
                return _var_err__;
        }

        {
            nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2);
            if (ERR_isError(_var_err__))
                return _var_err__;
        }

        {
            nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3);
            if (ERR_isError(_var_err__))
                return _var_err__;
        }

        {
            nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4);
            if (ERR_isError(_var_err__))
                return _var_err__;
        }

        /* Interleaved main loop: 4 symbols per stream per iteration (2 on 32-bit),
         * stopping as soon as any stream fails a fast reload. */
        if ((nuint)(oend - op4) >= (nuint)sizeof(nuint))
        {
            for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0;)
            {
                if (MEM_64bits)
                    op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog);
                op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog);
                if (MEM_64bits)
                    op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog);
                op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog);
                if (MEM_64bits)
                    op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog);
                op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog);
                if (MEM_64bits)
                    op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog);
                op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog);
                endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U;
                endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U;
                if (MEM_64bits)
                    op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog);
                op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog);
                if (MEM_64bits)
                    op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog);
                op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog);
                if (MEM_64bits)
                    op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog);
                op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog);
                if (MEM_64bits)
                    op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog);
                op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog);
                endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U;
                endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U;
            }
        }

        /* No stream may overflow into its neighbour's segment. */
        if (op1 > opStart2)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        if (op2 > opStart3)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        if (op3 > opStart4)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        /* Finish each stream carefully with the scalar tail decoder. */
        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
        {
            uint endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
            if (endCheck == 0)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        }

        return dstSize;
    }
}

/* Plain (non-fast-loop) fallback entry point; matches the fallback function-pointer signature. */
private static nuint HUF_decompress4X2_usingDTable_internal_default(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable)
{
    return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}

/* Hot loop of the fast 4-stream X2 decoder: decodes 5 symbols per stream per iteration,
 * keeping all 4 bit-buffers in locals. 64-bit little-endian only (asserted below). */
private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
{
    ulong bits0, bits1, bits2, bits3;
    byte* ip0, ip1, ip2, ip3;
    byte* op0, op1, op2, op3;
    byte* oend0, oend1, oend2, oend3;
HUF_DEltX2* dtable = (HUF_DEltX2*)args->dt;
byte* ilowest = args->ilowest;
/* Load all per-stream state into locals for the duration of the loop. */
bits0 = args->bits[0];
bits1 = args->bits[1];
bits2 = args->bits[2];
bits3 = args->bits[3];
ip0 = args->ip.e0;
ip1 = args->ip.e1;
ip2 = args->ip.e2;
ip3 = args->ip.e3;
op0 = args->op.e0;
op1 = args->op.e1;
op2 = args->op.e2;
op3 = args->op.e3;
/* Each stream's output region ends where the next stream's begins. */
oend0 = op1;
oend1 = op2;
oend2 = op3;
oend3 = args->oend;
assert(BitConverter.IsLittleEndian);
assert(!MEM_32bits);
for (; ; )
{
    byte* olimit;
    /* NOTE(review): 'stream' is unused — appears to be left over from the C macro
     * expansion in upstream zstd; confirm before removing. */
    int stream;
    {
        assert(op0 <= oend0);
        assert(ip0 >= ilowest);
    }

    {
        assert(op1 <= oend1);
        assert(ip1 >= ilowest);
    }

    {
        assert(op2 <= oend2);
        assert(ip2 >= ilowest);
    }

    {
        assert(op3 <= oend3);
        assert(ip3 >= ilowest);
    }

    {
        /* Each loop does 5 table lookups for each of the 4 streams.
         * Each table lookup consumes up to 11 bits of input, and produces
         * up to 2 bytes of output.
         */
        /* We can consume up to 7 bytes of input per iteration per stream.
         * We also know that each input pointer is >= ip[0]. So we can run
         * iters loops before running out of input.
         */
        nuint iters = (nuint)(ip0 - ilowest) / 7;
        {
            nuint oiters = (nuint)(oend0 - op0) / 10;
            iters = iters < oiters ? iters : oiters;
        }

        {
            nuint oiters = (nuint)(oend1 - op1) / 10;
            iters = iters < oiters ? iters : oiters;
        }

        {
            nuint oiters = (nuint)(oend2 - op2) / 10;
            iters = iters < oiters ? iters : oiters;
        }

        {
            nuint oiters = (nuint)(oend3 - op3) / 10;
            iters = iters < oiters ? iters : oiters;
        }

        olimit = op3 + iters * 5;
        if (op3 == olimit)
            break;
        /* Input pointers must stay ordered; otherwise fall back to the careful path. */
        {
            if (ip1 < ip0)
                goto _out;
        }

        {
            if (ip2 < ip1)
                goto _out;
        }

        {
            if (ip3 < ip2)
                goto _out;
        }
    }

    {
        assert(ip1 >= ip0);
    }

    {
        assert(ip2 >= ip1);
    }

    {
        assert(ip3 >= ip2);
    }

    do
    {
        {
            {
                /* Decode 5 symbols from each of the first 3 streams.
                 * The final stream will be decoded during the reload phase
                 * to reduce register pressure.
                 */
                /* Top 11 bits (64 - 53) of the shifted buffer index the table. */
                int index = (int)(bits0 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op0, entry.sequence);
                bits0 <<= entry.nbBits & 0x3F;
                op0 += entry.length;
            }

            {
                int index = (int)(bits1 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op1, entry.sequence);
                bits1 <<= entry.nbBits & 0x3F;
                op1 += entry.length;
            }

            {
                int index = (int)(bits2 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op2, entry.sequence);
                bits2 <<= entry.nbBits & 0x3F;
                op2 += entry.length;
            }
        }

        {
            {
                int index = (int)(bits0 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op0, entry.sequence);
                bits0 <<= entry.nbBits & 0x3F;
                op0 += entry.length;
            }

            {
                int index = (int)(bits1 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op1, entry.sequence);
                bits1 <<= entry.nbBits & 0x3F;
                op1 += entry.length;
            }

            {
                int index = (int)(bits2 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op2, entry.sequence);
                bits2 <<= entry.nbBits & 0x3F;
                op2 += entry.length;
            }
        }

        {
            {
                int index = (int)(bits0 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op0, entry.sequence);
                bits0 <<= entry.nbBits & 0x3F;
                op0 += entry.length;
            }

            {
                int index = (int)(bits1 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op1, entry.sequence);
                bits1 <<= entry.nbBits & 0x3F;
                op1 += entry.length;
            }

            {
                int index = (int)(bits2 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op2, entry.sequence);
                bits2 <<= entry.nbBits & 0x3F;
                op2 += entry.length;
            }
        }

        {
            {
                int index = (int)(bits0 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op0, entry.sequence);
                bits0 <<= entry.nbBits & 0x3F;
                op0 += entry.length;
            }

            {
                int index = (int)(bits1 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op1, entry.sequence);
                bits1 <<= entry.nbBits & 0x3F;
                op1 += entry.length;
            }

            {
                int index = (int)(bits2 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op2, entry.sequence);
                bits2 <<= entry.nbBits & 0x3F;
                op2 += entry.length;
            }
        }

        {
            {
                int index = (int)(bits0 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op0, entry.sequence);
                bits0 <<= entry.nbBits & 0x3F;
                op0 += entry.length;
            }

            {
                int index = (int)(bits1 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op1, entry.sequence);
                bits1 <<= entry.nbBits & 0x3F;
                op1 += entry.length;
            }

            {
                int index = (int)(bits2 >> 53);
                HUF_DEltX2 entry = dtable[index];
                MEM_write16(op2, entry.sequence);
                bits2 <<= entry.nbBits & 0x3F;
                op2 += entry.length;
            }
        }

        {
            /* Decode one symbol from the final stream */
            int index = (int)(bits3 >> 53);
            HUF_DEltX2 entry = dtable[index];
            MEM_write16(op3, entry.sequence);
            bits3 <<= entry.nbBits & 0x3F;
            op3 += entry.length;
        }

        {
            {
                {
                    /* Decode 4 symbols from the final stream & reload bitstreams.
                     * The final stream is reloaded last, meaning that all 5 symbols
                     * are decoded from the final stream before it is reloaded.
                     */
                    int index = (int)(bits3 >> 53);
                    HUF_DEltX2 entry = dtable[index];
                    MEM_write16(op3, entry.sequence);
                    bits3 <<= entry.nbBits & 0x3F;
                    op3 += entry.length;
                }

                {
                    /* Refill: the low set bit (the `| 1` sentinel) marks how many bits were
                     * consumed, so ctz splits into whole bytes to rewind and leftover bits. */
                    int ctz = (int)ZSTD_countTrailingZeros64(bits0);
                    int nbBits = ctz & 7;
                    int nbBytes = ctz >> 3;
                    ip0 -= nbBytes;
                    bits0 = MEM_read64(ip0) | 1;
                    bits0 <<= nbBits;
                }
            }

            {
                {
                    int index = (int)(bits3 >> 53);
                    HUF_DEltX2 entry = dtable[index];
                    MEM_write16(op3, entry.sequence);
                    bits3 <<= entry.nbBits & 0x3F;
                    op3 += entry.length;
                }

                {
                    int ctz = (int)ZSTD_countTrailingZeros64(bits1);
                    int nbBits = ctz & 7;
                    int nbBytes = ctz >> 3;
                    ip1 -= nbBytes;
                    bits1 = MEM_read64(ip1) | 1;
                    bits1 <<= nbBits;
                }
            }

            {
                {
                    int index = (int)(bits3 >> 53);
                    HUF_DEltX2 entry = dtable[index];
                    MEM_write16(op3, entry.sequence);
                    bits3 <<= entry.nbBits & 0x3F;
                    op3 += entry.length;
                }

                {
                    int ctz = (int)ZSTD_countTrailingZeros64(bits2);
                    int nbBits = ctz & 7;
                    int nbBytes = ctz >> 3;
                    ip2 -= nbBytes;
                    bits2 = MEM_read64(ip2) | 1;
                    bits2 <<= nbBits;
                }
            }

            {
                {
                    int index = (int)(bits3 >> 53);
                    HUF_DEltX2 entry = dtable[index];
                    MEM_write16(op3, entry.sequence);
                    bits3 <<= entry.nbBits & 0x3F;
                    op3 += entry.length;
                }

                {
                    int ctz = (int)ZSTD_countTrailingZeros64(bits3);
                    int nbBits = ctz & 7;
                    int nbBytes = ctz >> 3;
                    ip3 -= nbBytes;
                    bits3 = MEM_read64(ip3) | 1;
                    bits3 <<= nbBits;
                }
            }
        }
    }
    while (op3 < olimit);
}

_out:
/* Write the local stream state back so the caller can finish the tails. */
args->bits[0] = bits0;
args->bits[1] = bits1;
args->bits[2] = bits2;
args->bits[3] = bits3;
args->ip.e0 = ip0;
args->ip.e1 = ip1;
args->ip.e2 = ip2;
args->ip.e3 = ip3;
args->op.e0 = op0;
args->op.e1 = op1;
args->op.e2 = op2;
args->op.e3 = op3;
}

/* Driver for the fast 4-stream path: runs loopFn for the bulk, then finishes each
 * stream's tail with the careful scalar decoder. Returns 0 when the fast path
 * cannot be used (caller falls back), dstSize on success, or an error code. */
private static nuint HUF_decompress4X2_usingDTable_internal_fast(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, void* loopFn)
{
    void* dt = DTable + 1;
    byte* ilowest = (byte*)cSrc;
    byte* oend =
ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize);
    HUF_DecompressFastArgs args;
    {
        nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
        {
            nuint err_code = ret;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        /* ret == 0 means the input is unsuitable for the fast loop; signal fallback. */
        if (ret == 0)
            return 0;
    }

    assert(args.ip.e0 >= args.ilowest);
    /* NOTE(review): function-pointer signature reconstructed — the generic arguments were
     * lost in extraction; loopFn is produced from HUF_decompress4X2_usingDTable_internal_fast_c_loop. */
    ((delegate* managed<HUF_DecompressFastArgs*, void>)loopFn)(&args);
    assert(args.ip.e0 >= ilowest);
    assert(args.ip.e1 >= ilowest);
    assert(args.ip.e2 >= ilowest);
    assert(args.ip.e3 >= ilowest);
    assert(args.op.e3 <= oend);
    assert(ilowest == args.ilowest);
    assert(ilowest + 6 == args.iend.e0);
    {
        /* Finish the tail of each of the 4 segments with the scalar decoder. */
        nuint segmentSize = (dstSize + 3) / 4;
        byte* segmentEnd = (byte*)dst;
        int i;
        for (i = 0; i < 4; ++i)
        {
            BIT_DStream_t bit;
            if (segmentSize <= (nuint)(oend - segmentEnd))
                segmentEnd += segmentSize;
            else
                segmentEnd = oend;
            {
                nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            (&args.op.e0)[i] += HUF_decodeStreamX2((&args.op.e0)[i], &bit, segmentEnd, (HUF_DEltX2*)dt, 11);
            if ((&args.op.e0)[i] != segmentEnd)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        }
    }

    return dstSize;
}

/* Dispatch: try the fast 4X2 path unless disabled by flags; fall back to the
 * portable implementation when the fast path declines (returns 0). */
private static nuint HUF_decompress4X2_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags)
{
    /* NOTE(review): delegate* generic arguments reconstructed (lost in extraction). */
    void* fallbackFn = (delegate* managed<void*, nuint, void*, nuint, uint*, nuint>)(&HUF_decompress4X2_usingDTable_internal_default);
    void* loopFn = (delegate* managed<HUF_DecompressFastArgs*, void>)(&HUF_decompress4X2_usingDTable_internal_fast_c_loop);
    if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0)
    {
        nuint ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
        if (ret != 0)
            return ret;
    }

    return ((delegate* managed<void*, nuint, void*, nuint, uint*, nuint>)fallbackFn)(dst, dstSize, cSrc, cSrcSize, DTable);
}

/* Single-stream X2 dispatch (no fast variant exists; flags currently unused here). */
private static nuint HUF_decompress1X2_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags)
{
    return HUF_decompress1X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}

/* Reads the table header, then decompresses the remainder as one X2 stream. */
private static nuint HUF_decompress1X2_DCtx_wksp(uint* DCtx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags)
{
    byte* ip = (byte*)cSrc;
    nuint hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize, flags);
    if (ERR_isError(hSize))
        return hSize;
    if (hSize >= cSrcSize)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    ip += hSize;
    cSrcSize -= hSize;
    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags);
}

/* Reads the table header, then decompresses the remainder as four X2 streams. */
private static nuint HUF_decompress4X2_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags)
{
    byte* ip = (byte*)cSrc;
    nuint hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
    if (ERR_isError(hSize))
        return hSize;
    if (hSize >= cSrcSize)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    ip += hSize;
    cSrcSize -= hSize;
    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}

/* Pre-computed cost metrics indexed by [compression ratio quantile][decoder: 0=X1, 1=X2];
 * consumed by HUF_selectDecoder below. */
private static readonly algo_time_t[][] algoTime = new algo_time_t[16][]
{
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 0, decode256Time: 0),
        new algo_time_t(tableTime: 1, decode256Time: 1)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 0, decode256Time: 0),
        new algo_time_t(tableTime: 1, decode256Time: 1)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 150, decode256Time: 216),
        new algo_time_t(tableTime: 381, decode256Time: 119)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 170, decode256Time: 205),
        new algo_time_t(tableTime: 514, decode256Time: 112)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 177, decode256Time: 199),
        new algo_time_t(tableTime: 539, decode256Time: 110)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 197, decode256Time: 194),
        new algo_time_t(tableTime: 644, decode256Time: 107)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 221, decode256Time: 192),
        new algo_time_t(tableTime: 735, decode256Time: 107)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 256, decode256Time: 189),
        new algo_time_t(tableTime: 881, decode256Time: 106)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 359, decode256Time: 188),
        new algo_time_t(tableTime: 1167, decode256Time: 109)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 582, decode256Time: 187),
        new algo_time_t(tableTime: 1570, decode256Time: 114)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 688, decode256Time: 187),
        new algo_time_t(tableTime: 1712, decode256Time: 122)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 825, decode256Time: 186),
        new algo_time_t(tableTime: 1965, decode256Time: 136)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 976, decode256Time: 185),
        new algo_time_t(tableTime: 2131, decode256Time: 150)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 1180, decode256Time: 186),
        new algo_time_t(tableTime: 2070, decode256Time: 175)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 1377, decode256Time: 185),
        new algo_time_t(tableTime: 1731, decode256Time: 202)
    },
    new algo_time_t[2]
    {
        new algo_time_t(tableTime: 1412, decode256Time: 185),
        new algo_time_t(tableTime: 1695, decode256Time: 202)
    }
};
/** HUF_selectDecoder() :
 * Tells which decoder is likely to decode faster,
 * based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
 * Assumption : 0 < dstSize <= 128 KB */
private static uint HUF_selectDecoder(nuint dstSize, nuint cSrcSize)
{
    assert(dstSize > 0);
    assert(dstSize <= 128 * 1024);
    {
        /* Q < 16 */
        /* Q = compression-ratio quantile in [0,15]; D256 = output size in 256-byte units. */
        uint Q = cSrcSize >= dstSize ? 15 : (uint)(cSrcSize * 16 / dstSize);
        uint D256 = (uint)(dstSize >> 8);
        uint DTime0 = algoTime[Q][0].tableTime + algoTime[Q][0].decode256Time * D256;
        uint DTime1 = algoTime[Q][1].tableTime + algoTime[Q][1].decode256Time * D256;
        /* Advantage the X1 (single-symbol) decoder by ~3% before comparing. */
        DTime1 += DTime1 >> 5;
        return DTime1 < DTime0 ? 1U : 0U;
    }
}

/* Single-stream front-end: handles the trivial cases (uncompressed copy, RLE of one
 * byte), then routes to the X1 or X2 decoder chosen by HUF_selectDecoder. */
private static nuint HUF_decompress1X_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags)
{
    if (dstSize == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    if (cSrcSize > dstSize)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    if (cSrcSize == dstSize)
    {
        /* not compressed: straight copy */
        memcpy(dst, cSrc, (uint)dstSize);
        return dstSize;
    }

    if (cSrcSize == 1)
    {
        /* RLE: a single byte repeated */
        memset(dst, *(byte*)cSrc, (uint)dstSize);
        return dstSize;
    }

    {
        uint algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb != 0 ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
    }
}

/* BMI2 variants.
 * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
private static nuint HUF_decompress1X_usingDTable(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags)
{
    /* tableType selects X2 (1) vs X1 (0), as recorded by the table builder. */
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType != 0 ?
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
}

/* Reads the X1 table header, then decompresses the remainder as one X1 stream. */
private static nuint HUF_decompress1X1_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags)
{
    byte* ip = (byte*)cSrc;
    nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
    if (ERR_isError(hSize))
        return hSize;
    if (hSize >= cSrcSize)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    ip += hSize;
    cSrcSize -= hSize;
    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}

/* 4-stream dispatch on a pre-built table: tableType selects X2 (1) vs X1 (0). */
private static nuint HUF_decompress4X_usingDTable(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType != 0 ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
}

/* 4-stream front-end ("hufOnly": no uncompressed/RLE shortcuts); picks X1 or X2
 * via HUF_selectDecoder and decompresses header + streams in one call. */
private static nuint HUF_decompress4X_hufOnly_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags)
{
    if (dstSize == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    if (cSrcSize == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    {
        uint algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb != 0 ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
    }
}
}
}
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs
new file mode 100644
index 000000000..1f918fff1
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs
@@ -0,0 +1,161 @@
using System;
using System.Buffers.Binary;
using System.Runtime.CompilerServices;
using BclUnsafe = System.Runtime.CompilerServices.Unsafe;

// ReSharper disable InconsistentNaming
// ReSharper disable IdentifierTypo

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /*-**************************************************************
         * Memory I/O API
         *****************************************************************/
        /*=== Static platform detection ===*/
        /* True when the native pointer size is 4 bytes (32-bit process). */
        private static bool MEM_32bits
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get => sizeof(nint) == 4;
        }

        /* True when the native pointer size is 8 bytes (64-bit process). */
        private static bool MEM_64bits
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            get => sizeof(nint) == 8;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        /* default method, safe and standard.
can sometimes prove slower */
/* NOTE(review): the generic type arguments on ReadUnaligned below were reconstructed —
 * they were stripped by the text extraction. */
private static ushort MEM_read16(void* memPtr) => BclUnsafe.ReadUnaligned<ushort>(memPtr);

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_read32(void* memPtr) => BclUnsafe.ReadUnaligned<uint>(memPtr);

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ulong MEM_read64(void* memPtr) => BclUnsafe.ReadUnaligned<ulong>(memPtr);

/* Reads one native-word-sized ("ST" = size_t) value. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint MEM_readST(void* memPtr) => BclUnsafe.ReadUnaligned<nuint>(memPtr);

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_write16(void* memPtr, ushort value) => BclUnsafe.WriteUnaligned(memPtr, value);

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_write64(void* memPtr, ulong value) => BclUnsafe.WriteUnaligned(memPtr, value);

/*=== Little endian r/w ===*/
/* Each LE accessor byte-swaps only on big-endian hosts, so memory always holds
 * the little-endian representation the zstd format requires. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ushort MEM_readLE16(void* memPtr)
{
    var val = BclUnsafe.ReadUnaligned<ushort>(memPtr);
    if (!BitConverter.IsLittleEndian)
    {
        val = BinaryPrimitives.ReverseEndianness(val);
    }
    return val;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE16(void* memPtr, ushort val)
{
    if (!BitConverter.IsLittleEndian)
    {
        val = BinaryPrimitives.ReverseEndianness(val);
    }
    BclUnsafe.WriteUnaligned(memPtr, val);
}

/* 3-byte little-endian read: low 16 bits plus a third byte shifted into bits 16-23. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_readLE24(void* memPtr) =>
    (uint)(MEM_readLE16(memPtr) + (((byte*)memPtr)[2] << 16));

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE24(void* memPtr, uint val)
{
    MEM_writeLE16(memPtr, (ushort)val);
    ((byte*)memPtr)[2] = (byte)(val >> 16);
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint MEM_readLE32(void* memPtr)
{
    var val = BclUnsafe.ReadUnaligned<uint>(memPtr);
    if (!BitConverter.IsLittleEndian)
    {
        val = BinaryPrimitives.ReverseEndianness(val);
    }
    return val;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE32(void* memPtr, uint val32)
{
    if (!BitConverter.IsLittleEndian)
    {
        val32 = BinaryPrimitives.ReverseEndianness(val32);
    }
    BclUnsafe.WriteUnaligned(memPtr, val32);
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ulong MEM_readLE64(void* memPtr)
{
    var val = BclUnsafe.ReadUnaligned<ulong>(memPtr);
    if (!BitConverter.IsLittleEndian)
    {
        val = BinaryPrimitives.ReverseEndianness(val);
    }
    return val;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLE64(void* memPtr, ulong val64)
{
    if (!BitConverter.IsLittleEndian)
    {
        val64 = BinaryPrimitives.ReverseEndianness(val64);
    }
    BclUnsafe.WriteUnaligned(memPtr, val64);
}

#if !NET8_0_OR_GREATER
/* Pre-.NET 8 shim: BinaryPrimitives.ReverseEndianness has no nuint overload there,
 * so dispatch on the word size manually. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint ReverseEndiannessNative(nuint val) =>
    MEM_32bits
        ? BinaryPrimitives.ReverseEndianness((uint) val)
        : (nuint) BinaryPrimitives.ReverseEndianness(val);
#endif

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static nuint MEM_readLEST(void* memPtr)
{
    var val = BclUnsafe.ReadUnaligned<nuint>(memPtr);
    if (!BitConverter.IsLittleEndian)
    {
#if NET8_0_OR_GREATER
        val = BinaryPrimitives.ReverseEndianness(val);
#else
        val = ReverseEndiannessNative(val);
#endif
    }
    return val;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void MEM_writeLEST(void* memPtr, nuint val)
{
    if (!BitConverter.IsLittleEndian)
    {
#if NET8_0_OR_GREATER
        val = BinaryPrimitives.ReverseEndianness(val);
#else
        val = ReverseEndiannessNative(val);
#endif
    }
    BclUnsafe.WriteUnaligned(memPtr, val);
}
    }
}
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs
new file mode 100644
index 000000000..d2065d07d
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs
@@ -0,0 +1,9 @@
namespace ZstdSharp.Unsafe
{
    /* State carried between updates of a rolling hash — presumably used for
     * rsync-friendly chunk boundaries (field names suggest a polynomial rolling
     * hash; confirm against the compressor's serial state usage). */
    public struct RSyncState_t
    {
        public ulong hash;
        public ulong hitMask;
        public ulong primePower;
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs
new file mode 100644
index 000000000..bc0c8d34b
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs
@@ -0,0 +1,14 @@
namespace ZstdSharp.Unsafe
{
    /* ==== Serial State ==== */
    /* A simple (pointer, byte-count) view over a region of memory. */
    public unsafe struct Range
    {
        public void* start;
        public nuint size;
        public Range(void* start, nuint size)
        {
            this.start = start;
            this.size = size;
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs
new file mode 100644
index 000000000..0e2334cd2
--- /dev/null
+++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs @@ -0,0 +1,25 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct RawSeqStore_t + { + /* The start of the sequences */ + public rawSeq* seq; + /* The index in seq where reading stopped. pos <= size. */ + public nuint pos; + /* The position within the sequence at seq[pos] where reading + stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ + public nuint posInSequence; + /* The number of sequences. <= capacity. */ + public nuint size; + /* The capacity starting from `seq` pointer */ + public nuint capacity; + public RawSeqStore_t(rawSeq* seq, nuint pos, nuint posInSequence, nuint size, nuint capacity) + { + this.seq = seq; + this.pos = pos; + this.posInSequence = posInSequence; + this.size = size; + this.capacity = capacity; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs new file mode 100644 index 000000000..22631e10f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs @@ -0,0 +1,26 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct RoundBuff_t + { + /* The round input buffer. All jobs get references + * to pieces of the buffer. ZSTDMT_tryGetInputRange() + * handles handing out job input buffers, and makes + * sure it doesn't overlap with any pieces still in use. + */ + public byte* buffer; + /* The capacity of buffer. */ + public nuint capacity; + /* The position of the current inBuff in the round + * buffer. Updated past the end if the inBuff once + * the inBuff is sent to the worker thread. + * pos <= capacity. 
+ */ + public nuint pos; + public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) + { + this.buffer = buffer; + this.capacity = capacity; + this.pos = pos; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs new file mode 100644 index 000000000..883517c93 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct SeqCollector + { + public int collectSequences; + public ZSTD_Sequence* seqStart; + public nuint seqIndex; + public nuint maxSequences; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs new file mode 100644 index 000000000..660f38661 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs @@ -0,0 +1,14 @@ +namespace ZstdSharp.Unsafe +{ + /*********************************************** + * Sequences * + ***********************************************/ + public struct SeqDef_s + { + /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ + public uint offBase; + public ushort litLength; + /* mlBase == matchLength - MINMATCH */ + public ushort mlBase; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs new file mode 100644 index 000000000..4b661a552 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs @@ -0,0 +1,24 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct SeqStore_t + { + public SeqDef_s* sequencesStart; + /* ptr to end of sequences */ + public SeqDef_s* sequences; + public byte* litStart; + /* ptr to end of literals */ + public byte* lit; + public byte* llCode; + public byte* mlCode; + public byte* ofCode; + public nuint maxNbSeq; + 
public nuint maxNbLit; + /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength + * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment + * the existing value of the litLength or matchLength by 0x10000. + */ + public ZSTD_longLengthType_e longLengthType; + /* Index of the sequence to apply long length modification to */ + public uint longLengthPos; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs new file mode 100644 index 000000000..4b39c6c40 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs @@ -0,0 +1,21 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct SerialState + { + /* All variables in the struct are protected by mutex. */ + public void* mutex; + public void* cond; + public ZSTD_CCtx_params_s @params; + public ldmState_t ldmState; + public XXH64_state_s xxhState; + public uint nextJobID; + /* Protects ldmWindow. + * Must be acquired after the main mutex when acquiring both. 
+ */ + public void* ldmWindowMutex; + /* Signaled when ldmWindow is updated */ + public void* ldmWindowCond; + /* A thread-safe copy of ldmState.window */ + public ZSTD_window_t ldmWindow; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs new file mode 100644 index 000000000..7763eaf89 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public enum SymbolEncodingType_e + { + set_basic, + set_rle, + set_compressed, + set_repeat + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs new file mode 100644 index 000000000..bf93adfcc --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public struct SyncPoint + { + /* The number of bytes to load from the input. */ + public nuint toLoad; + /* Boolean declaring if we must flush because we found a synchronization point. */ + public int flush; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs new file mode 100644 index 000000000..fbb3572f8 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @brief Canonical (big endian) representation of @ref XXH32_hash_t. 
+ */ + public unsafe struct XXH32_canonical_t + { + /*!< Hash bytes, big endian */ + public fixed byte digest[4]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs new file mode 100644 index 000000000..01c6229e3 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs @@ -0,0 +1,30 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. + * @see XXH64_state_s, XXH3_state_s + */ + public unsafe struct XXH32_state_s + { + /*!< Total length hashed, modulo 2^32 */ + public uint total_len_32; + /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + public uint large_len; + /*!< Accumulator lanes */ + public fixed uint v[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + public fixed uint mem32[4]; + /*!< Amount of data in @ref mem32 */ + public uint memsize; + /*!< Reserved field. Do not read nor write to it. */ + public uint reserved; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs new file mode 100644 index 000000000..fc5ffb469 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. 
+ */ + public unsafe struct XXH64_canonical_t + { + public fixed byte digest[8]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs new file mode 100644 index 000000000..3ba3dd4a9 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs @@ -0,0 +1,30 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ + public unsafe struct XXH64_state_s + { + /*!< Total length hashed. This is always 64-bit. */ + public ulong total_len; + /*!< Accumulator lanes */ + public fixed ulong v[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + public fixed ulong mem64[4]; + /*!< Amount of data in @ref mem64 */ + public uint memsize; + /*!< Reserved field, needed for padding anyways*/ + public uint reserved32; + /*!< Reserved field. Do not read or write to it. */ + public ulong reserved64; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs new file mode 100644 index 000000000..e3b721519 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs @@ -0,0 +1,14 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. 
+ */ + public enum XXH_alignment + { + /*!< Aligned */ + XXH_aligned, + /*!< Possibly unaligned */ + XXH_unaligned + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs new file mode 100644 index 000000000..bd3568df0 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /*! + * @brief Exit code for the streaming API. + */ + public enum XXH_errorcode + { + /*!< OK */ + XXH_OK = 0, + /*!< Error */ + XXH_ERROR + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs new file mode 100644 index 000000000..081fca15a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs @@ -0,0 +1,598 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Buffers.Binary; +using System.Numerics; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ + private static void* XXH_malloc(nuint s) + { + return malloc(s); + } + + /*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ + private static void XXH_free(void* p) + { + free(p); + } + + /*! + * @internal + * @brief Modify this function to use a different routine than memcpy(). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void XXH_memcpy(void* dest, void* src, nuint size) + { + memcpy(dest, src, (uint)size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint XXH_readLE32(void* ptr) + { + return BitConverter.IsLittleEndian ? 
MEM_read32(ptr) : BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint XXH_readBE32(void* ptr) + { + return BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)) : MEM_read32(ptr); + } + + private static uint XXH_readLE32_align(void* ptr, XXH_alignment align) + { + if (align == XXH_alignment.XXH_unaligned) + { + return XXH_readLE32(ptr); + } + else + { + return BitConverter.IsLittleEndian ? *(uint*)ptr : BinaryPrimitives.ReverseEndianness(*(uint*)ptr); + } + } + + /* ************************************* + * Misc + ***************************************/ + /*! @ingroup public */ + private static uint ZSTD_XXH_versionNumber() + { + return 0 * 100 * 100 + 8 * 100 + 2; + } + + /*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ + private static uint XXH32_round(uint acc, uint input) + { + acc += input * 0x85EBCA77U; + acc = BitOperations.RotateLeft(acc, 13); + acc *= 0x9E3779B1U; + return acc; + } + + /*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ + private static uint XXH32_avalanche(uint hash) + { + hash ^= hash >> 15; + hash *= 0x85EBCA77U; + hash ^= hash >> 13; + hash *= 0xC2B2AE3DU; + hash ^= hash >> 16; + return hash; + } + + /*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. 
+ * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ + private static uint XXH32_finalize(uint hash, byte* ptr, nuint len, XXH_alignment align) + { + len &= 15; + while (len >= 4) + { + { + hash += XXH_readLE32_align(ptr, align) * 0xC2B2AE3DU; + ptr += 4; + hash = BitOperations.RotateLeft(hash, 17) * 0x27D4EB2FU; + } + + len -= 4; + } + + while (len > 0) + { + { + hash += *ptr++ * 0x165667B1U; + hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B1U; + } + + --len; + } + + return XXH32_avalanche(hash); + } + + /*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ + private static uint XXH32_endian_align(byte* input, nuint len, uint seed, XXH_alignment align) + { + uint h32; + if (len >= 16) + { + byte* bEnd = input + len; + byte* limit = bEnd - 15; + uint v1 = seed + 0x9E3779B1U + 0x85EBCA77U; + uint v2 = seed + 0x85EBCA77U; + uint v3 = seed + 0; + uint v4 = seed - 0x9E3779B1U; + do + { + v1 = XXH32_round(v1, XXH_readLE32_align(input, align)); + input += 4; + v2 = XXH32_round(v2, XXH_readLE32_align(input, align)); + input += 4; + v3 = XXH32_round(v3, XXH_readLE32_align(input, align)); + input += 4; + v4 = XXH32_round(v4, XXH_readLE32_align(input, align)); + input += 4; + } + while (input < limit); + h32 = BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); + } + else + { + h32 = seed + 0x165667B1U; + } + + h32 += (uint)len; + return XXH32_finalize(h32, input, len & 15, align); + } + + /*! 
@ingroup XXH32_family */ + private static uint ZSTD_XXH32(void* input, nuint len, uint seed) + { + return XXH32_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); + } + + /*! @ingroup XXH32_family */ + private static XXH32_state_s* ZSTD_XXH32_createState() + { + return (XXH32_state_s*)XXH_malloc((nuint)sizeof(XXH32_state_s)); + } + + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_freeState(XXH32_state_s* statePtr) + { + XXH_free(statePtr); + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH32_family */ + private static void ZSTD_XXH32_copyState(XXH32_state_s* dstState, XXH32_state_s* srcState) + { + XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH32_state_s)); + } + + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_reset(XXH32_state_s* statePtr, uint seed) + { + *statePtr = new XXH32_state_s(); + statePtr->v[0] = seed + 0x9E3779B1U + 0x85EBCA77U; + statePtr->v[1] = seed + 0x85EBCA77U; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - 0x9E3779B1U; + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_update(XXH32_state_s* state, void* input, nuint len) + { + if (input == null) + { + return XXH_errorcode.XXH_OK; + } + + { + byte* p = (byte*)input; + byte* bEnd = p + len; + state->total_len_32 += (uint)len; + state->large_len |= len >= 16 || state->total_len_32 >= 16 ? 
1U : 0U; + if (state->memsize + len < 16) + { + XXH_memcpy((byte*)state->mem32 + state->memsize, input, len); + state->memsize += (uint)len; + return XXH_errorcode.XXH_OK; + } + + if (state->memsize != 0) + { + XXH_memcpy((byte*)state->mem32 + state->memsize, input, 16 - state->memsize); + { + uint* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); + p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); + p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); + p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + + p += 16 - state->memsize; + state->memsize = 0; + } + + if (p <= bEnd - 16) + { + byte* limit = bEnd - 16; + do + { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); + p += 4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); + p += 4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); + p += 4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); + p += 4; + } + while (p <= limit); + } + + if (p < bEnd) + { + XXH_memcpy(state->mem32, p, (nuint)(bEnd - p)); + state->memsize = (uint)(bEnd - p); + } + } + + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH32_family */ + private static uint ZSTD_XXH32_digest(XXH32_state_s* state) + { + uint h32; + if (state->large_len != 0) + { + h32 = BitOperations.RotateLeft(state->v[0], 1) + BitOperations.RotateLeft(state->v[1], 7) + BitOperations.RotateLeft(state->v[2], 12) + BitOperations.RotateLeft(state->v[3], 18); + } + else + { + h32 = state->v[2] + 0x165667B1U; + } + + h32 += state->total_len_32; + return XXH32_finalize(h32, (byte*)state->mem32, state->memsize, XXH_alignment.XXH_aligned); + } + + /*! 
@ingroup XXH32_family */ + private static void ZSTD_XXH32_canonicalFromHash(XXH32_canonical_t* dst, uint hash) + { + assert(sizeof(XXH32_canonical_t) == sizeof(uint)); + if (BitConverter.IsLittleEndian) + hash = BinaryPrimitives.ReverseEndianness(hash); + XXH_memcpy(dst, &hash, (nuint)sizeof(XXH32_canonical_t)); + } + + /*! @ingroup XXH32_family */ + private static uint ZSTD_XXH32_hashFromCanonical(XXH32_canonical_t* src) + { + return XXH_readBE32(src); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH_readLE64(void* ptr) + { + return BitConverter.IsLittleEndian ? MEM_read64(ptr) : BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH_readBE64(void* ptr) + { + return BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)) : MEM_read64(ptr); + } + + private static ulong XXH_readLE64_align(void* ptr, XXH_alignment align) + { + if (align == XXH_alignment.XXH_unaligned) + return XXH_readLE64(ptr); + else + return BitConverter.IsLittleEndian ? *(ulong*)ptr : BinaryPrimitives.ReverseEndianness(*(ulong*)ptr); + } + + /*! @copydoc XXH32_round */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH64_round(ulong acc, ulong input) + { + acc += input * 0xC2B2AE3D27D4EB4FUL; + acc = BitOperations.RotateLeft(acc, 31); + acc *= 0x9E3779B185EBCA87UL; + return acc; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH64_mergeRound(ulong acc, ulong val) + { + val = XXH64_round(0, val); + acc ^= val; + acc = acc * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; + return acc; + } + + /*! @copydoc XXH32_avalanche */ + private static ulong XXH64_avalanche(ulong hash) + { + hash ^= hash >> 33; + hash *= 0xC2B2AE3D27D4EB4FUL; + hash ^= hash >> 29; + hash *= 0x165667B19E3779F9UL; + hash ^= hash >> 32; + return hash; + } + + /*! 
+ * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). + */ + private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignment align) + { + len &= 31; + while (len >= 8) + { + ulong k1 = XXH64_round(0, XXH_readLE64_align(ptr, align)); + ptr += 8; + hash ^= k1; + hash = BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; + len -= 8; + } + + if (len >= 4) + { + hash ^= XXH_readLE32_align(ptr, align) * 0x9E3779B185EBCA87UL; + ptr += 4; + hash = BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL + 0x165667B19E3779F9UL; + len -= 4; + } + + while (len > 0) + { + hash ^= *ptr++ * 0x27D4EB2F165667C5UL; + hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B185EBCA87UL; + --len; + } + + return XXH64_avalanche(hash); + } + + /*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. 
+ */ + private static ulong XXH64_endian_align(byte* input, nuint len, ulong seed, XXH_alignment align) + { + ulong h64; + if (len >= 32) + { + byte* bEnd = input + len; + byte* limit = bEnd - 31; + ulong v1 = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; + ulong v2 = seed + 0xC2B2AE3D27D4EB4FUL; + ulong v3 = seed + 0; + ulong v4 = seed - 0x9E3779B185EBCA87UL; + do + { + v1 = XXH64_round(v1, XXH_readLE64_align(input, align)); + input += 8; + v2 = XXH64_round(v2, XXH_readLE64_align(input, align)); + input += 8; + v3 = XXH64_round(v3, XXH_readLE64_align(input, align)); + input += 8; + v4 = XXH64_round(v4, XXH_readLE64_align(input, align)); + input += 8; + } + while (input < limit); + h64 = BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } + else + { + h64 = seed + 0x27D4EB2F165667C5UL; + } + + h64 += len; + return XXH64_finalize(h64, input, len, align); + } + + /*! @ingroup XXH64_family */ + private static ulong ZSTD_XXH64(void* input, nuint len, ulong seed) + { + return XXH64_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); + } + + /*! @ingroup XXH64_family*/ + private static XXH64_state_s* ZSTD_XXH64_createState() + { + return (XXH64_state_s*)XXH_malloc((nuint)sizeof(XXH64_state_s)); + } + + /*! @ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_freeState(XXH64_state_s* statePtr) + { + XXH_free(statePtr); + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH64_family */ + private static void ZSTD_XXH64_copyState(XXH64_state_s* dstState, XXH64_state_s* srcState) + { + XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH64_state_s)); + } + + /*! 
@ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_reset(XXH64_state_s* statePtr, ulong seed) + { + *statePtr = new XXH64_state_s(); + statePtr->v[0] = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; + statePtr->v[1] = seed + 0xC2B2AE3D27D4EB4FUL; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - 0x9E3779B185EBCA87UL; + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_update(XXH64_state_s* state, void* input, nuint len) + { + if (input == null) + { + return XXH_errorcode.XXH_OK; + } + + { + byte* p = (byte*)input; + byte* bEnd = p + len; + state->total_len += len; + if (state->memsize + len < 32) + { + XXH_memcpy((byte*)state->mem64 + state->memsize, input, len); + state->memsize += (uint)len; + return XXH_errorcode.XXH_OK; + } + + if (state->memsize != 0) + { + XXH_memcpy((byte*)state->mem64 + state->memsize, input, 32 - state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64 + 0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64 + 1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64 + 2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64 + 3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p + 32 <= bEnd) + { + byte* limit = bEnd - 32; + do + { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); + p += 8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); + p += 8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); + p += 8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); + p += 8; + } + while (p <= limit); + } + + if (p < bEnd) + { + XXH_memcpy(state->mem64, p, (nuint)(bEnd - p)); + state->memsize = (uint)(bEnd - p); + } + } + + return XXH_errorcode.XXH_OK; + } + + /*! 
@ingroup XXH64_family */ + private static ulong ZSTD_XXH64_digest(XXH64_state_s* state) + { + ulong h64; + if (state->total_len >= 32) + { + h64 = BitOperations.RotateLeft(state->v[0], 1) + BitOperations.RotateLeft(state->v[1], 7) + BitOperations.RotateLeft(state->v[2], 12) + BitOperations.RotateLeft(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } + else + { + h64 = state->v[2] + 0x27D4EB2F165667C5UL; + } + + h64 += state->total_len; + return XXH64_finalize(h64, (byte*)state->mem64, (nuint)state->total_len, XXH_alignment.XXH_aligned); + } + + /*! @ingroup XXH64_family */ + private static void ZSTD_XXH64_canonicalFromHash(XXH64_canonical_t* dst, ulong hash) + { + assert(sizeof(XXH64_canonical_t) == sizeof(ulong)); + if (BitConverter.IsLittleEndian) + hash = BinaryPrimitives.ReverseEndianness(hash); + XXH_memcpy(dst, &hash, (nuint)sizeof(XXH64_canonical_t)); + } + + /*! @ingroup XXH64_family */ + private static ulong ZSTD_XXH64_hashFromCanonical(XXH64_canonical_t* src) + { + return XXH_readBE64(src); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs new file mode 100644 index 000000000..2b27c1a10 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs @@ -0,0 +1,25 @@ +namespace ZstdSharp.Unsafe +{ + /*! ZDICT_cover_params_t: + * k and d are the only required parameters. + * For others, value 0 means default. 
+ */ + public struct ZDICT_cover_params_t + { + /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + public uint k; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + public uint d; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + public uint steps; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + public uint nbThreads; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ + public double splitPoint; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + public uint shrinkDict; + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ + public uint shrinkDictMaxRegression; + public ZDICT_params_t zParams; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs new file mode 100644 index 000000000..eea3bb332 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs @@ -0,0 +1,25 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZDICT_fastCover_params_t + { + /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + public uint k; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + public uint d; + /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ + public uint f; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + public uint steps; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + public uint nbThreads; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ + public double splitPoint; + /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ + public uint accel; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + public uint shrinkDict; + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ + public uint shrinkDictMaxRegression; + public ZDICT_params_t zParams; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs new file mode 100644 index 000000000..466cf2bc2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZDICT_legacy_params_t + { + /* 0 means default; larger => select more => larger dictionary */ + public uint selectivityLevel; + public ZDICT_params_t zParams; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs new file mode 100644 index 000000000..28668ac51 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs @@ -0,0 +1,19 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZDICT_params_t + { + /**< optimize for a specific zstd compression level; 0 means default */ + public int compressionLevel; + /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + public uint notificationLevel; + /**< force dictID value; 0 means auto mode (32-bits random value) + * NOTE: The zstd format reserves some dictionary IDs for future use. + * You may use them in private settings, but be warned that they + * may be used by zstd in a public dictionary registry in the future. 
+ * These dictionary IDs are: + * - low range : <= 32767 + * - high range : >= (2^31) + */ + public uint dictID; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs new file mode 100644 index 000000000..691635a37 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /* ===== CCtx Pool ===== */ + /* a single CCtx Pool can be invoked from multiple threads in parallel */ + public unsafe struct ZSTDMT_CCtxPool + { + public void* poolMutex; + public int totalCCtx; + public int availCCtx; + public ZSTD_customMem cMem; + public ZSTD_CCtx_s** cctxs; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs new file mode 100644 index 000000000..7185cb5a1 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs @@ -0,0 +1,32 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTDMT_CCtx_s + { + public void* factory; + public ZSTDMT_jobDescription* jobs; + public ZSTDMT_bufferPool_s* bufPool; + public ZSTDMT_CCtxPool* cctxPool; + public ZSTDMT_bufferPool_s* seqPool; + public ZSTD_CCtx_params_s @params; + public nuint targetSectionSize; + public nuint targetPrefixSize; + /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. 
*/ + public int jobReady; + public InBuff_t inBuff; + public RoundBuff_t roundBuff; + public SerialState serial; + public RSyncState_t rsync; + public uint jobIDMask; + public uint doneJobID; + public uint nextJobID; + public uint frameEnded; + public uint allJobsCompleted; + public ulong frameContentSize; + public ulong consumed; + public ulong produced; + public ZSTD_customMem cMem; + public ZSTD_CDict_s* cdictLocal; + public ZSTD_CDict_s* cdict; + public uint providedFactory; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs new file mode 100644 index 000000000..a5333bfe1 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTDMT_bufferPool_s + { + public void* poolMutex; + public nuint bufferSize; + public uint totalBuffers; + public uint nbBuffers; + public ZSTD_customMem cMem; + public buffer_s* buffers; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs new file mode 100644 index 000000000..b0fb7a6e7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs @@ -0,0 +1,44 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTDMT_jobDescription + { + /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ + public nuint consumed; + /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ + public nuint cSize; + /* Thread-safe - used by mtctx and worker */ + public void* job_mutex; + /* Thread-safe - used by mtctx and worker */ + public void* job_cond; + /* Thread-safe - used by mtctx and (all) workers */ + public ZSTDMT_CCtxPool* cctxPool; + /* Thread-safe - used by mtctx and (all) 
workers */ + public ZSTDMT_bufferPool_s* bufPool; + /* Thread-safe - used by mtctx and (all) workers */ + public ZSTDMT_bufferPool_s* seqPool; + /* Thread-safe - used by mtctx and (all) workers */ + public SerialState* serial; + /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ + public buffer_s dstBuff; + /* set by mtctx, then read by worker & mtctx => no barrier */ + public Range prefix; + /* set by mtctx, then read by worker & mtctx => no barrier */ + public Range src; + /* set by mtctx, then read by worker => no barrier */ + public uint jobID; + /* set by mtctx, then read by worker => no barrier */ + public uint firstJob; + /* set by mtctx, then read by worker => no barrier */ + public uint lastJob; + /* set by mtctx, then read by worker => no barrier */ + public ZSTD_CCtx_params_s @params; + /* set by mtctx, then read by worker => no barrier */ + public ZSTD_CDict_s* cdict; + /* set by mtctx, then read by worker => no barrier */ + public ulong fullFrameSize; + /* used only by mtctx */ + public nuint dstFlushed; + /* used only by mtctx */ + public uint frameChecksumNeeded; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs new file mode 100644 index 000000000..9d9a7e3b2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs @@ -0,0 +1,7 @@ +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + public unsafe delegate nuint ZSTD_BlockCompressor_f(ZSTD_MatchState_t* bs, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize); +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs new file mode 100644 index 000000000..98631bfa2 --- 
/dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_BuildCTableWksp + { + public fixed short norm[53]; + public fixed uint wksp[285]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs new file mode 100644 index 000000000..6e575561a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_BuildSeqStore_e + { + ZSTDbss_compress, + ZSTDbss_noCompress + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs new file mode 100644 index 000000000..d29f527d2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs @@ -0,0 +1,70 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_CCtx_params_s + { + public ZSTD_format_e format; + public ZSTD_compressionParameters cParams; + public ZSTD_frameParameters fParams; + public int compressionLevel; + /* force back-references to respect limit of + * 1< 0: + * If litLength != 0: + * rep == 1 --> offset == repeat_offset_1 + * rep == 2 --> offset == repeat_offset_2 + * rep == 3 --> offset == repeat_offset_3 + * If litLength == 0: + * rep == 1 --> offset == repeat_offset_2 + * rep == 2 --> offset == repeat_offset_3 + * rep == 3 --> offset == repeat_offset_1 - 1 + * + * Note: This field is optional. ZSTD_generateSequences() will calculate the value of + * 'rep', but repeat offsets do not necessarily need to be calculated from an external + * sequence provider perspective. For example, ZSTD_compressSequences() does not + * use this 'rep' field at all (as of now). 
+ */ + public uint rep; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs new file mode 100644 index 000000000..721e31a6d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_SequenceLength + { + public uint litLength; + public uint matchLength; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs new file mode 100644 index 000000000..cf778120f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_SequencePosition + { + /* Index in array of ZSTD_Sequence */ + public uint idx; + /* Position within sequence at idx */ + public uint posInSequence; + /* Number of bytes given by sequences provided so far */ + public nuint posInSrc; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs new file mode 100644 index 000000000..36b5fc214 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_blockSplitCtx + { + public SeqStore_t fullSeqStoreChunk; + public SeqStore_t firstHalfSeqStore; + public SeqStore_t secondHalfSeqStore; + public SeqStore_t currSeqStore; + public SeqStore_t nextSeqStore; + public fixed uint partitions[196]; + public ZSTD_entropyCTablesMetadata_t entropyMetadata; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs new file mode 100644 index 000000000..8d1670b6b --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_blockState_t + { + public ZSTD_compressedBlockState_t* prevCBlock; + public ZSTD_compressedBlockState_t* nextCBlock; + public ZSTD_MatchState_t matchState; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs new file mode 100644 index 000000000..327fc7f6a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_bounds + { + public nuint error; + public int lowerBound; + public int upperBound; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs new file mode 100644 index 000000000..6ff3faff5 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /* Controls whether the input/output buffer is buffered or stable. 
*/ + public enum ZSTD_bufferMode_e + { + /* Buffer the input/output */ + ZSTD_bm_buffered = 0, + /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ + ZSTD_bm_stable = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs new file mode 100644 index 000000000..bac9d2f87 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Indicates whether this compression proceeds directly from user-provided + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or + * whether the context needs to buffer the input/output (ZSTDb_buffered). + */ + public enum ZSTD_buffered_policy_e + { + ZSTDb_not_buffered, + ZSTDb_buffered + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs new file mode 100644 index 000000000..061fb5771 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs @@ -0,0 +1,198 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_cParameter + { + /* Set compression parameters according to pre-defined cLevel table. + * Note that exact compression parameters are dynamically determined, + * depending on both compression level and srcSize (when known). + * Default level is ZSTD_CLEVEL_DEFAULT==3. + * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. + * Note 1 : it's possible to pass a negative compression level. + * Note 2 : setting a level does not automatically set all other compression parameters + * to default. Setting this will however eventually dynamically impact the compression + * parameters which have not been manually set. The manually set + * ones will 'stick'. 
*/ + ZSTD_c_compressionLevel = 100, + /* Maximum allowed back-reference distance, expressed as power of 2. + * This will set a memory budget for streaming decompression, + * with larger values requiring more memory + * and typically compressing more. + * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. + * Special: value 0 means "use default windowLog". + * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT + * requires explicitly allowing such size at streaming decompression stage. */ + ZSTD_c_windowLog = 101, + /* Size of the initial probe table, as a power of 2. + * Resulting memory usage is (1 << (hashLog+2)). + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. + * Larger tables improve compression ratio of strategies <= dFast, + * and improve speed of strategies > dFast. + * Special: value 0 means "use default hashLog". */ + ZSTD_c_hashLog = 102, + /* Size of the multi-probe search table, as a power of 2. + * Resulting memory usage is (1 << (chainLog+2)). + * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. + * Larger tables result in better and slower compression. + * This parameter is useless for "fast" strategy. + * It's still useful when using "dfast" strategy, + * in which case it defines a secondary probe table. + * Special: value 0 means "use default chainLog". */ + ZSTD_c_chainLog = 103, + /* Number of search attempts, as a power of 2. + * More attempts result in better and slower compression. + * This parameter is useless for "fast" and "dFast" strategies. + * Special: value 0 means "use default searchLog". */ + ZSTD_c_searchLog = 104, + /* Minimum size of searched matches. + * Note that Zstandard can still find matches of smaller size, + * it just tweaks its search algorithm to look for this size and larger. + * Larger values increase compression and decompression speed, but decrease ratio. + * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. 
+ * Note that currently, for all strategies < btopt, effective minimum is 4. + * , for all strategies > fast, effective maximum is 6. + * Special: value 0 means "use default minMatchLength". */ + ZSTD_c_minMatch = 105, + /* Impact of this field depends on strategy. + * For strategies btopt, btultra & btultra2: + * Length of Match considered "good enough" to stop search. + * Larger values make compression stronger, and slower. + * For strategy fast: + * Distance between match sampling. + * Larger values make compression faster, and weaker. + * Special: value 0 means "use default targetLength". */ + ZSTD_c_targetLength = 106, + /* See ZSTD_strategy enum definition. + * The higher the value of selected strategy, the more complex it is, + * resulting in stronger and slower compression. + * Special: value 0 means "use default strategy". */ + ZSTD_c_strategy = 107, + /* v1.5.6+ + * Attempts to fit compressed block size into approximately targetCBlockSize. + * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. + * Note that it's not a guarantee, just a convergence target (default:0). + * No target when targetCBlockSize == 0. + * This is helpful in low bandwidth streaming environments to improve end-to-end latency, + * when a client can make use of partial documents (a prominent example being Chrome). + * Note: this parameter is stable since v1.5.6. + * It was present as an experimental parameter in earlier versions, + * but it's not recommended using it with earlier library versions + * due to massive performance regressions. + */ + ZSTD_c_targetCBlockSize = 130, + /* Enable long distance matching. + * This parameter is designed to improve compression ratio + * for large inputs, by finding large matches at long distance. + * It increases memory usage and window size. + * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB + * except when expressly set to a different value. 
+ * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and + * compression strategy >= ZSTD_btopt (== compression level 16+) */ + ZSTD_c_enableLongDistanceMatching = 160, + /* Size of the table for long distance matching, as a power of 2. + * Larger values increase memory usage and compression ratio, + * but decrease compression speed. + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX + * default: windowlog - 7. + * Special: value 0 means "automatically determine hashlog". */ + ZSTD_c_ldmHashLog = 161, + /* Minimum match size for long distance matcher. + * Larger/too small values usually decrease compression ratio. + * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. + * Special: value 0 means "use default value" (default: 64). */ + ZSTD_c_ldmMinMatch = 162, + /* Log size of each bucket in the LDM hash table for collision resolution. + * Larger values improve collision resolution but decrease compression speed. + * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. + * Special: value 0 means "use default value" (default: 3). */ + ZSTD_c_ldmBucketSizeLog = 163, + /* Frequency of inserting/looking up entries into the LDM hash table. + * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). + * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. + * Larger values improve compression speed. + * Deviating far from default value will likely result in a compression ratio decrease. + * Special: value 0 means "automatically determine hashRateLog". */ + ZSTD_c_ldmHashRateLog = 164, + /* Content size will be written into frame header _whenever known_ (default:1) + * Content size must be known at the beginning of compression. 
+ * This is automatically the case when using ZSTD_compress2(), + * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ + ZSTD_c_contentSizeFlag = 200, + /* A 32-bits checksum of content is written at end of frame (default:0) */ + ZSTD_c_checksumFlag = 201, + /* When applicable, dictionary's ID is written into frame header (default:1) */ + ZSTD_c_dictIDFlag = 202, + /* Select how many threads will be spawned to compress in parallel. + * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : + * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, + * while compression is performed in parallel, within worker thread(s). + * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : + * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). + * More workers improve speed, but also increase memory usage. + * Default value is `0`, aka "single-threaded mode" : no worker is spawned, + * compression is performed inside Caller's thread, and all invocations are blocking */ + ZSTD_c_nbWorkers = 400, + /* Size of a compression job. This value is enforced only when nbWorkers >= 1. + * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. + * 0 means default, which is dynamically determined based on compression parameters. + * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. + * The minimum size is automatically and transparently enforced. */ + ZSTD_c_jobSize = 401, + /* Control the overlap size, as a fraction of window size. + * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. + * It helps preserve compression ratio, while each job is compressed in parallel. 
+ * This value is enforced only when nbWorkers >= 1. + * Larger values increase compression ratio, but decrease speed. + * Possible values range from 0 to 9 : + * - 0 means "default" : value will be determined by the library, depending on strategy + * - 1 means "no overlap" + * - 9 means "full overlap", using a full window size. + * Each intermediate rank increases/decreases load size by a factor 2 : + * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default + * default value varies between 6 and 9, depending on strategy */ + ZSTD_c_overlapLog = 402, + /* note : additional experimental parameters are also available + * within the experimental section of the API. + * At the time of this writing, they include : + * ZSTD_c_rsyncable + * ZSTD_c_format + * ZSTD_c_forceMaxWindow + * ZSTD_c_forceAttachDict + * ZSTD_c_literalCompressionMode + * ZSTD_c_srcSizeHint + * ZSTD_c_enableDedicatedDictSearch + * ZSTD_c_stableInBuffer + * ZSTD_c_stableOutBuffer + * ZSTD_c_blockDelimiters + * ZSTD_c_validateSequences + * ZSTD_c_blockSplitterLevel + * ZSTD_c_splitAfterSequences + * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize + * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. + * note : never ever use experimentalParam? names directly; + * also, the enums values themselves are unstable and can still change. 
+ */ + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, + ZSTD_c_experimentalParam16 = 1013, + ZSTD_c_experimentalParam17 = 1014, + ZSTD_c_experimentalParam18 = 1015, + ZSTD_c_experimentalParam19 = 1016, + ZSTD_c_experimentalParam20 = 1017 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs new file mode 100644 index 000000000..770eaa918 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_cStreamStage + { + zcss_init = 0, + zcss_load, + zcss_flush + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs new file mode 100644 index 000000000..0f474bc05 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Controls, for this matchState reset, whether the tables need to be cleared / + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a + * subsequent operation will overwrite the table space anyways (e.g., copying + * the matchState contents in from a CDict). 
// Review note: this span of the patch adds one type per file under
// src/SharpCompress/Compressors/ZStandard/Unsafe/.  The types are grouped here as a
// single compilation unit for review; keep the one-type-per-file split when applying.
// NOTE(review): the namespace is ZstdSharp.Unsafe while the files live under
// SharpCompress.Compressors.ZStandard — presumably kept to ease syncing with the
// upstream ZstdSharp port; confirm this is intentional before release.
namespace ZstdSharp.Unsafe
{
    // --- ZSTD_compResetPolicy_e.cs ---
    /// <summary>Controls whether the compression context is wiped before reuse.</summary>
    public enum ZSTD_compResetPolicy_e
    {
        ZSTDcrp_makeClean = 0,
        ZSTDcrp_leaveDirty = 1
    }

    // --- ZSTD_compressedBlockState_t.cs ---
    /// <summary>Entropy tables plus repeat-offset history carried between compressed blocks.</summary>
    public unsafe struct ZSTD_compressedBlockState_t
    {
        public ZSTD_entropyCTables_t entropy;
        // Three repeat offsets, matching the zstd format's repcode history.
        public fixed uint rep[3];
    }

    // --- ZSTD_compressionParameters.cs ---
    /// <summary>The tunable knobs that define a zstd compression level.</summary>
    public struct ZSTD_compressionParameters
    {
        /// <summary>Largest match distance: larger == more compression, more memory needed during decompression.</summary>
        public uint windowLog;
        /// <summary>Fully searched segment: larger == more compression, slower, more memory (useless for fast).</summary>
        public uint chainLog;
        /// <summary>Dispatch table: larger == faster, more memory.</summary>
        public uint hashLog;
        /// <summary>Number of searches: larger == more compression, slower.</summary>
        public uint searchLog;
        /// <summary>Match length searched: larger == faster decompression, sometimes less compression.</summary>
        public uint minMatch;
        /// <summary>Acceptable match size for the optimal parser (only): larger == more compression, slower.</summary>
        public uint targetLength;
        /// <summary>See the ZSTD_strategy definition.</summary>
        public ZSTD_strategy strategy;

        public ZSTD_compressionParameters(
            uint windowLog,
            uint chainLog,
            uint hashLog,
            uint searchLog,
            uint minMatch,
            uint targetLength,
            ZSTD_strategy strategy)
        {
            this.windowLog = windowLog;
            this.chainLog = chainLog;
            this.hashLog = hashLog;
            this.searchLog = searchLog;
            this.minMatch = minMatch;
            this.targetLength = targetLength;
            this.strategy = strategy;
        }
    }

    // --- ZSTD_compressionStage_e.cs ---
    /*-*************************************
     * Context memory management
     ***************************************/
    public enum ZSTD_compressionStage_e
    {
        ZSTDcs_created = 0,
        ZSTDcs_init = 1,
        ZSTDcs_ongoing = 2,
        ZSTDcs_ending = 3
    }

    // --- ZSTD_customMem.cs ---
    /// <summary>
    /// Custom allocator hooks (alloc/free function pointers plus an opaque state
    /// pointer), stored as untyped pointers in this port.
    /// </summary>
    public unsafe struct ZSTD_customMem
    {
        public void* customAlloc;
        public void* customFree;
        public void* opaque;

        public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque)
        {
            this.customAlloc = customAlloc;
            this.customFree = customFree;
            this.opaque = opaque;
        }
    }

    // --- ZSTD_cwksp.cs ---
    /// <summary>
    /// Zstd's single-allocation workspace: every internal data structure is carved
    /// out of one contiguous buffer so compression needs at most one OS allocation
    /// (or none, when the caller supplies the buffer).
    ///
    /// Layout: [objects][tables -&gt;] free space [&lt;- buffers][&lt;- aligned][&lt;- init once]
    ///
    /// Regions, by setup requirement:
    ///  - Static objects: optionally the enclosing ZSTD_CCtx/ZSTD_CDict itself; if
    ///    present it must come first (freeing relies on a pointer comparison).
    ///  - Fixed-size objects: e.g. the entropy workspace, 2x ZSTD_compressedBlockState_t,
    ///    CDict dictionary contents — initialised separately from the broader CCtx.
    ///  - Tables: uint32 arrays (hash/chain/binary-tree) whose values lie in
    ///    [0, nextSrc - base); sized by cparams, 64-byte aligned, and only need
    ///    clearing when the bound guarantee cannot be provided.
    ///  - Init once: must be written at least once before reads; may hold data from
    ///    previous compressions; 64-byte aligned.
    ///  - Aligned: no initialisation required; write before read; 64-byte aligned.
    ///  - Buffers: no alignment or initialisation requirements.
    ///
    /// Reservations must happen in phase order — 1. objects, 2. init once / tables,
    /// 3. aligned / tables, 4. buffers / tables — so everything packs correctly;
    /// out-of-order reservation attempts fail.
    /// </summary>
    public unsafe struct ZSTD_cwksp
    {
        public void* workspace;                     // start of the whole buffer
        public void* workspaceEnd;                  // one past the end of the buffer
        public void* objectEnd;                     // end of the static/fixed-object region
        public void* tableEnd;                      // end of the (upward-growing) table region
        public void* tableValidEnd;                 // NOTE(review): presumably the high-water mark of bounded table memory — confirm against the cwksp reserve functions
        public void* allocStart;                    // cursor for the downward-growing regions
        public void* initOnceStart;                 // start of the init-once region
        public byte allocFailed;                    // non-zero once any reservation has failed
        public int workspaceOversizedDuration;
        public ZSTD_cwksp_alloc_phase_e phase;
        public ZSTD_cwksp_static_alloc_e isStatic;
    }

    // --- ZSTD_cwksp_alloc_phase_e.cs ---
    /*-*************************************
     * Structures
     ***************************************/
    /// <summary>Allocation phases of the workspace; reservations must follow this order.</summary>
    public enum ZSTD_cwksp_alloc_phase_e
    {
        ZSTD_cwksp_alloc_objects = 0,
        ZSTD_cwksp_alloc_aligned_init_once = 1,
        ZSTD_cwksp_alloc_aligned = 2,
        ZSTD_cwksp_alloc_buffers = 3
    }

    // --- ZSTD_cwksp_static_alloc_e.cs ---
    /// <summary>
    /// Whether the workspace is statically allocated (and may never be freed) or
    /// dynamically allocated, in which case a well-formed caller is expected to free it.
    /// </summary>
    public enum ZSTD_cwksp_static_alloc_e
    {
        ZSTD_cwksp_dynamic_alloc = 0,
        ZSTD_cwksp_static_alloc = 1
    }

    // --- ZSTD_dParameter.cs ---
    /// <summary>
    /// Advanced decompression parameters, pushed one by one into an existing DCtx.
    /// Parameters are sticky and remain valid for all following frames on the same
    /// DCtx; reset to defaults with ZSTD_DCtx_reset().  Compatible with
    /// ZSTD_decompressDCtx() and ZSTD_decompressStream() — no new entry point needed.
    /// </summary>
    public enum ZSTD_dParameter
    {
        /// <summary>
        /// Size limit (as a power of 2) beyond which the streaming API refuses to
        /// allocate a memory buffer, protecting the host from unreasonable memory
        /// requirements.  Only meaningful in streaming mode (single-pass mode
        /// allocates no internal buffer).  Default: window sizes up to
        /// (1 &lt;&lt; ZSTD_WINDOWLOG_LIMIT_DEFAULT) are accepted; 0 means "use default
        /// maximum windowLog".
        /// </summary>
        ZSTD_d_windowLogMax = 100,
        // Experimental parameters (ZSTD_d_format, ZSTD_d_stableOutBuffer,
        // ZSTD_d_forceIgnoreChecksum, ZSTD_d_refMultipleDDicts,
        // ZSTD_d_disableHuffmanAssembly, ZSTD_d_maxBlockSize).  Not stable: upstream
        // requires ZSTD_STATIC_LINKING_ONLY; never use the experimentalParam? names directly.
        ZSTD_d_experimentalParam1 = 1000,
        ZSTD_d_experimentalParam2 = 1001,
        ZSTD_d_experimentalParam3 = 1002,
        ZSTD_d_experimentalParam4 = 1003,
        ZSTD_d_experimentalParam5 = 1004,
        ZSTD_d_experimentalParam6 = 1005
    }

    // --- ZSTD_dStage.cs ---
    /// <summary>Decompression state machine stages for a single frame.</summary>
    public enum ZSTD_dStage
    {
        ZSTDds_getFrameHeaderSize = 0,
        ZSTDds_decodeFrameHeader = 1,
        ZSTDds_decodeBlockHeader = 2,
        ZSTDds_decompressBlock = 3,
        ZSTDds_decompressLastBlock = 4,
        ZSTDds_checkChecksum = 5,
        ZSTDds_decodeSkippableHeader = 6,
        ZSTDds_skipFrame = 7
    }

    // --- ZSTD_dStreamStage.cs ---
    /// <summary>Streaming decompression stages.</summary>
    public enum ZSTD_dStreamStage
    {
        zdss_init = 0,
        zdss_loadHeader = 1,
        zdss_read = 2,
        zdss_load = 3,
        zdss_flush = 4
    }

    // --- ZSTD_dictAttachPref_e.cs ---
    public enum ZSTD_dictAttachPref_e
    {
        /// <summary>Use the default heuristic.</summary>
        ZSTD_dictDefaultAttach = 0,
        /// <summary>Never copy the dictionary.</summary>
        ZSTD_dictForceAttach = 1,
        /// <summary>Always copy the dictionary.</summary>
        ZSTD_dictForceCopy = 2,
        /// <summary>Always reload the dictionary.</summary>
        ZSTD_dictForceLoad = 3
    }

    // --- ZSTD_dictContentType_e.cs ---
    public enum ZSTD_dictContentType_e
    {
        /// <summary>Dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent".</summary>
        ZSTD_dct_auto = 0,
        /// <summary>Always load as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY.</summary>
        ZSTD_dct_rawContent = 1,
        /// <summary>Refuse any dictionary that does not respect Zstandard's specification (must start with ZSTD_MAGIC_DICTIONARY).</summary>
        ZSTD_dct_fullDict = 2
    }

    // --- ZSTD_dictLoadMethod_e.cs ---
    public enum ZSTD_dictLoadMethod_e
    {
        /// <summary>Copy dictionary content internally.</summary>
        ZSTD_dlm_byCopy = 0,
        /// <summary>Reference dictionary content — the dictionary buffer must outlive its users.</summary>
        ZSTD_dlm_byRef = 1
    }

    // --- ZSTD_dictMode_e.cs ---
    public enum ZSTD_dictMode_e
    {
        ZSTD_noDict = 0,
        ZSTD_extDict = 1,
        ZSTD_dictMatchState = 2,
        ZSTD_dedicatedDictSearch = 3
    }

    // --- ZSTD_dictTableLoadMethod_e.cs ---
    public enum ZSTD_dictTableLoadMethod_e
    {
        ZSTD_dtlm_fast = 0,
        ZSTD_dtlm_full = 1
    }

    // --- ZSTD_dictUses_e.cs ---
    public enum ZSTD_dictUses_e
    {
        /// <summary>Use the dictionary indefinitely.</summary>
        ZSTD_use_indefinitely = -1,
        /// <summary>Do not use the dictionary (if one exists, free it).</summary>
        ZSTD_dont_use = 0,
        /// <summary>Use the dictionary once, then set to ZSTD_dont_use.</summary>
        ZSTD_use_once = 1
    }
}
+namespace ZstdSharp.Unsafe +{ + public struct ZSTD_entropyCTablesMetadata_t + { + public ZSTD_hufCTablesMetadata_t hufMetadata; + public ZSTD_fseCTablesMetadata_t fseMetadata; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs new file mode 100644 index 000000000..297f30bcf --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_entropyCTables_t + { + public ZSTD_hufCTables_t huf; + public ZSTD_fseCTables_t fse; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs new file mode 100644 index 000000000..9f336d2f6 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs @@ -0,0 +1,1339 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_entropyDTables_t + { + /* Note : Space reserved for FSE Tables */ + public _LLTable_e__FixedBuffer LLTable; + /* is also used as temporary workspace while building hufTable during DDict creation */ + public _OFTable_e__FixedBuffer OFTable; + /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ + public _MLTable_e__FixedBuffer MLTable; + /* can accommodate HUF_decompress4X */ + public fixed uint hufTable[4097]; + public fixed uint rep[3]; + public fixed uint workspace[157]; +#if NET8_0_OR_GREATER + [InlineArray(513)] + public unsafe struct _LLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } + +#else + public unsafe struct _LLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; 
+ public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + public ZSTD_seqSymbol e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + 
public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; + public ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public 
ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol e158; + public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol 
e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + public ZSTD_seqSymbol e257; + public ZSTD_seqSymbol e258; + public ZSTD_seqSymbol e259; + public ZSTD_seqSymbol e260; + public ZSTD_seqSymbol e261; + public ZSTD_seqSymbol e262; + public ZSTD_seqSymbol e263; + public ZSTD_seqSymbol e264; + public ZSTD_seqSymbol e265; + public ZSTD_seqSymbol e266; + public ZSTD_seqSymbol e267; + public ZSTD_seqSymbol e268; + public ZSTD_seqSymbol e269; + public ZSTD_seqSymbol e270; + public ZSTD_seqSymbol e271; + public ZSTD_seqSymbol e272; + public ZSTD_seqSymbol e273; + public ZSTD_seqSymbol e274; + public ZSTD_seqSymbol e275; + public 
ZSTD_seqSymbol e276; + public ZSTD_seqSymbol e277; + public ZSTD_seqSymbol e278; + public ZSTD_seqSymbol e279; + public ZSTD_seqSymbol e280; + public ZSTD_seqSymbol e281; + public ZSTD_seqSymbol e282; + public ZSTD_seqSymbol e283; + public ZSTD_seqSymbol e284; + public ZSTD_seqSymbol e285; + public ZSTD_seqSymbol e286; + public ZSTD_seqSymbol e287; + public ZSTD_seqSymbol e288; + public ZSTD_seqSymbol e289; + public ZSTD_seqSymbol e290; + public ZSTD_seqSymbol e291; + public ZSTD_seqSymbol e292; + public ZSTD_seqSymbol e293; + public ZSTD_seqSymbol e294; + public ZSTD_seqSymbol e295; + public ZSTD_seqSymbol e296; + public ZSTD_seqSymbol e297; + public ZSTD_seqSymbol e298; + public ZSTD_seqSymbol e299; + public ZSTD_seqSymbol e300; + public ZSTD_seqSymbol e301; + public ZSTD_seqSymbol e302; + public ZSTD_seqSymbol e303; + public ZSTD_seqSymbol e304; + public ZSTD_seqSymbol e305; + public ZSTD_seqSymbol e306; + public ZSTD_seqSymbol e307; + public ZSTD_seqSymbol e308; + public ZSTD_seqSymbol e309; + public ZSTD_seqSymbol e310; + public ZSTD_seqSymbol e311; + public ZSTD_seqSymbol e312; + public ZSTD_seqSymbol e313; + public ZSTD_seqSymbol e314; + public ZSTD_seqSymbol e315; + public ZSTD_seqSymbol e316; + public ZSTD_seqSymbol e317; + public ZSTD_seqSymbol e318; + public ZSTD_seqSymbol e319; + public ZSTD_seqSymbol e320; + public ZSTD_seqSymbol e321; + public ZSTD_seqSymbol e322; + public ZSTD_seqSymbol e323; + public ZSTD_seqSymbol e324; + public ZSTD_seqSymbol e325; + public ZSTD_seqSymbol e326; + public ZSTD_seqSymbol e327; + public ZSTD_seqSymbol e328; + public ZSTD_seqSymbol e329; + public ZSTD_seqSymbol e330; + public ZSTD_seqSymbol e331; + public ZSTD_seqSymbol e332; + public ZSTD_seqSymbol e333; + public ZSTD_seqSymbol e334; + public ZSTD_seqSymbol e335; + public ZSTD_seqSymbol e336; + public ZSTD_seqSymbol e337; + public ZSTD_seqSymbol e338; + public ZSTD_seqSymbol e339; + public ZSTD_seqSymbol e340; + public ZSTD_seqSymbol e341; + public ZSTD_seqSymbol 
e342; + public ZSTD_seqSymbol e343; + public ZSTD_seqSymbol e344; + public ZSTD_seqSymbol e345; + public ZSTD_seqSymbol e346; + public ZSTD_seqSymbol e347; + public ZSTD_seqSymbol e348; + public ZSTD_seqSymbol e349; + public ZSTD_seqSymbol e350; + public ZSTD_seqSymbol e351; + public ZSTD_seqSymbol e352; + public ZSTD_seqSymbol e353; + public ZSTD_seqSymbol e354; + public ZSTD_seqSymbol e355; + public ZSTD_seqSymbol e356; + public ZSTD_seqSymbol e357; + public ZSTD_seqSymbol e358; + public ZSTD_seqSymbol e359; + public ZSTD_seqSymbol e360; + public ZSTD_seqSymbol e361; + public ZSTD_seqSymbol e362; + public ZSTD_seqSymbol e363; + public ZSTD_seqSymbol e364; + public ZSTD_seqSymbol e365; + public ZSTD_seqSymbol e366; + public ZSTD_seqSymbol e367; + public ZSTD_seqSymbol e368; + public ZSTD_seqSymbol e369; + public ZSTD_seqSymbol e370; + public ZSTD_seqSymbol e371; + public ZSTD_seqSymbol e372; + public ZSTD_seqSymbol e373; + public ZSTD_seqSymbol e374; + public ZSTD_seqSymbol e375; + public ZSTD_seqSymbol e376; + public ZSTD_seqSymbol e377; + public ZSTD_seqSymbol e378; + public ZSTD_seqSymbol e379; + public ZSTD_seqSymbol e380; + public ZSTD_seqSymbol e381; + public ZSTD_seqSymbol e382; + public ZSTD_seqSymbol e383; + public ZSTD_seqSymbol e384; + public ZSTD_seqSymbol e385; + public ZSTD_seqSymbol e386; + public ZSTD_seqSymbol e387; + public ZSTD_seqSymbol e388; + public ZSTD_seqSymbol e389; + public ZSTD_seqSymbol e390; + public ZSTD_seqSymbol e391; + public ZSTD_seqSymbol e392; + public ZSTD_seqSymbol e393; + public ZSTD_seqSymbol e394; + public ZSTD_seqSymbol e395; + public ZSTD_seqSymbol e396; + public ZSTD_seqSymbol e397; + public ZSTD_seqSymbol e398; + public ZSTD_seqSymbol e399; + public ZSTD_seqSymbol e400; + public ZSTD_seqSymbol e401; + public ZSTD_seqSymbol e402; + public ZSTD_seqSymbol e403; + public ZSTD_seqSymbol e404; + public ZSTD_seqSymbol e405; + public ZSTD_seqSymbol e406; + public ZSTD_seqSymbol e407; + public ZSTD_seqSymbol e408; + public 
ZSTD_seqSymbol e409; + public ZSTD_seqSymbol e410; + public ZSTD_seqSymbol e411; + public ZSTD_seqSymbol e412; + public ZSTD_seqSymbol e413; + public ZSTD_seqSymbol e414; + public ZSTD_seqSymbol e415; + public ZSTD_seqSymbol e416; + public ZSTD_seqSymbol e417; + public ZSTD_seqSymbol e418; + public ZSTD_seqSymbol e419; + public ZSTD_seqSymbol e420; + public ZSTD_seqSymbol e421; + public ZSTD_seqSymbol e422; + public ZSTD_seqSymbol e423; + public ZSTD_seqSymbol e424; + public ZSTD_seqSymbol e425; + public ZSTD_seqSymbol e426; + public ZSTD_seqSymbol e427; + public ZSTD_seqSymbol e428; + public ZSTD_seqSymbol e429; + public ZSTD_seqSymbol e430; + public ZSTD_seqSymbol e431; + public ZSTD_seqSymbol e432; + public ZSTD_seqSymbol e433; + public ZSTD_seqSymbol e434; + public ZSTD_seqSymbol e435; + public ZSTD_seqSymbol e436; + public ZSTD_seqSymbol e437; + public ZSTD_seqSymbol e438; + public ZSTD_seqSymbol e439; + public ZSTD_seqSymbol e440; + public ZSTD_seqSymbol e441; + public ZSTD_seqSymbol e442; + public ZSTD_seqSymbol e443; + public ZSTD_seqSymbol e444; + public ZSTD_seqSymbol e445; + public ZSTD_seqSymbol e446; + public ZSTD_seqSymbol e447; + public ZSTD_seqSymbol e448; + public ZSTD_seqSymbol e449; + public ZSTD_seqSymbol e450; + public ZSTD_seqSymbol e451; + public ZSTD_seqSymbol e452; + public ZSTD_seqSymbol e453; + public ZSTD_seqSymbol e454; + public ZSTD_seqSymbol e455; + public ZSTD_seqSymbol e456; + public ZSTD_seqSymbol e457; + public ZSTD_seqSymbol e458; + public ZSTD_seqSymbol e459; + public ZSTD_seqSymbol e460; + public ZSTD_seqSymbol e461; + public ZSTD_seqSymbol e462; + public ZSTD_seqSymbol e463; + public ZSTD_seqSymbol e464; + public ZSTD_seqSymbol e465; + public ZSTD_seqSymbol e466; + public ZSTD_seqSymbol e467; + public ZSTD_seqSymbol e468; + public ZSTD_seqSymbol e469; + public ZSTD_seqSymbol e470; + public ZSTD_seqSymbol e471; + public ZSTD_seqSymbol e472; + public ZSTD_seqSymbol e473; + public ZSTD_seqSymbol e474; + public ZSTD_seqSymbol 
e475; + public ZSTD_seqSymbol e476; + public ZSTD_seqSymbol e477; + public ZSTD_seqSymbol e478; + public ZSTD_seqSymbol e479; + public ZSTD_seqSymbol e480; + public ZSTD_seqSymbol e481; + public ZSTD_seqSymbol e482; + public ZSTD_seqSymbol e483; + public ZSTD_seqSymbol e484; + public ZSTD_seqSymbol e485; + public ZSTD_seqSymbol e486; + public ZSTD_seqSymbol e487; + public ZSTD_seqSymbol e488; + public ZSTD_seqSymbol e489; + public ZSTD_seqSymbol e490; + public ZSTD_seqSymbol e491; + public ZSTD_seqSymbol e492; + public ZSTD_seqSymbol e493; + public ZSTD_seqSymbol e494; + public ZSTD_seqSymbol e495; + public ZSTD_seqSymbol e496; + public ZSTD_seqSymbol e497; + public ZSTD_seqSymbol e498; + public ZSTD_seqSymbol e499; + public ZSTD_seqSymbol e500; + public ZSTD_seqSymbol e501; + public ZSTD_seqSymbol e502; + public ZSTD_seqSymbol e503; + public ZSTD_seqSymbol e504; + public ZSTD_seqSymbol e505; + public ZSTD_seqSymbol e506; + public ZSTD_seqSymbol e507; + public ZSTD_seqSymbol e508; + public ZSTD_seqSymbol e509; + public ZSTD_seqSymbol e510; + public ZSTD_seqSymbol e511; + public ZSTD_seqSymbol e512; + } +#endif + +#if NET8_0_OR_GREATER + [InlineArray(257)] + public unsafe struct _OFTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } + +#else + public unsafe struct _OFTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; + public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + public ZSTD_seqSymbol 
e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; + public 
ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol e158; + 
public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public 
ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + } +#endif + +#if NET8_0_OR_GREATER + [InlineArray(513)] + public unsafe struct _MLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } + +#else + public unsafe struct _MLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; + public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + public ZSTD_seqSymbol e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public 
ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; + public ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + 
public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol e158; + public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol 
e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public 
ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + public ZSTD_seqSymbol e257; + public ZSTD_seqSymbol e258; + public ZSTD_seqSymbol e259; + public ZSTD_seqSymbol e260; + public ZSTD_seqSymbol e261; + public ZSTD_seqSymbol e262; + public ZSTD_seqSymbol e263; + public ZSTD_seqSymbol e264; + public ZSTD_seqSymbol e265; + public ZSTD_seqSymbol e266; + public ZSTD_seqSymbol e267; + public ZSTD_seqSymbol e268; + public ZSTD_seqSymbol e269; + public ZSTD_seqSymbol e270; + public ZSTD_seqSymbol e271; + public ZSTD_seqSymbol e272; + public ZSTD_seqSymbol e273; + public ZSTD_seqSymbol e274; + public ZSTD_seqSymbol e275; + public ZSTD_seqSymbol e276; + public ZSTD_seqSymbol e277; + public ZSTD_seqSymbol e278; + public ZSTD_seqSymbol e279; + public ZSTD_seqSymbol e280; + public ZSTD_seqSymbol e281; + public ZSTD_seqSymbol e282; + public ZSTD_seqSymbol e283; + public ZSTD_seqSymbol e284; + public ZSTD_seqSymbol e285; + public ZSTD_seqSymbol e286; + public ZSTD_seqSymbol e287; + public ZSTD_seqSymbol e288; + public ZSTD_seqSymbol e289; + public ZSTD_seqSymbol e290; + public ZSTD_seqSymbol e291; + public ZSTD_seqSymbol e292; + public ZSTD_seqSymbol e293; + public ZSTD_seqSymbol e294; + public ZSTD_seqSymbol e295; + public ZSTD_seqSymbol e296; + public ZSTD_seqSymbol 
e297; + public ZSTD_seqSymbol e298; + public ZSTD_seqSymbol e299; + public ZSTD_seqSymbol e300; + public ZSTD_seqSymbol e301; + public ZSTD_seqSymbol e302; + public ZSTD_seqSymbol e303; + public ZSTD_seqSymbol e304; + public ZSTD_seqSymbol e305; + public ZSTD_seqSymbol e306; + public ZSTD_seqSymbol e307; + public ZSTD_seqSymbol e308; + public ZSTD_seqSymbol e309; + public ZSTD_seqSymbol e310; + public ZSTD_seqSymbol e311; + public ZSTD_seqSymbol e312; + public ZSTD_seqSymbol e313; + public ZSTD_seqSymbol e314; + public ZSTD_seqSymbol e315; + public ZSTD_seqSymbol e316; + public ZSTD_seqSymbol e317; + public ZSTD_seqSymbol e318; + public ZSTD_seqSymbol e319; + public ZSTD_seqSymbol e320; + public ZSTD_seqSymbol e321; + public ZSTD_seqSymbol e322; + public ZSTD_seqSymbol e323; + public ZSTD_seqSymbol e324; + public ZSTD_seqSymbol e325; + public ZSTD_seqSymbol e326; + public ZSTD_seqSymbol e327; + public ZSTD_seqSymbol e328; + public ZSTD_seqSymbol e329; + public ZSTD_seqSymbol e330; + public ZSTD_seqSymbol e331; + public ZSTD_seqSymbol e332; + public ZSTD_seqSymbol e333; + public ZSTD_seqSymbol e334; + public ZSTD_seqSymbol e335; + public ZSTD_seqSymbol e336; + public ZSTD_seqSymbol e337; + public ZSTD_seqSymbol e338; + public ZSTD_seqSymbol e339; + public ZSTD_seqSymbol e340; + public ZSTD_seqSymbol e341; + public ZSTD_seqSymbol e342; + public ZSTD_seqSymbol e343; + public ZSTD_seqSymbol e344; + public ZSTD_seqSymbol e345; + public ZSTD_seqSymbol e346; + public ZSTD_seqSymbol e347; + public ZSTD_seqSymbol e348; + public ZSTD_seqSymbol e349; + public ZSTD_seqSymbol e350; + public ZSTD_seqSymbol e351; + public ZSTD_seqSymbol e352; + public ZSTD_seqSymbol e353; + public ZSTD_seqSymbol e354; + public ZSTD_seqSymbol e355; + public ZSTD_seqSymbol e356; + public ZSTD_seqSymbol e357; + public ZSTD_seqSymbol e358; + public ZSTD_seqSymbol e359; + public ZSTD_seqSymbol e360; + public ZSTD_seqSymbol e361; + public ZSTD_seqSymbol e362; + public ZSTD_seqSymbol e363; + public 
ZSTD_seqSymbol e364; + public ZSTD_seqSymbol e365; + public ZSTD_seqSymbol e366; + public ZSTD_seqSymbol e367; + public ZSTD_seqSymbol e368; + public ZSTD_seqSymbol e369; + public ZSTD_seqSymbol e370; + public ZSTD_seqSymbol e371; + public ZSTD_seqSymbol e372; + public ZSTD_seqSymbol e373; + public ZSTD_seqSymbol e374; + public ZSTD_seqSymbol e375; + public ZSTD_seqSymbol e376; + public ZSTD_seqSymbol e377; + public ZSTD_seqSymbol e378; + public ZSTD_seqSymbol e379; + public ZSTD_seqSymbol e380; + public ZSTD_seqSymbol e381; + public ZSTD_seqSymbol e382; + public ZSTD_seqSymbol e383; + public ZSTD_seqSymbol e384; + public ZSTD_seqSymbol e385; + public ZSTD_seqSymbol e386; + public ZSTD_seqSymbol e387; + public ZSTD_seqSymbol e388; + public ZSTD_seqSymbol e389; + public ZSTD_seqSymbol e390; + public ZSTD_seqSymbol e391; + public ZSTD_seqSymbol e392; + public ZSTD_seqSymbol e393; + public ZSTD_seqSymbol e394; + public ZSTD_seqSymbol e395; + public ZSTD_seqSymbol e396; + public ZSTD_seqSymbol e397; + public ZSTD_seqSymbol e398; + public ZSTD_seqSymbol e399; + public ZSTD_seqSymbol e400; + public ZSTD_seqSymbol e401; + public ZSTD_seqSymbol e402; + public ZSTD_seqSymbol e403; + public ZSTD_seqSymbol e404; + public ZSTD_seqSymbol e405; + public ZSTD_seqSymbol e406; + public ZSTD_seqSymbol e407; + public ZSTD_seqSymbol e408; + public ZSTD_seqSymbol e409; + public ZSTD_seqSymbol e410; + public ZSTD_seqSymbol e411; + public ZSTD_seqSymbol e412; + public ZSTD_seqSymbol e413; + public ZSTD_seqSymbol e414; + public ZSTD_seqSymbol e415; + public ZSTD_seqSymbol e416; + public ZSTD_seqSymbol e417; + public ZSTD_seqSymbol e418; + public ZSTD_seqSymbol e419; + public ZSTD_seqSymbol e420; + public ZSTD_seqSymbol e421; + public ZSTD_seqSymbol e422; + public ZSTD_seqSymbol e423; + public ZSTD_seqSymbol e424; + public ZSTD_seqSymbol e425; + public ZSTD_seqSymbol e426; + public ZSTD_seqSymbol e427; + public ZSTD_seqSymbol e428; + public ZSTD_seqSymbol e429; + public ZSTD_seqSymbol 
e430; + public ZSTD_seqSymbol e431; + public ZSTD_seqSymbol e432; + public ZSTD_seqSymbol e433; + public ZSTD_seqSymbol e434; + public ZSTD_seqSymbol e435; + public ZSTD_seqSymbol e436; + public ZSTD_seqSymbol e437; + public ZSTD_seqSymbol e438; + public ZSTD_seqSymbol e439; + public ZSTD_seqSymbol e440; + public ZSTD_seqSymbol e441; + public ZSTD_seqSymbol e442; + public ZSTD_seqSymbol e443; + public ZSTD_seqSymbol e444; + public ZSTD_seqSymbol e445; + public ZSTD_seqSymbol e446; + public ZSTD_seqSymbol e447; + public ZSTD_seqSymbol e448; + public ZSTD_seqSymbol e449; + public ZSTD_seqSymbol e450; + public ZSTD_seqSymbol e451; + public ZSTD_seqSymbol e452; + public ZSTD_seqSymbol e453; + public ZSTD_seqSymbol e454; + public ZSTD_seqSymbol e455; + public ZSTD_seqSymbol e456; + public ZSTD_seqSymbol e457; + public ZSTD_seqSymbol e458; + public ZSTD_seqSymbol e459; + public ZSTD_seqSymbol e460; + public ZSTD_seqSymbol e461; + public ZSTD_seqSymbol e462; + public ZSTD_seqSymbol e463; + public ZSTD_seqSymbol e464; + public ZSTD_seqSymbol e465; + public ZSTD_seqSymbol e466; + public ZSTD_seqSymbol e467; + public ZSTD_seqSymbol e468; + public ZSTD_seqSymbol e469; + public ZSTD_seqSymbol e470; + public ZSTD_seqSymbol e471; + public ZSTD_seqSymbol e472; + public ZSTD_seqSymbol e473; + public ZSTD_seqSymbol e474; + public ZSTD_seqSymbol e475; + public ZSTD_seqSymbol e476; + public ZSTD_seqSymbol e477; + public ZSTD_seqSymbol e478; + public ZSTD_seqSymbol e479; + public ZSTD_seqSymbol e480; + public ZSTD_seqSymbol e481; + public ZSTD_seqSymbol e482; + public ZSTD_seqSymbol e483; + public ZSTD_seqSymbol e484; + public ZSTD_seqSymbol e485; + public ZSTD_seqSymbol e486; + public ZSTD_seqSymbol e487; + public ZSTD_seqSymbol e488; + public ZSTD_seqSymbol e489; + public ZSTD_seqSymbol e490; + public ZSTD_seqSymbol e491; + public ZSTD_seqSymbol e492; + public ZSTD_seqSymbol e493; + public ZSTD_seqSymbol e494; + public ZSTD_seqSymbol e495; + public ZSTD_seqSymbol e496; + public 
ZSTD_seqSymbol e497; + public ZSTD_seqSymbol e498; + public ZSTD_seqSymbol e499; + public ZSTD_seqSymbol e500; + public ZSTD_seqSymbol e501; + public ZSTD_seqSymbol e502; + public ZSTD_seqSymbol e503; + public ZSTD_seqSymbol e504; + public ZSTD_seqSymbol e505; + public ZSTD_seqSymbol e506; + public ZSTD_seqSymbol e507; + public ZSTD_seqSymbol e508; + public ZSTD_seqSymbol e509; + public ZSTD_seqSymbol e510; + public ZSTD_seqSymbol e511; + public ZSTD_seqSymbol e512; + } +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs new file mode 100644 index 000000000..e66390080 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_forceIgnoreChecksum_e + { + /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs new file mode 100644 index 000000000..3d3178801 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_format_e + { + /* zstd frame format, specified in zstd_compression_format.md (default) */ + ZSTD_f_zstd1 = 0, + /* Variant of zstd frame format, without initial 4-bytes magic number. + * Useful to save 4 bytes per generated frame. + * Decoder cannot recognise automatically this format, requiring this instruction. 
*/ + ZSTD_f_zstd1_magicless = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs new file mode 100644 index 000000000..8baa840de --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs @@ -0,0 +1,19 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_frameHeader + { + /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ + public ulong frameContentSize; + /* can be very large, up to <= frameContentSize */ + public ulong windowSize; + public uint blockSizeMax; + /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + public ZSTD_frameType_e frameType; + public uint headerSize; + /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ + public uint dictID; + public uint checksumFlag; + public uint _reserved1; + public uint _reserved2; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs new file mode 100644 index 000000000..54c509374 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_frameParameters + { + /**< 1: content size will be in frame header (when known) */ + public int contentSizeFlag; + /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ + public int checksumFlag; + /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ + public int noDictIDFlag; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs new file mode 100644 index 
000000000..22b7fece6 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs @@ -0,0 +1,18 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_frameProgression + { + /* nb input bytes read and buffered */ + public ulong ingested; + /* nb input bytes actually compressed */ + public ulong consumed; + /* nb of compressed bytes generated and buffered */ + public ulong produced; + /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ + public ulong flushed; + /* MT only : latest started job nb */ + public uint currentJobID; + /* MT only : nb of workers actively compressing at probe time */ + public uint nbActiveWorkers; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs new file mode 100644 index 000000000..65eca157c --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Contains the compressed frame size and an upper-bound for the decompressed frame size. + * Note: before using `compressedSize`, check for errors using ZSTD_isError(). 
+ * similarly, before using `decompressedBound`, check for errors using: + * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` + */ + public struct ZSTD_frameSizeInfo + { + public nuint nbBlocks; + public nuint compressedSize; + public ulong decompressedBound; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs new file mode 100644 index 000000000..9a451ee98 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_frameType_e + { + ZSTD_frame, + ZSTD_skippableFrame + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs new file mode 100644 index 000000000..a051c501d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs @@ -0,0 +1,18 @@ +namespace ZstdSharp.Unsafe +{ + /** ZSTD_fseCTablesMetadata_t : + * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and + * fse tables in fseTablesBuffer. + * fseTablesSize refers to the size of fse tables in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ + public unsafe struct ZSTD_fseCTablesMetadata_t + { + public SymbolEncodingType_e llType; + public SymbolEncodingType_e ofType; + public SymbolEncodingType_e mlType; + public fixed byte fseTablesBuffer[133]; + public nuint fseTablesSize; + /* This is to account for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ + public nuint lastCountSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs new file mode 100644 index 000000000..b8b43c914 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_fseCTables_t + { + public fixed uint offcodeCTable[193]; + public fixed uint matchlengthCTable[363]; + public fixed uint litlengthCTable[329]; + public FSE_repeat offcode_repeatMode; + public FSE_repeat matchlength_repeatMode; + public FSE_repeat litlength_repeatMode; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs new file mode 100644 index 000000000..46fe512b3 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_fseState + { + public nuint state; + public ZSTD_seqSymbol* table; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs new file mode 100644 index 000000000..ab0bb109d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs @@ -0,0 +1,7 @@ +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + public unsafe delegate uint ZSTD_getAllMatchesFn(ZSTD_match_t* param0, ZSTD_MatchState_t* param1, uint* param2, byte* param3, byte* param4, uint* rep, uint ll0, uint lengthToBeat); +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs new file mode 100644 index 000000000..7a2b76049 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs @@ -0,0 +1,17 @@ +namespace ZstdSharp.Unsafe +{ + /*********************************************** + * Entropy buffer statistics structs and funcs * + ***********************************************/ + /** ZSTD_hufCTablesMetadata_t : + * Stores Literals Block Type for a super-block in hType, and + * huffman tree description in hufDesBuffer. + * hufDesSize refers to the size of huffman tree description in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ + public unsafe struct ZSTD_hufCTablesMetadata_t + { + public SymbolEncodingType_e hType; + public fixed byte hufDesBuffer[128]; + public nuint hufDesSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs new file mode 100644 index 000000000..00311fbb4 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs @@ -0,0 +1,279 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_hufCTables_t + { + public _CTable_e__FixedBuffer CTable; + public HUF_repeat repeatMode; +#if NET8_0_OR_GREATER + [InlineArray(257)] + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + } + +#else + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; + public nuint e10; + public nuint e11; + public nuint e12; + public nuint e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint 
e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public nuint e62; + public nuint e63; + public nuint e64; + public nuint e65; + public nuint e66; + public nuint e67; + public nuint e68; + public nuint e69; + public nuint e70; + public nuint e71; + public nuint e72; + public nuint e73; + public nuint e74; + public nuint e75; + public nuint e76; + public nuint e77; + public nuint e78; + public nuint e79; + public nuint e80; + public nuint e81; + public nuint e82; + public nuint e83; + public nuint e84; + public nuint e85; + public nuint e86; + public nuint e87; + public nuint e88; + public nuint e89; + public nuint e90; + public nuint e91; + public nuint e92; + public nuint e93; + public nuint e94; + public nuint e95; + public nuint e96; + public nuint e97; + public nuint e98; + public nuint e99; + public nuint e100; + public nuint e101; + public nuint e102; + public nuint e103; + public nuint e104; + public nuint e105; + public nuint e106; + public nuint e107; + public nuint e108; + public nuint e109; + public nuint e110; + public nuint e111; + public nuint e112; + public nuint e113; + public nuint e114; + public nuint e115; + public nuint e116; + public nuint e117; + public nuint e118; + public nuint e119; + public nuint 
e120; + public nuint e121; + public nuint e122; + public nuint e123; + public nuint e124; + public nuint e125; + public nuint e126; + public nuint e127; + public nuint e128; + public nuint e129; + public nuint e130; + public nuint e131; + public nuint e132; + public nuint e133; + public nuint e134; + public nuint e135; + public nuint e136; + public nuint e137; + public nuint e138; + public nuint e139; + public nuint e140; + public nuint e141; + public nuint e142; + public nuint e143; + public nuint e144; + public nuint e145; + public nuint e146; + public nuint e147; + public nuint e148; + public nuint e149; + public nuint e150; + public nuint e151; + public nuint e152; + public nuint e153; + public nuint e154; + public nuint e155; + public nuint e156; + public nuint e157; + public nuint e158; + public nuint e159; + public nuint e160; + public nuint e161; + public nuint e162; + public nuint e163; + public nuint e164; + public nuint e165; + public nuint e166; + public nuint e167; + public nuint e168; + public nuint e169; + public nuint e170; + public nuint e171; + public nuint e172; + public nuint e173; + public nuint e174; + public nuint e175; + public nuint e176; + public nuint e177; + public nuint e178; + public nuint e179; + public nuint e180; + public nuint e181; + public nuint e182; + public nuint e183; + public nuint e184; + public nuint e185; + public nuint e186; + public nuint e187; + public nuint e188; + public nuint e189; + public nuint e190; + public nuint e191; + public nuint e192; + public nuint e193; + public nuint e194; + public nuint e195; + public nuint e196; + public nuint e197; + public nuint e198; + public nuint e199; + public nuint e200; + public nuint e201; + public nuint e202; + public nuint e203; + public nuint e204; + public nuint e205; + public nuint e206; + public nuint e207; + public nuint e208; + public nuint e209; + public nuint e210; + public nuint e211; + public nuint e212; + public nuint e213; + public nuint e214; + public nuint 
e215; + public nuint e216; + public nuint e217; + public nuint e218; + public nuint e219; + public nuint e220; + public nuint e221; + public nuint e222; + public nuint e223; + public nuint e224; + public nuint e225; + public nuint e226; + public nuint e227; + public nuint e228; + public nuint e229; + public nuint e230; + public nuint e231; + public nuint e232; + public nuint e233; + public nuint e234; + public nuint e235; + public nuint e236; + public nuint e237; + public nuint e238; + public nuint e239; + public nuint e240; + public nuint e241; + public nuint e242; + public nuint e243; + public nuint e244; + public nuint e245; + public nuint e246; + public nuint e247; + public nuint e248; + public nuint e249; + public nuint e250; + public nuint e251; + public nuint e252; + public nuint e253; + public nuint e254; + public nuint e255; + public nuint e256; + } +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs new file mode 100644 index 000000000..660b16124 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /**************************** + * Streaming + ****************************/ + public unsafe struct ZSTD_inBuffer_s + { + /**< start of input buffer */ + public void* src; + /**< size of input buffer */ + public nuint size; + /**< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size */ + public nuint pos; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs new file mode 100644 index 000000000..3f225a7ea --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /** + * Controls, for this matchState reset, whether indexing can continue where it + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero + * (ZSTDirp_reset). + */ + public enum ZSTD_indexResetPolicy_e + { + ZSTDirp_continue, + ZSTDirp_reset + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs new file mode 100644 index 000000000..8136cc517 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_litLocation_e + { + /* Stored entirely within litExtraBuffer */ + ZSTD_not_in_dst = 0, + /* Stored entirely within dst (in memory after current output write) */ + ZSTD_in_dst = 1, + /* Split between litExtraBuffer and dst */ + ZSTD_split = 2 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs new file mode 100644 index 000000000..6382a0c86 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_literalCompressionMode_e + { + /**< Automatically determine the compression mode based on the compression level. + * Negative compression levels will be uncompressed, and positive compression + * levels will be compressed. 
*/ + ZSTD_lcm_auto = 0, + /**< Always attempt Huffman compression. Uncompressed literals will still be + * emitted if Huffman compression is not profitable. */ + ZSTD_lcm_huffman = 1, + /**< Always emit uncompressed literals. */ + ZSTD_lcm_uncompressed = 2 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs new file mode 100644 index 000000000..9f8fa4a4f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_localDict + { + public void* dictBuffer; + public void* dict; + public nuint dictSize; + public ZSTD_dictContentType_e dictContentType; + public ZSTD_CDict_s* cdict; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs new file mode 100644 index 000000000..a31f06b2a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. 
*/ + public enum ZSTD_longLengthType_e + { + /* no longLengthType */ + ZSTD_llt_none = 0, + /* represents a long literal */ + ZSTD_llt_literalLength = 1, + /* represents a long match */ + ZSTD_llt_matchLength = 2 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs new file mode 100644 index 000000000..2c67c67bd --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_longOffset_e + { + ZSTD_lo_isRegularOffset, + ZSTD_lo_isLongOffset = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs new file mode 100644 index 000000000..b8a5f3d32 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /********************************* + * Compression internals structs * + *********************************/ + public struct ZSTD_match_t + { + /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ + public uint off; + /* Raw length of match */ + public uint len; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs new file mode 100644 index 000000000..af341a4ff --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_nextInputType_e + { + ZSTDnit_frameHeader, + ZSTDnit_blockHeader, + ZSTDnit_block, + ZSTDnit_lastBlock, + ZSTDnit_checksum, + ZSTDnit_skippableFrame + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs new file mode 100644 index 000000000..adc1a38f6 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /* Struct containing info needed to make decision about ldm inclusion */ + public struct ZSTD_optLdm_t + { + /* External match candidates store for this block */ + public RawSeqStore_t seqStore; + /* Start position of the current match candidate */ + public uint startPosInBlock; + /* End position of the current match candidate */ + public uint endPosInBlock; + /* Offset of the match candidate */ + public uint offset; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs new file mode 100644 index 000000000..e7d7f5cb1 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs @@ -0,0 +1,16 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_optimal_t + { + /* price from beginning of segment to this position */ + public int price; + /* offset of previous match */ + public uint off; + /* length of previous match */ + public uint mlen; + /* nb of literals since previous match */ + public uint litlen; + /* offset history after previous match */ + public fixed uint rep[3]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs new file mode 100644 index 000000000..6ecb86adc --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_outBuffer_s + { + /**< start of output buffer */ + public void* dst; + /**< size of output buffer */ + public nuint size; + /**< position where writing stopped. Will be updated. 
Necessarily 0 <= pos <= size */ + public nuint pos; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs new file mode 100644 index 000000000..02941dd0e --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_overlap_e + { + ZSTD_no_overlap, + ZSTD_overlap_src_before_dst + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs new file mode 100644 index 000000000..99acee5c0 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_paramSwitch_e + { + /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_auto = 0, + /* Force-enable the feature */ + ZSTD_ps_enable = 1, + /* Do not use the feature */ + ZSTD_ps_disable = 2 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs new file mode 100644 index 000000000..cff10cd14 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_parameters + { + public ZSTD_compressionParameters cParams; + public ZSTD_frameParameters fParams; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs new file mode 100644 index 000000000..d3fc6d936 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct 
ZSTD_prefixDict_s + { + public void* dict; + public nuint dictSize; + public ZSTD_dictContentType_e dictContentType; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs new file mode 100644 index 000000000..36ebf2493 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_refMultipleDDicts_e + { + /* Note: this enum controls ZSTD_d_refMultipleDDicts */ + ZSTD_rmd_refSingleDDict = 0, + ZSTD_rmd_refMultipleDDicts = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs new file mode 100644 index 000000000..417ff5b66 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_resetTarget_e + { + ZSTD_resetTarget_CDict, + ZSTD_resetTarget_CCtx + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs new file mode 100644 index 000000000..f78770e59 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs @@ -0,0 +1,17 @@ +namespace ZstdSharp.Unsafe +{ + public struct ZSTD_seqSymbol + { + public ushort nextState; + public byte nbAdditionalBits; + public byte nbBits; + public uint baseValue; + public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint baseValue) + { + this.nextState = nextState; + this.nbAdditionalBits = nbAdditionalBits; + this.nbBits = nbBits; + this.baseValue = baseValue; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs new file mode 100644 index 000000000..16925999b --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /*-******************************************************* + * Decompression types + *********************************************************/ + public struct ZSTD_seqSymbol_header + { + public uint fastMode; + public uint tableLog; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs new file mode 100644 index 000000000..ed8d9c8e6 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_sequenceFormat_e + { + /* ZSTD_Sequence[] has no block delimiters, just sequences */ + ZSTD_sf_noBlockDelimiters = 0, + /* ZSTD_Sequence[] contains explicit block delimiters */ + ZSTD_sf_explicitBlockDelimiters = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs new file mode 100644 index 000000000..e4e5e4b50 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs @@ -0,0 +1,16 @@ +namespace ZstdSharp.Unsafe +{ + /* Compression strategies, listed from fastest to strongest */ + public enum ZSTD_strategy + { + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs new file mode 100644 index 000000000..bcfc57b2d --- 
/dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs @@ -0,0 +1,16 @@ +namespace ZstdSharp.Unsafe +{ + /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types + * and size of the sequences statistics + */ + public struct ZSTD_symbolEncodingTypeStats_t + { + public uint LLtype; + public uint Offtype; + public uint MLtype; + public nuint size; + /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ + public nuint lastCountSize; + public int longOffsets; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs new file mode 100644 index 000000000..8bb66024a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum ZSTD_tableFillPurpose_e + { + ZSTD_tfp_forCCtx, + ZSTD_tfp_forCDict + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs new file mode 100644 index 000000000..46325d7b4 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs @@ -0,0 +1,21 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ZSTD_window_t + { + /* next block here to continue on current prefix */ + public byte* nextSrc; + /* All regular indexes relative to this position */ + public byte* @base; + /* extDict indexes relative to this position */ + public byte* dictBase; + /* below that point, need extDict */ + public uint dictLimit; + /* below that point, no more valid data */ + public uint lowLimit; + /* Number of times overflow correction has run since + * ZSTD_window_init(). Useful for debugging coredumps + * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. 
+ */ + public uint nbOverflowCorrections; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs new file mode 100644 index 000000000..ba6262bc1 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs @@ -0,0 +1,501 @@ +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*-******************************************************** + * Helper functions + **********************************************************/ + public static bool ZDICT_isError(nuint errorCode) + { + return ERR_isError(errorCode); + } + + public static string ZDICT_getErrorName(nuint errorCode) + { + return ERR_getErrorName(errorCode); + } + + private static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters* @params, uint* countLit, uint* offsetcodeCount, uint* matchlengthCount, uint* litlengthCount, uint* repOffsets, void* src, nuint srcSize, uint notificationLevel) + { + nuint blockSizeMax = (nuint)(1 << 17 < 1 << (int)@params->cParams.windowLog ? 
1 << 17 : 1 << (int)@params->cParams.windowLog); + nuint cSize; + if (srcSize > blockSizeMax) + srcSize = blockSizeMax; + { + nuint errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); + if (ERR_isError(errorCode)) + { + return; + } + } + + cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, 1 << 17, src, srcSize); + if (ERR_isError(cSize)) + { + return; + } + + if (cSize != 0) + { + SeqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc); + { + byte* bytePtr; + for (bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) + countLit[*bytePtr]++; + } + + { + uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + ZSTD_seqToCodes(seqStorePtr); + { + byte* codePtr = seqStorePtr->ofCode; + uint u; + for (u = 0; u < nbSeq; u++) + offsetcodeCount[codePtr[u]]++; + } + + { + byte* codePtr = seqStorePtr->mlCode; + uint u; + for (u = 0; u < nbSeq; u++) + matchlengthCount[codePtr[u]]++; + } + + { + byte* codePtr = seqStorePtr->llCode; + uint u; + for (u = 0; u < nbSeq; u++) + litlengthCount[codePtr[u]]++; + } + + if (nbSeq >= 2) + { + SeqDef_s* seq = seqStorePtr->sequencesStart; + uint offset1 = seq[0].offBase - 3; + uint offset2 = seq[1].offBase - 3; + if (offset1 >= 1024) + offset1 = 0; + if (offset2 >= 1024) + offset2 = 0; + repOffsets[offset1] += 3; + repOffsets[offset2] += 1; + } + } + } + } + + private static nuint ZDICT_totalSampleSize(nuint* fileSizes, uint nbFiles) + { + nuint total = 0; + uint u; + for (u = 0; u < nbFiles; u++) + total += fileSizes[u]; + return total; + } + + private static void ZDICT_insertSortCount(offsetCount_t* table, uint val, uint count) + { + uint u; + table[3].offset = val; + table[3].count = count; + for (u = 3; u > 0; u--) + { + offsetCount_t tmp; + if (table[u - 1].count >= table[u].count) + break; + tmp = table[u - 1]; + table[u - 1] = table[u]; + table[u] = tmp; + } + } + + /* ZDICT_flatLit() : + * rewrite `countLit` to contain a mostly flat but still compressible distribution 
of literals. + * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. + */ + private static void ZDICT_flatLit(uint* countLit) + { + int u; + for (u = 1; u < 256; u++) + countLit[u] = 2; + countLit[0] = 4; + countLit[253] = 1; + countLit[254] = 1; + } + + private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int compressionLevel, void* srcBuffer, nuint* fileSizes, uint nbFiles, void* dictBuffer, nuint dictBufferSize, uint notificationLevel) + { + uint* countLit = stackalloc uint[256]; + /* no final ; */ + nuint* hufTable = stackalloc nuint[257]; + uint* offcodeCount = stackalloc uint[31]; + short* offcodeNCount = stackalloc short[31]; + uint offcodeMax = ZSTD_highbit32((uint)(dictBufferSize + 128 * (1 << 10))); + uint* matchLengthCount = stackalloc uint[53]; + short* matchLengthNCount = stackalloc short[53]; + uint* litLengthCount = stackalloc uint[36]; + short* litLengthNCount = stackalloc short[36]; + uint* repOffset = stackalloc uint[1024]; + offsetCount_t* bestRepOffset = stackalloc offsetCount_t[4]; + EStats_ress_t esr = new EStats_ress_t + { + dict = null, + zc = null, + workPlace = null + }; + ZSTD_parameters @params; + uint u, huffLog = 11, Offlog = 8, mlLog = 9, llLog = 9, total; + nuint pos = 0, errorCode; + nuint eSize = 0; + nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); + nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 
1 : 0)); + byte* dstPtr = (byte*)dstBuffer; + uint* wksp = stackalloc uint[1216]; + if (offcodeMax > 30) + { + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed)); + goto _cleanup; + } + + for (u = 0; u < 256; u++) + countLit[u] = 1; + for (u = 0; u <= offcodeMax; u++) + offcodeCount[u] = 1; + for (u = 0; u <= 52; u++) + matchLengthCount[u] = 1; + for (u = 0; u <= 35; u++) + litLengthCount[u] = 1; + memset(repOffset, 0, sizeof(uint) * 1024); + repOffset[1] = repOffset[4] = repOffset[8] = 1; + memset(bestRepOffset, 0, (uint)(sizeof(offsetCount_t) * 4)); + if (compressionLevel == 0) + compressionLevel = 3; + @params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); + esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_rawContent, @params.cParams, ZSTD_defaultCMem); + esr.zc = ZSTD_createCCtx(); + esr.workPlace = malloc(1 << 17); + if (esr.dict == null || esr.zc == null || esr.workPlace == null) + { + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + goto _cleanup; + } + + for (u = 0; u < nbFiles; u++) + { + ZDICT_countEStats(esr, &@params, countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset, (sbyte*)srcBuffer + pos, fileSizes[u], notificationLevel); + pos += fileSizes[u]; + } + + if (notificationLevel >= 4) + { + for (u = 0; u <= offcodeMax; u++) + { + } + } + + { + nuint maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(uint) * 1216); + if (ERR_isError(maxNbBits)) + { + eSize = maxNbBits; + goto _cleanup; + } + + if (maxNbBits == 8) + { + ZDICT_flatLit(countLit); + maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(uint) * 1216); + assert(maxNbBits == 9); + } + + huffLog = (uint)maxNbBits; + } + + { + uint offset; + for (offset = 1; offset < 1024; offset++) + ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]); + } 
+ + total = 0; + for (u = 0; u <= offcodeMax; u++) + total += offcodeCount[u]; + errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, 1); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } + + Offlog = (uint)errorCode; + total = 0; + for (u = 0; u <= 52; u++) + total += matchLengthCount[u]; + errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, 52, 1); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } + + mlLog = (uint)errorCode; + total = 0; + for (u = 0; u <= 35; u++) + total += litLengthCount[u]; + errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, 35, 1); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } + + llLog = (uint)errorCode; + { + nuint hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(uint) * 1216); + if (ERR_isError(hhSize)) + { + eSize = hhSize; + goto _cleanup; + } + + dstPtr += hhSize; + maxDstSize -= hhSize; + eSize += hhSize; + } + + { + nuint ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, 30, Offlog); + if (ERR_isError(ohSize)) + { + eSize = ohSize; + goto _cleanup; + } + + dstPtr += ohSize; + maxDstSize -= ohSize; + eSize += ohSize; + } + + { + nuint mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, 52, mlLog); + if (ERR_isError(mhSize)) + { + eSize = mhSize; + goto _cleanup; + } + + dstPtr += mhSize; + maxDstSize -= mhSize; + eSize += mhSize; + } + + { + nuint lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, 35, llLog); + if (ERR_isError(lhSize)) + { + eSize = lhSize; + goto _cleanup; + } + + dstPtr += lhSize; + maxDstSize -= lhSize; + eSize += lhSize; + } + + if (maxDstSize < 12) + { + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + goto _cleanup; + } + + MEM_writeLE32(dstPtr + 0, repStartValue[0]); + MEM_writeLE32(dstPtr + 4, repStartValue[1]); + MEM_writeLE32(dstPtr + 
8, repStartValue[2]); + eSize += 12; + _cleanup: + ZSTD_freeCDict(esr.dict); + ZSTD_freeCCtx(esr.zc); + free(esr.workPlace); + return eSize; + } + + /** + * @returns the maximum repcode value + */ + private static uint ZDICT_maxRep(uint* reps) + { + uint maxRep = reps[0]; + int r; + for (r = 1; r < 3; ++r) + maxRep = maxRep > reps[r] ? maxRep : reps[r]; + return maxRep; + } + + /*! ZDICT_finalizeDictionary(): + * Given a custom content as a basis for dictionary, and a set of samples, + * finalize dictionary by adding headers and statistics according to the zstd + * dictionary format. + * + * Samples must be stored concatenated in a flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each + * sample in order. The samples are used to construct the statistics, so they + * should be representative of what you will compress with this dictionary. + * + * The compression level can be set in `parameters`. You should pass the + * compression level you expect to use in production. The statistics for each + * compression level differ, so tuning the dictionary for the compression level + * can help quite a bit. + * + * You can set an explicit dictionary ID in `parameters`, or allow us to pick + * a random dictionary ID for you, but we can't guarantee no collisions. + * + * The dstDictBuffer and the dictContent may overlap, and the content will be + * appended to the end of the header. If the header + the content doesn't fit in + * maxDictSize the beginning of the content is truncated to make room, since it + * is presumed that the most profitable content is at the end of the dictionary, + * since that is the cheapest to reference. + * + * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). + * + * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), + * or an error code, which can be tested by ZDICT_isError(). 
+ * Note: ZDICT_finalizeDictionary() will push notifications into stderr if + * instructed to, using notificationLevel>0. + * NOTE: This function currently may fail in several edge cases including: + * * Not enough samples + * * Samples are uncompressible + * * Samples are all exactly the same + */ + public static nuint ZDICT_finalizeDictionary(void* dictBuffer, nuint dictBufferCapacity, void* customDictContent, nuint dictContentSize, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_params_t @params) + { + nuint hSize; + byte* header = stackalloc byte[256]; + int compressionLevel = @params.compressionLevel == 0 ? 3 : @params.compressionLevel; + uint notificationLevel = @params.notificationLevel; + /* The final dictionary content must be at least as large as the largest repcode */ + nuint minContentSize = ZDICT_maxRep(repStartValue); + nuint paddingSize; + if (dictBufferCapacity < dictContentSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + if (dictBufferCapacity < 256) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + MEM_writeLE32(header, 0xEC30A437); + { + ulong randomID = ZSTD_XXH64(customDictContent, dictContentSize, 0); + uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); + uint dictID = @params.dictID != 0 ? 
@params.dictID : compliantID; + MEM_writeLE32(header + 4, dictID); + } + + hSize = 8; + { + nuint eSize = ZDICT_analyzeEntropy(header + hSize, 256 - hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, customDictContent, dictContentSize, notificationLevel); + if (ZDICT_isError(eSize)) + return eSize; + hSize += eSize; + } + + if (hSize + dictContentSize > dictBufferCapacity) + { + dictContentSize = dictBufferCapacity - hSize; + } + + if (dictContentSize < minContentSize) + { + if (hSize + minContentSize > dictBufferCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + paddingSize = minContentSize - dictContentSize; + } + else + { + paddingSize = 0; + } + + { + nuint dictSize = hSize + paddingSize + dictContentSize; + /* The dictionary consists of the header, optional padding, and the content. + * The padding comes before the content because the "best" position in the + * dictionary is the last byte. + */ + byte* outDictHeader = (byte*)dictBuffer; + byte* outDictPadding = outDictHeader + hSize; + byte* outDictContent = outDictPadding + paddingSize; + assert(dictSize <= dictBufferCapacity); + assert(outDictContent + dictContentSize == (byte*)dictBuffer + dictSize); + memmove(outDictContent, customDictContent, dictContentSize); + memcpy(outDictHeader, header, (uint)hSize); + memset(outDictPadding, 0, (uint)paddingSize); + return dictSize; + } + } + + private static nuint ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, nuint dictContentSize, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_params_t @params) + { + int compressionLevel = @params.compressionLevel == 0 ? 
3 : @params.compressionLevel; + uint notificationLevel = @params.notificationLevel; + nuint hSize = 8; + { + nuint eSize = ZDICT_analyzeEntropy((sbyte*)dictBuffer + hSize, dictBufferCapacity - hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, notificationLevel); + if (ZDICT_isError(eSize)) + return eSize; + hSize += eSize; + } + + MEM_writeLE32(dictBuffer, 0xEC30A437); + { + ulong randomID = ZSTD_XXH64((sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); + uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); + uint dictID = @params.dictID != 0 ? @params.dictID : compliantID; + MEM_writeLE32((sbyte*)dictBuffer + 4, dictID); + } + + if (hSize + dictContentSize < dictBufferCapacity) + memmove((sbyte*)dictBuffer + hSize, (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); + return dictBufferCapacity < hSize + dictContentSize ? dictBufferCapacity : hSize + dictContentSize; + } + + /*! ZDICT_trainFromBuffer(): + * Train a dictionary from an array of samples. + * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, + * f=20, and accel=1. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Note: Dictionary training will fail if there are not enough samples to construct a + * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). + * If dictionary training fails, you should use zstd without a dictionary, as the dictionary + * would've been ineffective anyways. 
If you believe your samples would benefit from a dictionary + * please open an issue with details, and we can look into it. + * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousand samples, though this can vary a lot. + * It's recommended that total size of all samples be about 100 times the target size of dictionary. + */ + public static nuint ZDICT_trainFromBuffer(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples) + { + ZDICT_fastCover_params_t @params; + @params = new ZDICT_fastCover_params_t + { + d = 8, + steps = 4 + }; + @params.zParams.compressionLevel = 3; + return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, &@params); + } + + public static nuint ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, nuint dictContentSize, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples) + { + ZDICT_params_t @params; + @params = new ZDICT_params_t(); + return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, @params); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs new file mode 100644 index 000000000..875d50bab --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs @@ -0,0 +1,7 @@ +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static readonly ZSTD_customMem ZSTD_defaultCMem = new ZSTD_customMem(customAlloc: null, customFree: null, opaque: null); + } +} \ No newline at end of file diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs new file mode 100644 index 000000000..4cb1c744b --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs @@ -0,0 +1,49 @@ +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*-**************************************** + * Version + ******************************************/ + public static uint ZSTD_versionNumber() + { + return 1 * 100 * 100 + 5 * 100 + 7; + } + + /*! ZSTD_versionString() : + * Return runtime library version, like "1.4.5". Requires v1.3.0+. */ + public static string ZSTD_versionString() + { + return "1.5.7"; + } + + /*! ZSTD_isError() : + * tells if a return value is an error code + * symbol is required for external callers */ + public static bool ZSTD_isError(nuint code) + { + return ERR_isError(code); + } + + /*! ZSTD_getErrorName() : + * provides error code string from function result (useful for debugging) */ + public static string ZSTD_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } + + /*! ZSTD_getError() : + * convert a `size_t` function result into a proper ZSTD_errorCode enum */ + public static ZSTD_ErrorCode ZSTD_getErrorCode(nuint code) + { + return ERR_getErrorCode(code); + } + + /*! 
ZSTD_getErrorString() : + * provides error code string from enum */ + public static string ZSTD_getErrorString(ZSTD_ErrorCode code) + { + return ERR_getErrorString(code); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs new file mode 100644 index 000000000..3e2e36a07 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -0,0 +1,8222 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; +using System.Numerics; +using System.Runtime.CompilerServices; +using System.Diagnostics; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*-************************************* + * Helper functions + ***************************************/ + /* ZSTD_compressBound() + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). + */ + public static nuint ZSTD_compressBound(nuint srcSize) + { + nuint r = srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) ? 0 : srcSize + (srcSize >> 8) + (srcSize < 128 << 10 ? 
(128 << 10) - srcSize >> 11 : 0); + if (r == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return r; + } + + public static ZSTD_CCtx_s* ZSTD_createCCtx() + { + return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); + } + + private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) + { + assert(cctx != null); + *cctx = new ZSTD_CCtx_s + { + customMem = memManager, + bmi2 = 0 + }; + { + nuint err = ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_parameters); + assert(!ERR_isError(err)); + } + } + + public static ZSTD_CCtx_s* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) + { + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + return null; + { + ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_CCtx_s), customMem); + if (cctx == null) + return null; + ZSTD_initCCtx(cctx, customMem); + return cctx; + } + } + + /*! ZSTD_initStatic*() : + * Initialize an object using a pre-allocated fixed-size buffer. + * workspace: The memory area to emplace the object into. + * Provided pointer *must be 8-bytes aligned*. + * Buffer must outlive object. + * workspaceSize: Use ZSTD_estimate*Size() to determine + * how large workspace must be to support target scenario. + * @return : pointer to object (same address as workspace, just different type), + * or NULL if error (size too small, incorrect alignment, etc.) + * Note : zstd will never resize nor malloc() when using a static buffer. + * If the object requires more memory than available, + * zstd will just error out (typically ZSTD_error_memory_allocation). + * Note 2 : there is no corresponding "free" function. + * Since workspace is allocated externally, it must be freed externally too. + * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level + * into its associated cParams. 
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by + * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). + * Limitation 2 : static cctx currently not compatible with multi-threading. + * Limitation 3 : static dctx is incompatible with legacy support. + */ + public static ZSTD_CCtx_s* ZSTD_initStaticCCtx(void* workspace, nuint workspaceSize) + { + ZSTD_cwksp ws; + ZSTD_CCtx_s* cctx; + if (workspaceSize <= (nuint)sizeof(ZSTD_CCtx_s)) + return null; + if (((nuint)workspace & 7) != 0) + return null; + ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc); + cctx = (ZSTD_CCtx_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CCtx_s)); + if (cctx == null) + return null; + *cctx = new ZSTD_CCtx_s(); + ZSTD_cwksp_move(&cctx->workspace, &ws); + cctx->staticSize = workspaceSize; + if (ZSTD_cwksp_check_available(&cctx->workspace, (nuint)(((8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208) + 2 * sizeof(ZSTD_compressedBlockState_t))) == 0) + return null; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, (nuint)sizeof(ZSTD_compressedBlockState_t)); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, (nuint)sizeof(ZSTD_compressedBlockState_t)); + cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208); + cctx->tmpWkspSize = (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208; + cctx->bmi2 = 0; + return cctx; + } + + /** + * Clears and frees all of the dictionaries in the CCtx. 
+ */ + private static void ZSTD_clearAllDicts(ZSTD_CCtx_s* cctx) + { + ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); + ZSTD_freeCDict(cctx->localDict.cdict); + cctx->localDict = new ZSTD_localDict(); + cctx->prefixDict = new ZSTD_prefixDict_s(); + cctx->cdict = null; + } + + private static nuint ZSTD_sizeof_localDict(ZSTD_localDict dict) + { + nuint bufferSize = dict.dictBuffer != null ? dict.dictSize : 0; + nuint cdictSize = ZSTD_sizeof_CDict(dict.cdict); + return bufferSize + cdictSize; + } + + private static void ZSTD_freeCCtxContent(ZSTD_CCtx_s* cctx) + { + assert(cctx != null); + assert(cctx->staticSize == 0); + ZSTD_clearAllDicts(cctx); + ZSTDMT_freeCCtx(cctx->mtctx); + cctx->mtctx = null; + ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); + } + + public static nuint ZSTD_freeCCtx(ZSTD_CCtx_s* cctx) + { + if (cctx == null) + return 0; + if (cctx->staticSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + { + int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + ZSTD_freeCCtxContent(cctx); + if (cctxInWorkspace == 0) + ZSTD_customFree(cctx, cctx->customMem); + } + + return 0; + } + + private static nuint ZSTD_sizeof_mtctx(ZSTD_CCtx_s* cctx) + { + return ZSTDMT_sizeof_CCtx(cctx->mtctx); + } + + /*! ZSTD_sizeof_*() : Requires v1.4.0+ + * These functions give the _current_ memory usage of selected object. + * Note that object memory usage can evolve (increase or decrease) over time. */ + public static nuint ZSTD_sizeof_CCtx(ZSTD_CCtx_s* cctx) + { + if (cctx == null) + return 0; + return (nuint)(cctx->workspace.workspace == cctx ? 
0 : sizeof(ZSTD_CCtx_s)) + ZSTD_cwksp_sizeof(&cctx->workspace) + ZSTD_sizeof_localDict(cctx->localDict) + ZSTD_sizeof_mtctx(cctx); + } + + public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) + { + return ZSTD_sizeof_CCtx(zcs); + } + + /* private API call, for dictBuilder only */ + private static SeqStore_t* ZSTD_getSeqStore(ZSTD_CCtx_s* ctx) + { + return &ctx->seqStore; + } + + /* Returns true if the strategy supports using a row based matchfinder */ + private static int ZSTD_rowMatchFinderSupported(ZSTD_strategy strategy) + { + return strategy >= ZSTD_strategy.ZSTD_greedy && strategy <= ZSTD_strategy.ZSTD_lazy2 ? 1 : 0; + } + + /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder + * for this compression. + */ + private static int ZSTD_rowMatchFinderUsed(ZSTD_strategy strategy, ZSTD_paramSwitch_e mode) + { + assert(mode != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return ZSTD_rowMatchFinderSupported(strategy) != 0 && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + } + + /* Returns row matchfinder usage given an initial mode and cParams */ + private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return mode; + mode = ZSTD_paramSwitch_e.ZSTD_ps_disable; + if (ZSTD_rowMatchFinderSupported(cParams->strategy) == 0) + return mode; + if (cParams->windowLog > 14) + mode = ZSTD_paramSwitch_e.ZSTD_ps_enable; + return mode; + } + + /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ + private static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return mode; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 17 ? 
ZSTD_paramSwitch_e.ZSTD_ps_enable : ZSTD_paramSwitch_e.ZSTD_ps_disable; + } + + /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ + private static int ZSTD_allocateChainTable(ZSTD_strategy strategy, ZSTD_paramSwitch_e useRowMatchFinder, uint forDDSDict) + { + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return forDDSDict != 0 || strategy != ZSTD_strategy.ZSTD_fast && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 ? 1 : 0; + } + + /* Returns ZSTD_ps_enable if compression parameters are such that we should + * enable long distance matching (wlog >= 27, strategy >= btopt). + * Returns ZSTD_ps_disable otherwise. + */ + private static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return mode; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 27 ? ZSTD_paramSwitch_e.ZSTD_ps_enable : ZSTD_paramSwitch_e.ZSTD_ps_disable; + } + + private static int ZSTD_resolveExternalSequenceValidation(int mode) + { + return mode; + } + + /* Resolves maxBlockSize to the default if no value is present. */ + private static nuint ZSTD_resolveMaxBlockSize(nuint maxBlockSize) + { + if (maxBlockSize == 0) + { + return 1 << 17; + } + else + { + return maxBlockSize; + } + } + + private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) + { + if (value != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return value; + if (cLevel < 10) + { + return ZSTD_paramSwitch_e.ZSTD_ps_disable; + } + else + { + return ZSTD_paramSwitch_e.ZSTD_ps_enable; + } + } + + /* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
*/ + private static int ZSTD_CDictIndicesAreTagged(ZSTD_compressionParameters* cParams) + { + return cParams->strategy == ZSTD_strategy.ZSTD_fast || cParams->strategy == ZSTD_strategy.ZSTD_dfast ? 1 : 0; + } + + private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams(ZSTD_compressionParameters cParams) + { + ZSTD_CCtx_params_s cctxParams; + ZSTD_CCtxParams_init(&cctxParams, 3); + cctxParams.cParams = cParams; + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); + if (cctxParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); + assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); + assert(cctxParams.ldmParams.hashRateLog < 32); + } + + cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams); + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, cctxParams.compressionLevel); + assert(ZSTD_checkCParams(cParams) == 0); + return cctxParams; + } + + private static ZSTD_CCtx_params_s* ZSTD_createCCtxParams_advanced(ZSTD_customMem customMem) + { + ZSTD_CCtx_params_s* @params; + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + return null; + @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc((nuint)sizeof(ZSTD_CCtx_params_s), customMem); + if (@params == null) + { + return null; + } + + ZSTD_CCtxParams_init(@params, 3); + @params->customMem = customMem; + return @params; + } + + /*! 
ZSTD_CCtx_params : + * Quick howto : + * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure + * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into + * an existing ZSTD_CCtx_params structure. + * This is similar to + * ZSTD_CCtx_setParameter(). + * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to + * an existing CCtx. + * These parameters will be applied to + * all subsequent frames. + * - ZSTD_compressStream2() : Do compression using the CCtx. + * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer. + * + * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() + * for static allocation of CCtx for single-threaded compression. + */ + public static ZSTD_CCtx_params_s* ZSTD_createCCtxParams() + { + return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); + } + + public static nuint ZSTD_freeCCtxParams(ZSTD_CCtx_params_s* @params) + { + if (@params == null) + { + return 0; + } + + ZSTD_customFree(@params, @params->customMem); + return 0; + } + + /*! ZSTD_CCtxParams_reset() : + * Reset params to default values. + */ + public static nuint ZSTD_CCtxParams_reset(ZSTD_CCtx_params_s* @params) + { + return ZSTD_CCtxParams_init(@params, 3); + } + + /*! ZSTD_CCtxParams_init() : + * Initializes the compression parameters of cctxParams according to + * compression level. All other parameters are reset to their default values. + */ + public static nuint ZSTD_CCtxParams_init(ZSTD_CCtx_params_s* cctxParams, int compressionLevel) + { + if (cctxParams == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + *cctxParams = new ZSTD_CCtx_params_s + { + compressionLevel = compressionLevel + }; + cctxParams->fParams.contentSizeFlag = 1; + return 0; + } + + /** + * Initializes `cctxParams` from `params` and `compressionLevel`. + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. 
+ */ + private static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters* @params, int compressionLevel) + { + assert(ZSTD_checkCParams(@params->cParams) == 0); + *cctxParams = new ZSTD_CCtx_params_s + { + cParams = @params->cParams, + fParams = @params->fParams, + compressionLevel = compressionLevel, + useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &@params->cParams), + postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &@params->cParams) + }; + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &@params->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); + } + + /*! ZSTD_CCtxParams_init_advanced() : + * Initializes the compression and frame parameters of cctxParams according to + * params. All other parameters are reset to their default values. + */ + public static nuint ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters @params) + { + if (cctxParams == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + { + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_CCtxParams_init_internal(cctxParams, &@params, 0); + return 0; + } + + /** + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. + * @param params Validated zstd parameters. 
+ */ + private static void ZSTD_CCtxParams_setZstdParams(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters* @params) + { + assert(ZSTD_checkCParams(@params->cParams) == 0); + cctxParams->cParams = @params->cParams; + cctxParams->fParams = @params->fParams; + cctxParams->compressionLevel = 0; + } + + /*! ZSTD_cParam_getBounds() : + * All parameters must belong to an interval with lower and upper bounds, + * otherwise they will either trigger an error or be automatically clamped. + * @return : a structure, ZSTD_bounds, which contains + * - an error status field, which must be tested using ZSTD_isError() + * - lower and upper bounds, both inclusive + */ + public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) + { + ZSTD_bounds bounds = new ZSTD_bounds + { + error = 0, + lowerBound = 0, + upperBound = 0 + }; + switch (param) + { + case ZSTD_cParameter.ZSTD_c_compressionLevel: + bounds.lowerBound = ZSTD_minCLevel(); + bounds.upperBound = ZSTD_maxCLevel(); + return bounds; + case ZSTD_cParameter.ZSTD_c_windowLog: + bounds.lowerBound = 10; + bounds.upperBound = sizeof(nuint) == 4 ? 30 : 31; + return bounds; + case ZSTD_cParameter.ZSTD_c_hashLog: + bounds.lowerBound = 6; + bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_chainLog: + bounds.lowerBound = 6; + bounds.upperBound = sizeof(nuint) == 4 ? 29 : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_searchLog: + bounds.lowerBound = 1; + bounds.upperBound = (sizeof(nuint) == 4 ? 
30 : 31) - 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_minMatch: + bounds.lowerBound = 3; + bounds.upperBound = 7; + return bounds; + case ZSTD_cParameter.ZSTD_c_targetLength: + bounds.lowerBound = 0; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_strategy: + bounds.lowerBound = (int)ZSTD_strategy.ZSTD_fast; + bounds.upperBound = (int)ZSTD_strategy.ZSTD_btultra2; + return bounds; + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_checksumFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_nbWorkers: + bounds.lowerBound = 0; + bounds.upperBound = sizeof(void*) == 4 ? 64 : 256; + return bounds; + case ZSTD_cParameter.ZSTD_c_jobSize: + bounds.lowerBound = 0; + bounds.upperBound = MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20); + return bounds; + case ZSTD_cParameter.ZSTD_c_overlapLog: + bounds.lowerBound = 0; + bounds.upperBound = 9; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + bounds.lowerBound = 6; + bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + bounds.lowerBound = 4; + bounds.upperBound = 4096; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + bounds.lowerBound = 1; + bounds.upperBound = 8; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + bounds.lowerBound = 0; + bounds.upperBound = (sizeof(nuint) == 4 ? 
30 : 31) - 6; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; + bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + bounds.lowerBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictDefaultAttach; + bounds.upperBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictForceLoad; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + bounds.lowerBound = 1340; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + bounds.lowerBound = 0; + bounds.upperBound = 2147483647; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + bounds.lowerBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters; + bounds.upperBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + bounds.lowerBound = 0; + bounds.upperBound = 6; + return bounds; + 
case ZSTD_cParameter.ZSTD_c_experimentalParam14: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + bounds.lowerBound = 1 << 10; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + default: + bounds.error = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return bounds; + } + } + + /* ZSTD_cParam_clampBounds: + * Clamps the value into the bounded range. 
+ */ + private static nuint ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); + if (ERR_isError(bounds.error)) + return bounds.error; + if (*value < bounds.lowerBound) + *value = bounds.lowerBound; + if (*value > bounds.upperBound) + *value = bounds.upperBound; + return 0; + } + + private static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) + { + switch (param) + { + case ZSTD_cParameter.ZSTD_c_compressionLevel: + case ZSTD_cParameter.ZSTD_c_hashLog: + case ZSTD_cParameter.ZSTD_c_chainLog: + case ZSTD_cParameter.ZSTD_c_searchLog: + case ZSTD_cParameter.ZSTD_c_minMatch: + case ZSTD_cParameter.ZSTD_c_targetLength: + case ZSTD_cParameter.ZSTD_c_strategy: + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + return 1; + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + case ZSTD_cParameter.ZSTD_c_windowLog: + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + case ZSTD_cParameter.ZSTD_c_checksumFlag: + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + case ZSTD_cParameter.ZSTD_c_nbWorkers: + case ZSTD_cParameter.ZSTD_c_jobSize: + case ZSTD_cParameter.ZSTD_c_overlapLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + case 
ZSTD_cParameter.ZSTD_c_experimentalParam14: + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + default: + return 0; + } + } + + /*! ZSTD_CCtx_setParameter() : + * Set one compression parameter, selected by enum ZSTD_cParameter. + * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). + * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). + * Setting a parameter is generally only possible during frame initialization (before starting compression). + * Exception : when using multi-threading mode (nbWorkers >= 1), + * the following parameters can be updated _during_ compression (within same frame): + * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. + * new parameters will be active for next job only (after a flush()). + * @return : an error code (which can be tested using ZSTD_isError()). 
 */
public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int value)
{
    /* Once a frame has started, only the update-authorized subset may change. */
    if (cctx->streamStage != ZSTD_cStreamStage.zcss_init)
    {
        if (ZSTD_isUpdateAuthorized(param) != 0)
        {
            cctx->cParamsChanged = 1;
        }
        else
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong));
        }
    }

    switch (param)
    {
        case ZSTD_cParameter.ZSTD_c_nbWorkers:
            /* MT is not supported for statically allocated contexts. */
            if (value != 0 && cctx->staticSize != 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported));
            }

            break;
        case ZSTD_cParameter.ZSTD_c_compressionLevel:
        case ZSTD_cParameter.ZSTD_c_windowLog:
        case ZSTD_cParameter.ZSTD_c_hashLog:
        case ZSTD_cParameter.ZSTD_c_chainLog:
        case ZSTD_cParameter.ZSTD_c_searchLog:
        case ZSTD_cParameter.ZSTD_c_minMatch:
        case ZSTD_cParameter.ZSTD_c_targetLength:
        case ZSTD_cParameter.ZSTD_c_strategy:
        case ZSTD_cParameter.ZSTD_c_ldmHashRateLog:
        case ZSTD_cParameter.ZSTD_c_experimentalParam2:
        case ZSTD_cParameter.ZSTD_c_contentSizeFlag:
        case ZSTD_cParameter.ZSTD_c_checksumFlag:
        case ZSTD_cParameter.ZSTD_c_dictIDFlag:
        case ZSTD_cParameter.ZSTD_c_experimentalParam3:
        case ZSTD_cParameter.ZSTD_c_experimentalParam4:
        case ZSTD_cParameter.ZSTD_c_experimentalParam5:
        case ZSTD_cParameter.ZSTD_c_jobSize:
        case ZSTD_cParameter.ZSTD_c_overlapLog:
        case ZSTD_cParameter.ZSTD_c_experimentalParam1:
        case ZSTD_cParameter.ZSTD_c_experimentalParam8:
        case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching:
        case ZSTD_cParameter.ZSTD_c_ldmHashLog:
        case ZSTD_cParameter.ZSTD_c_ldmMinMatch:
        case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog:
        case ZSTD_cParameter.ZSTD_c_targetCBlockSize:
        case ZSTD_cParameter.ZSTD_c_experimentalParam7:
        case ZSTD_cParameter.ZSTD_c_experimentalParam9:
        case ZSTD_cParameter.ZSTD_c_experimentalParam10:
        case ZSTD_cParameter.ZSTD_c_experimentalParam11:
        case ZSTD_cParameter.ZSTD_c_experimentalParam12:
        case ZSTD_cParameter.ZSTD_c_experimentalParam13:
        case ZSTD_cParameter.ZSTD_c_experimentalParam20:
        case ZSTD_cParameter.ZSTD_c_experimentalParam14:
        case ZSTD_cParameter.ZSTD_c_experimentalParam15:
        case ZSTD_cParameter.ZSTD_c_experimentalParam16:
        case ZSTD_cParameter.ZSTD_c_experimentalParam17:
        case ZSTD_cParameter.ZSTD_c_experimentalParam18:
        case ZSTD_cParameter.ZSTD_c_experimentalParam19:
            break;
        default:
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported));
    }

    /* Actual storage (and validation) is delegated to the params object. */
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}

/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+
 * Similar to ZSTD_CCtx_setParameter.
 * Set one compression parameter, selected by enum ZSTD_cParameter.
 * Parameters must be applied to a ZSTD_CCtx using
 * ZSTD_CCtx_setParametersUsingCCtxParams().
 * @result : a code representing success or failure (which can be tested with
 * ZSTD_isError()).
 */
public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, ZSTD_cParameter param, int value)
{
    switch (param)
    {
        case ZSTD_cParameter.ZSTD_c_experimentalParam2:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->format = (ZSTD_format_e)value;
            return (nuint)CCtxParams->format;
        case ZSTD_cParameter.ZSTD_c_compressionLevel:
            {
                {
                    nuint err_code = ZSTD_cParam_clampBounds(param, &value);
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                /* 0 selects the default compression level (3). */
                if (value == 0)
                    CCtxParams->compressionLevel = 3;
                else
                    CCtxParams->compressionLevel = value;
                if (CCtxParams->compressionLevel >= 0)
                    return (nuint)CCtxParams->compressionLevel;
                return 0;
            }

        case ZSTD_cParameter.ZSTD_c_windowLog:
            /* 0 means "use default"; non-zero values must be within bounds. */
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.windowLog = (uint)value;
            return CCtxParams->cParams.windowLog;
        case ZSTD_cParameter.ZSTD_c_hashLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.hashLog = (uint)value;
            return CCtxParams->cParams.hashLog;
        case ZSTD_cParameter.ZSTD_c_chainLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.chainLog = (uint)value;
            return CCtxParams->cParams.chainLog;
        case ZSTD_cParameter.ZSTD_c_searchLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.searchLog = (uint)value;
            return (nuint)value;
        case ZSTD_cParameter.ZSTD_c_minMatch:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.minMatch = (uint)value;
            return CCtxParams->cParams.minMatch;
        case ZSTD_cParameter.ZSTD_c_targetLength:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->cParams.targetLength = (uint)value;
            return CCtxParams->cParams.targetLength;
        case ZSTD_cParameter.ZSTD_c_strategy:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->cParams.strategy = (ZSTD_strategy)value;
            return (nuint)CCtxParams->cParams.strategy;
        /* frame parameters */
        case ZSTD_cParameter.ZSTD_c_contentSizeFlag:
            CCtxParams->fParams.contentSizeFlag = value != 0 ? 1 : 0;
            return (nuint)CCtxParams->fParams.contentSizeFlag;
        case ZSTD_cParameter.ZSTD_c_checksumFlag:
            CCtxParams->fParams.checksumFlag = value != 0 ? 1 : 0;
            return (nuint)CCtxParams->fParams.checksumFlag;
        case ZSTD_cParameter.ZSTD_c_dictIDFlag:
            /* stored inverted: the field records "no dict ID". */
            CCtxParams->fParams.noDictIDFlag = value == 0 ? 1 : 0;
            return CCtxParams->fParams.noDictIDFlag == 0 ? 1U : 0U;
        case ZSTD_cParameter.ZSTD_c_experimentalParam3:
            CCtxParams->forceWindow = value != 0 ? 1 : 0;
            return (nuint)CCtxParams->forceWindow;
        case ZSTD_cParameter.ZSTD_c_experimentalParam4:
            {
                ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam4, (int)pref) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

                CCtxParams->attachDictPref = pref;
                return (nuint)CCtxParams->attachDictPref;
            }

        case ZSTD_cParameter.ZSTD_c_experimentalParam5:
            {
                ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam5, (int)lcm) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

                CCtxParams->literalCompressionMode = lcm;
                return (nuint)CCtxParams->literalCompressionMode;
            }

        /* multi-threading parameters: clamped, not rejected */
        case ZSTD_cParameter.ZSTD_c_nbWorkers:
            {
                nuint err_code = ZSTD_cParam_clampBounds(param, &value);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            CCtxParams->nbWorkers = value;
            return (nuint)CCtxParams->nbWorkers;
        case ZSTD_cParameter.ZSTD_c_jobSize:
            /* non-zero job sizes are raised to the minimum of 512 KB */
            if (value != 0 && value < 512 * (1 << 10))
                value = 512 * (1 << 10);
            {
                nuint err_code = ZSTD_cParam_clampBounds(param, &value);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            assert(value >= 0);
            CCtxParams->jobSize = (nuint)value;
            return CCtxParams->jobSize;
        case ZSTD_cParameter.ZSTD_c_overlapLog:
            {
                nuint err_code = ZSTD_cParam_clampBounds(ZSTD_cParameter.ZSTD_c_overlapLog, &value);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            CCtxParams->overlapLog = value;
            return (nuint)CCtxParams->overlapLog;
        case ZSTD_cParameter.ZSTD_c_experimentalParam1:
            {
                /* rsyncable shares overlapLog's bounds (same as upstream zstd). */
                nuint err_code = ZSTD_cParam_clampBounds(ZSTD_cParameter.ZSTD_c_overlapLog, &value);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            CCtxParams->rsyncable = value;
            return (nuint)CCtxParams->rsyncable;
        case ZSTD_cParameter.ZSTD_c_experimentalParam8:
            CCtxParams->enableDedicatedDictSearch = value != 0 ? 1 : 0;
            return (nuint)CCtxParams->enableDedicatedDictSearch;
        /* long-distance-matching parameters */
        case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
            return (nuint)CCtxParams->ldmParams.enableLdm;
        case ZSTD_cParameter.ZSTD_c_ldmHashLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->ldmParams.hashLog = (uint)value;
            return CCtxParams->ldmParams.hashLog;
        case ZSTD_cParameter.ZSTD_c_ldmMinMatch:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->ldmParams.minMatchLength = (uint)value;
            return CCtxParams->ldmParams.minMatchLength;
        case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->ldmParams.bucketSizeLog = (uint)value;
            return CCtxParams->ldmParams.bucketSizeLog;
        case ZSTD_cParameter.ZSTD_c_ldmHashRateLog:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->ldmParams.hashRateLog = (uint)value;
            return CCtxParams->ldmParams.hashRateLog;
        case ZSTD_cParameter.ZSTD_c_targetCBlockSize:
            if (value != 0)
            {
                /* non-zero values are raised to the 1340-byte minimum before bounds check */
                value = value > 1340 ? value : 1340;
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetCBlockSize, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }
            }

            CCtxParams->targetCBlockSize = (uint)value;
            return CCtxParams->targetCBlockSize;
        case ZSTD_cParameter.ZSTD_c_experimentalParam7:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam7, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            CCtxParams->srcSizeHint = value;
            return (nuint)CCtxParams->srcSizeHint;
        case ZSTD_cParameter.ZSTD_c_experimentalParam9:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
            return (nuint)CCtxParams->inBufferMode;
        case ZSTD_cParameter.ZSTD_c_experimentalParam10:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
            return (nuint)CCtxParams->outBufferMode;
        case ZSTD_cParameter.ZSTD_c_experimentalParam11:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
            return (nuint)CCtxParams->blockDelimiters;
        case ZSTD_cParameter.ZSTD_c_experimentalParam12:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->validateSequences = value;
            return (nuint)CCtxParams->validateSequences;
        case ZSTD_cParameter.ZSTD_c_experimentalParam13:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->postBlockSplitter = (ZSTD_paramSwitch_e)value;
            return (nuint)CCtxParams->postBlockSplitter;
        case ZSTD_cParameter.ZSTD_c_experimentalParam20:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->preBlockSplitter_level = value;
            return (nuint)CCtxParams->preBlockSplitter_level;
        case ZSTD_cParameter.ZSTD_c_experimentalParam14:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
            return (nuint)CCtxParams->useRowMatchFinder;
        case ZSTD_cParameter.ZSTD_c_experimentalParam15:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->deterministicRefPrefix = !(value == 0) ? 1 : 0;
            return (nuint)CCtxParams->deterministicRefPrefix;
        case ZSTD_cParameter.ZSTD_c_experimentalParam16:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value;
            return (nuint)CCtxParams->prefetchCDictTables;
        case ZSTD_cParameter.ZSTD_c_experimentalParam17:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->enableMatchFinderFallback = value;
            return (nuint)CCtxParams->enableMatchFinderFallback;
        case ZSTD_cParameter.ZSTD_c_experimentalParam18:
            if (value != 0)
                if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam18, value) == 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
                }

            assert(value >= 0);
            CCtxParams->maxBlockSize = (nuint)value;
            return CCtxParams->maxBlockSize;
        case ZSTD_cParameter.ZSTD_c_experimentalParam19:
            if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound));
            }

            CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value;
            return (nuint)CCtxParams->searchForExternalRepcodes;
        default:
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported));
    }
}

/*! ZSTD_CCtx_getParameter() :
 * Get the requested compression parameter value, selected by enum ZSTD_cParameter,
 * and store it into int* value.
 * @return : 0, or an error code (which can be tested with ZSTD_isError()).
 */
public static nuint ZSTD_CCtx_getParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int* value)
{
    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}

/*!
 ZSTD_CCtxParams_getParameter() :
 * Similar to ZSTD_CCtx_getParameter.
 * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 */
public static nuint ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params_s* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch (param)
    {
        case ZSTD_cParameter.ZSTD_c_experimentalParam2:
            *value = (int)CCtxParams->format;
            break;
        case ZSTD_cParameter.ZSTD_c_compressionLevel:
            *value = CCtxParams->compressionLevel;
            break;
        case ZSTD_cParameter.ZSTD_c_windowLog:
            *value = (int)CCtxParams->cParams.windowLog;
            break;
        case ZSTD_cParameter.ZSTD_c_hashLog:
            *value = (int)CCtxParams->cParams.hashLog;
            break;
        case ZSTD_cParameter.ZSTD_c_chainLog:
            *value = (int)CCtxParams->cParams.chainLog;
            break;
        case ZSTD_cParameter.ZSTD_c_searchLog:
            *value = (int)CCtxParams->cParams.searchLog;
            break;
        case ZSTD_cParameter.ZSTD_c_minMatch:
            *value = (int)CCtxParams->cParams.minMatch;
            break;
        case ZSTD_cParameter.ZSTD_c_targetLength:
            *value = (int)CCtxParams->cParams.targetLength;
            break;
        case ZSTD_cParameter.ZSTD_c_strategy:
            *value = (int)CCtxParams->cParams.strategy;
            break;
        case ZSTD_cParameter.ZSTD_c_contentSizeFlag:
            *value = CCtxParams->fParams.contentSizeFlag;
            break;
        case ZSTD_cParameter.ZSTD_c_checksumFlag:
            *value = CCtxParams->fParams.checksumFlag;
            break;
        case ZSTD_cParameter.ZSTD_c_dictIDFlag:
            /* field is stored inverted (noDictIDFlag), so flip it back */
            *value = CCtxParams->fParams.noDictIDFlag == 0 ? 1 : 0;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam3:
            *value = CCtxParams->forceWindow;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam4:
            *value = (int)CCtxParams->attachDictPref;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam5:
            *value = (int)CCtxParams->literalCompressionMode;
            break;
        case ZSTD_cParameter.ZSTD_c_nbWorkers:
            *value = CCtxParams->nbWorkers;
            break;
        case ZSTD_cParameter.ZSTD_c_jobSize:
            assert(CCtxParams->jobSize <= 2147483647);
            *value = (int)CCtxParams->jobSize;
            break;
        case ZSTD_cParameter.ZSTD_c_overlapLog:
            *value = CCtxParams->overlapLog;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam1:
            *value = CCtxParams->rsyncable;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam8:
            *value = CCtxParams->enableDedicatedDictSearch;
            break;
        case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching:
            *value = (int)CCtxParams->ldmParams.enableLdm;
            break;
        case ZSTD_cParameter.ZSTD_c_ldmHashLog:
            *value = (int)CCtxParams->ldmParams.hashLog;
            break;
        case ZSTD_cParameter.ZSTD_c_ldmMinMatch:
            *value = (int)CCtxParams->ldmParams.minMatchLength;
            break;
        case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog:
            *value = (int)CCtxParams->ldmParams.bucketSizeLog;
            break;
        case ZSTD_cParameter.ZSTD_c_ldmHashRateLog:
            *value = (int)CCtxParams->ldmParams.hashRateLog;
            break;
        case ZSTD_cParameter.ZSTD_c_targetCBlockSize:
            *value = (int)CCtxParams->targetCBlockSize;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam7:
            *value = CCtxParams->srcSizeHint;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam9:
            *value = (int)CCtxParams->inBufferMode;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam10:
            *value = (int)CCtxParams->outBufferMode;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam11:
            *value = (int)CCtxParams->blockDelimiters;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam12:
            *value = CCtxParams->validateSequences;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam13:
            *value = (int)CCtxParams->postBlockSplitter;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam20:
            *value = CCtxParams->preBlockSplitter_level;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam14:
            *value = (int)CCtxParams->useRowMatchFinder;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam15:
            *value = CCtxParams->deterministicRefPrefix;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam16:
            *value = (int)CCtxParams->prefetchCDictTables;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam17:
            *value = CCtxParams->enableMatchFinderFallback;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam18:
            *value = (int)CCtxParams->maxBlockSize;
            break;
        case ZSTD_cParameter.ZSTD_c_experimentalParam19:
            *value = (int)CCtxParams->searchForExternalRepcodes;
            break;
        default:
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported));
    }

    return 0;
}

/** ZSTD_CCtx_setParametersUsingCCtxParams() :
 * just applies `params` into `cctx`
 * no action is performed, parameters are merely stored.
 * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
 * This is possible even if a compression is ongoing.
 * In which case, new parameters will be applied on the fly, starting with next compression job.
 */
public static nuint ZSTD_CCtx_setParametersUsingCCtxParams(ZSTD_CCtx_s* cctx, ZSTD_CCtx_params_s* @params)
{
    if (cctx->streamStage != ZSTD_cStreamStage.zcss_init)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong));
    }

    /* bulk replacement is refused while a dictionary is referenced */
    if (cctx->cdict != null)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong));
    }

    cctx->requestedParams = *@params;
    return 0;
}

/*! ZSTD_CCtx_setCParams() :
 * Set all parameters provided within @p cparams into the working @p cctx.
+ * Note : if modifying parameters during compression (MT mode only), + * note that changes to the .windowLog parameter will be ignored. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + * On failure, no parameters are updated. + */ + public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionParameters cparams) + { + { + /* only update if all parameters are valid */ + nuint err_code = ZSTD_checkCParams(cparams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_windowLog, (int)cparams.windowLog); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_chainLog, (int)cparams.chainLog); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_hashLog, (int)cparams.hashLog); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_searchLog, (int)cparams.searchLog); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_minMatch, (int)cparams.minMatch); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_targetLength, (int)cparams.targetLength); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_strategy, (int)cparams.strategy); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! ZSTD_CCtx_setFParams() : + * Set all parameters provided within @p fparams into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). 
+ */ + public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters fparams) + { + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0 ? 1 : 0); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_checksumFlag, fparams.checksumFlag != 0 ? 1 : 0); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0 ? 1 : 0); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! ZSTD_CCtx_setParams() : + * Set all parameters provided within @p params into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ + public static nuint ZSTD_CCtx_setParams(ZSTD_CCtx_s* cctx, ZSTD_parameters @params) + { + { + /* First check cParams, because we want to update all or none. */ + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + /* Next set fParams, because this could fail if the cctx isn't in init stage. */ + nuint err_code = ZSTD_CCtx_setFParams(cctx, @params.fParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + /* Finally set cParams, which should succeed. */ + nuint err_code = ZSTD_CCtx_setCParams(cctx, @params.cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! ZSTD_CCtx_setPledgedSrcSize() : + * Total input data size to be compressed as a single frame. + * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. + * This value will also be controlled at end of frame, and trigger an error if not respected. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). 
+ * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. + * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. + * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. + * Note 2 : pledgedSrcSize is only valid once, for the next frame. + * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. + * Note 3 : Whenever all input data is provided and consumed in a single round, + * for example with ZSTD_compress2(), + * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), + * this value is automatically overridden by srcSize instead. + */ + public static nuint ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx_s* cctx, ulong pledgedSrcSize) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + cctx->pledgedSrcSizePlusOne = pledgedSrcSize + 1; + return 0; + } + + /** + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions. + */ + private static nuint ZSTD_initLocalDict(ZSTD_CCtx_s* cctx) + { + ZSTD_localDict* dl = &cctx->localDict; + if (dl->dict == null) + { + assert(dl->dictBuffer == null); + assert(dl->cdict == null); + assert(dl->dictSize == 0); + return 0; + } + + if (dl->cdict != null) + { + assert(cctx->cdict == dl->cdict); + return 0; + } + + assert(dl->dictSize > 0); + assert(cctx->cdict == null); + assert(cctx->prefixDict.dict == null); + dl->cdict = ZSTD_createCDict_advanced2(dl->dict, dl->dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dl->dictContentType, &cctx->requestedParams, cctx->customMem); + if (dl->cdict == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + cctx->cdict = dl->cdict; + return 0; + } + + /*! 
ZSTD_CCtx_loadDictionary_advanced() : + * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over + * how to load the dictionary (by copy ? by reference ?) + * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ + public static nuint ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearAllDicts(cctx); + if (dict == null || dictSize == 0) + return 0; + if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef) + { + cctx->localDict.dict = dict; + } + else + { + /* copy dictionary content inside CCtx to own its lifetime */ + void* dictBuffer; + if (cctx->staticSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); + if (dictBuffer == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + memcpy(dictBuffer, dict, (uint)dictSize); + cctx->localDict.dictBuffer = dictBuffer; + cctx->localDict.dict = dictBuffer; + } + + cctx->localDict.dictSize = dictSize; + cctx->localDict.dictContentType = dictContentType; + return 0; + } + + /*! ZSTD_CCtx_loadDictionary_byReference() : + * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. + * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ + public static nuint ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize) + { + return ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto); + } + + /*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ + * Create an internal CDict from `dict` buffer. 
 * Decompression will have to use same dictionary.
 * @result : 0, or an error code (which can be tested with ZSTD_isError()).
 * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
 * meaning "return to no-dictionary mode".
 * Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
 * until parameters are reset, a new dictionary is loaded, or the dictionary
 * is explicitly invalidated by loading a NULL dictionary.
 * Note 2 : Loading a dictionary involves building tables.
 * It's also a CPU consuming operation, with non-negligible impact on latency.
 * Tables are dependent on compression parameters, and for this reason,
 * compression parameters can no longer be changed after loading a dictionary.
 * Note 3 :`dict` content will be copied internally.
 * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
 * In such a case, dictionary buffer must outlive its users.
 * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
 * to precisely select how dictionary content must be interpreted.
 * Note 5 : This method does not benefit from LDM (long distance mode).
 * If you want to employ LDM on some large dictionary content,
 * prefer employing ZSTD_CCtx_refPrefix() described below.
 */
public static nuint ZSTD_CCtx_loadDictionary(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize)
{
    /* default behavior: copy the dictionary, auto-detect its format */
    return ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto);
}

/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
 * Reference a prepared dictionary, to be used for all future compressed frames.
 * Note that compression parameters are enforced from within CDict,
 * and supersede any compression parameter previously set within CCtx.
 * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. + * The dictionary will remain valid for future compressed frames using same CCtx. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special : Referencing a NULL CDict means "return to no-dictionary mode". + * Note 1 : Currently, only one dictionary can be managed. + * Referencing a new dictionary effectively "discards" any previous one. + * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ + public static nuint ZSTD_CCtx_refCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearAllDicts(cctx); + cctx->cdict = cdict; + return 0; + } + + public static nuint ZSTD_CCtx_refThreadPool(ZSTD_CCtx_s* cctx, void* pool) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + cctx->pool = pool; + return 0; + } + + /*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ + * Reference a prefix (single-usage dictionary) for next compressed frame. + * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). + * Decompression will need same prefix to properly regenerate data. + * Compressing with a prefix is similar in outcome as performing a diff and compressing it, + * but performs much faster, especially during decompression (compression speed is tunable with compression level). + * This method is compatible with LDM (long distance mode). + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary + * Note 1 : Prefix buffer is referenced. It **must** outlive compression. + * Its content must remain unmodified during compression. 
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, + * ensure that the window size is large enough to contain the entire source. + * See ZSTD_c_windowLog. + * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. + * It's a CPU consuming operation, with non-negligible impact on latency. + * If there is a need to use the same prefix multiple times, consider loadDictionary instead. + * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). + * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ + public static nuint ZSTD_CCtx_refPrefix(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize) + { + return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dictContentType_e.ZSTD_dct_rawContent); + } + + /*! ZSTD_CCtx_refPrefix_advanced() : + * Same as ZSTD_CCtx_refPrefix(), but gives finer control over + * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ + public static nuint ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize, ZSTD_dictContentType_e dictContentType) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearAllDicts(cctx); + if (prefix != null && prefixSize > 0) + { + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictContentType = dictContentType; + } + + return 0; + } + + /*! 
ZSTD_CCtx_reset() : + * Also dumps dictionary */ + public static nuint ZSTD_CCtx_reset(ZSTD_CCtx_s* cctx, ZSTD_ResetDirective reset) + { + if (reset == ZSTD_ResetDirective.ZSTD_reset_session_only || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + { + cctx->streamStage = ZSTD_cStreamStage.zcss_init; + cctx->pledgedSrcSizePlusOne = 0; + } + + if (reset == ZSTD_ResetDirective.ZSTD_reset_parameters || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearAllDicts(cctx); + return ZSTD_CCtxParams_reset(&cctx->requestedParams); + } + + return 0; + } + + /** ZSTD_checkCParams() : + control CParam values remain within authorized range. + @return : 0, or an error code if one value is beyond authorized range */ + public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) + { + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, (int)cParams.hashLog) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, 
(int)cParams.targetLength) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + return 0; + } + + /** ZSTD_clampCParams() : + * make CParam values within valid range. + * @return : valid CParams */ + private static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) + { + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_windowLog); + if ((int)cParams.windowLog < bounds.lowerBound) + cParams.windowLog = (uint)bounds.lowerBound; + else if ((int)cParams.windowLog > bounds.upperBound) + cParams.windowLog = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_chainLog); + if ((int)cParams.chainLog < bounds.lowerBound) + cParams.chainLog = (uint)bounds.lowerBound; + else if ((int)cParams.chainLog > bounds.upperBound) + cParams.chainLog = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_hashLog); + if ((int)cParams.hashLog < bounds.lowerBound) + cParams.hashLog = (uint)bounds.lowerBound; + else if ((int)cParams.hashLog > bounds.upperBound) + cParams.hashLog = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_searchLog); + if ((int)cParams.searchLog < bounds.lowerBound) + cParams.searchLog = (uint)bounds.lowerBound; + else if ((int)cParams.searchLog > bounds.upperBound) + cParams.searchLog = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_minMatch); + if ((int)cParams.minMatch < bounds.lowerBound) + cParams.minMatch = (uint)bounds.lowerBound; + else if ((int)cParams.minMatch > bounds.upperBound) + cParams.minMatch = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = 
ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_targetLength); + if ((int)cParams.targetLength < bounds.lowerBound) + cParams.targetLength = (uint)bounds.lowerBound; + else if ((int)cParams.targetLength > bounds.upperBound) + cParams.targetLength = (uint)bounds.upperBound; + } + + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_strategy); + if ((int)cParams.strategy < bounds.lowerBound) + cParams.strategy = (ZSTD_strategy)bounds.lowerBound; + else if ((int)cParams.strategy > bounds.upperBound) + cParams.strategy = (ZSTD_strategy)bounds.upperBound; + } + + return cParams; + } + + /** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ + private static uint ZSTD_cycleLog(uint hashLog, ZSTD_strategy strat) + { + uint btScale = (uint)strat >= (uint)ZSTD_strategy.ZSTD_btlazy2 ? 1U : 0U; + return hashLog - btScale; + } + + /** ZSTD_dictAndWindowLog() : + * Returns an adjusted window log that is large enough to fit the source and the dictionary. + * The zstd format says that the entire dictionary is valid if one byte of the dictionary + * is within the window. So the hashLog and chainLog should be large enough to reference both + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing + * the hashLog and windowLog. + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN. + */ + private static uint ZSTD_dictAndWindowLog(uint windowLog, ulong srcSize, ulong dictSize) + { + ulong maxWindowSize = 1UL << (sizeof(nuint) == 4 ? 30 : 31); + if (dictSize == 0) + { + return windowLog; + } + + assert(windowLog <= (uint)(sizeof(nuint) == 4 ? 30 : 31)); + assert(srcSize != unchecked(0UL - 1)); + { + ulong windowSize = 1UL << (int)windowLog; + ulong dictAndWindowSize = dictSize + windowSize; + if (windowSize >= dictSize + srcSize) + { + return windowLog; + } + else if (dictAndWindowSize >= maxWindowSize) + { + return (uint)(sizeof(nuint) == 4 ? 
30 : 31); + } + else + { + return ZSTD_highbit32((uint)dictAndWindowSize - 1) + 1; + } + } + } + + /** ZSTD_adjustCParams_internal() : + * optimize `cPar` for a specified input (`srcSize` and `dictSize`). + * mostly downsize to reduce memory consumption and initialization latency. + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. + * note : `srcSize==0` means 0! + * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ + private static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, ulong srcSize, nuint dictSize, ZSTD_CParamMode_e mode, ZSTD_paramSwitch_e useRowMatchFinder) + { + /* (1<<9) + 1 */ + const ulong minSrcSize = 513; + ulong maxWindowResize = 1UL << (sizeof(nuint) == 4 ? 30 : 31) - 1; + assert(ZSTD_checkCParams(cPar) == 0); + switch (mode) + { + case ZSTD_CParamMode_e.ZSTD_cpm_unknown: + case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: + break; + case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: + if (dictSize != 0 && srcSize == unchecked(0UL - 1)) + srcSize = minSrcSize; + break; + case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: + dictSize = 0; + break; + default: + assert(0 != 0); + break; + } + + if (srcSize <= maxWindowResize && dictSize <= maxWindowResize) + { + uint tSize = (uint)(srcSize + dictSize); + const uint hashSizeMin = 1 << 6; + uint srcLog = tSize < hashSizeMin ? 
6 : ZSTD_highbit32(tSize - 1) + 1; + if (cPar.windowLog > srcLog) + cPar.windowLog = srcLog; + } + + if (srcSize != unchecked(0UL - 1)) + { + uint dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, srcSize, dictSize); + uint cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cPar.hashLog > dictAndWindowLog + 1) + cPar.hashLog = dictAndWindowLog + 1; + if (cycleLog > dictAndWindowLog) + cPar.chainLog -= cycleLog - dictAndWindowLog; + } + + if (cPar.windowLog < 10) + cPar.windowLog = 10; + if (mode == ZSTD_CParamMode_e.ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar) != 0) + { + const uint maxShortCacheHashLog = 32 - 8; + if (cPar.hashLog > maxShortCacheHashLog) + { + cPar.hashLog = maxShortCacheHashLog; + } + + if (cPar.chainLog > maxShortCacheHashLog) + { + cPar.chainLog = maxShortCacheHashLog; + } + } + + if (useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_auto) + useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder) != 0) + { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + uint rowLog = cPar.searchLog <= 4 ? 4 : cPar.searchLog <= 6 ? cPar.searchLog : 6; + const uint maxRowHashLog = 32 - 8; + uint maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) + { + cPar.hashLog = maxHashLog; + } + } + + return cPar; + } + + /*! ZSTD_adjustCParams() : + * optimize params for a given `srcSize` and `dictSize`. + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. + * `dictSize` must be `0` when there is no dictionary. + * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. 
+ * This function never fails (wide contract) */ + public static ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, ulong srcSize, nuint dictSize) + { + cPar = ZSTD_clampCParams(cPar); + if (srcSize == 0) + srcSize = unchecked(0UL - 1); + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown, ZSTD_paramSwitch_e.ZSTD_ps_auto); + } + + private static void ZSTD_overrideCParams(ZSTD_compressionParameters* cParams, ZSTD_compressionParameters* overrides) + { + if (overrides->windowLog != 0) + cParams->windowLog = overrides->windowLog; + if (overrides->hashLog != 0) + cParams->hashLog = overrides->hashLog; + if (overrides->chainLog != 0) + cParams->chainLog = overrides->chainLog; + if (overrides->searchLog != 0) + cParams->searchLog = overrides->searchLog; + if (overrides->minMatch != 0) + cParams->minMatch = overrides->minMatch; + if (overrides->targetLength != 0) + cParams->targetLength = overrides->targetLength; + if (overrides->strategy != default) + cParams->strategy = overrides->strategy; + } + + /* ZSTD_getCParamsFromCCtxParams() : + * cParams are built depending on compressionLevel, src size hints, + * LDM and manually set compression parameters. + * Note: srcSizeHint == 0 means 0! 
+ */ + private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(ZSTD_CCtx_params_s* CCtxParams, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + { + ZSTD_compressionParameters cParams; + if (srcSizeHint == unchecked(0UL - 1) && CCtxParams->srcSizeHint > 0) + { + assert(CCtxParams->srcSizeHint >= 0); + srcSizeHint = (ulong)CCtxParams->srcSizeHint; + } + + cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); + if (CCtxParams->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + cParams.windowLog = 27; + ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); + assert(ZSTD_checkCParams(cParams) == 0); + return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder); + } + + private static nuint ZSTD_sizeof_matchState(ZSTD_compressionParameters* cParams, ZSTD_paramSwitch_e useRowMatchFinder, int enableDedicatedDictSearch, uint forCCtx) + { + /* chain table size should be 0 for fast or row-hash strategies */ + nuint chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch != 0 && forCCtx == 0 ? 1U : 0U) != 0 ? (nuint)1 << (int)cParams->chainLog : 0; + nuint hSize = (nuint)1 << (int)cParams->hashLog; + uint hashLog3 = forCCtx != 0 && cParams->minMatch == 3 ? 17 < cParams->windowLog ? 17 : cParams->windowLog : 0; + nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; + /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't + * surrounded by redzones in ASAN. 
*/ + nuint tableSpace = chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); + nuint optPotentialSpace = ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t))); + nuint lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 ? ZSTD_cwksp_aligned64_alloc_size(hSize) : 0; + nuint optSpace = forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt ? optPotentialSpace : 0; + nuint slackSpace = ZSTD_cwksp_slack_space_required(); + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; + } + + /* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ + private static nuint ZSTD_maxNbSeq(nuint blockSize, uint minMatch, int useSequenceProducer) + { + uint divider = (uint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4); + return blockSize / divider; + } + + private static nuint ZSTD_estimateCCtxSize_usingCCtxParams_internal(ZSTD_compressionParameters* cParams, ldmParams_t* ldmParams, int isStatic, ZSTD_paramSwitch_e useRowMatchFinder, nuint buffInSize, nuint buffOutSize, ulong pledgedSrcSize, int useSequenceProducer, nuint maxBlockSize) + { + nuint windowSize = (nuint)(1UL << (int)cParams->windowLog <= 1UL ? 1UL : 1UL << (int)cParams->windowLog <= pledgedSrcSize ? 1UL << (int)cParams->windowLog : pledgedSrcSize); + nuint blockSize = ZSTD_resolveMaxBlockSize(maxBlockSize) < windowSize ? 
ZSTD_resolveMaxBlockSize(maxBlockSize) : windowSize; + nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); + nuint tokenSpace = ZSTD_cwksp_alloc_size(32 + blockSize) + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * (nuint)sizeof(SeqDef_s)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(byte)); + nuint tmpWorkSpace = ZSTD_cwksp_alloc_size((8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208); + nuint blockStateSpace = 2 * ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_compressedBlockState_t)); + /* enableDedicatedDictSearch */ + nuint matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, 0, 1); + nuint ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); + nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); + nuint ldmSeqSpace = ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) : 0; + nuint bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); + nuint cctxSpace = isStatic != 0 ? ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CCtx_s)) : 0; + nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + nuint externalSeqSpace = useSequenceProducer != 0 ? 
ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence)) : 0; + nuint neededSpace = cctxSpace + tmpWorkSpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace + externalSeqSpace; + return neededSpace; + } + + public static nuint ZSTD_estimateCCtxSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) + { + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(@params, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params->useRowMatchFinder, &cParams); + if (@params->nbWorkers > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + return ZSTD_estimateCCtxSize_usingCCtxParams_internal(&cParams, &@params->ldmParams, 1, useRowMatchFinder, 0, 0, unchecked(0UL - 1), ZSTD_hasExtSeqProd(@params), @params->maxBlockSize); + } + + public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) + { + ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) + { + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + nuint noRowCCtxSize; + nuint rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + return noRowCCtxSize > rowCCtxSize ? 
noRowCCtxSize : rowCCtxSize; + } + else + { + return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + } + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_srcSizeTiers => new ulong[4] + { + 16 * (1 << 10), + 128 * (1 << 10), + 256 * (1 << 10), + unchecked(0UL - 1) + }; + private static ulong* srcSizeTiers => (ulong*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_srcSizeTiers)); +#else + + private static readonly ulong* srcSizeTiers = GetArrayPointer(new ulong[4] { (ulong)(16 * (1 << 10)), (ulong)(128 * (1 << 10)), (ulong)(256 * (1 << 10)), (unchecked(0UL - 1)) }); +#endif + private static nuint ZSTD_estimateCCtxSize_internal(int compressionLevel) + { + int tier = 0; + nuint largestSize = 0; + for (; tier < 4; ++tier) + { + /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + largestSize = ZSTD_estimateCCtxSize_usingCParams(cParams) > largestSize ? ZSTD_estimateCCtxSize_usingCParams(cParams) : largestSize; + } + + return largestSize; + } + + /*! ZSTD_estimate*() : + * These functions make it possible to estimate memory usage + * of a future {D,C}Ctx, before its creation. + * This is useful in combination with ZSTD_initStatic(), + * which makes it possible to employ a static buffer for ZSTD_CCtx* state. + * + * ZSTD_estimateCCtxSize() will provide a memory budget large enough + * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() + * associated with any compression level up to max specified one. + * The estimate will assume the input may be arbitrarily large, + * which is the worst case. 
+ * + * Note that the size estimation is specific for one-shot compression, + * it is not valid for streaming (see ZSTD_estimateCStreamSize*()) + * nor other potential ways of using a ZSTD_CCtx* state. + * + * When srcSize can be bound by a known and rather "small" value, + * this knowledge can be used to provide a tighter budget estimation + * because the ZSTD_CCtx* state will need less memory for small inputs. + * This tighter estimation can be provided by employing more advanced functions + * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), + * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). + * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. + * + * Note : only single-threaded compression is supported. + * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + */ + public static nuint ZSTD_estimateCCtxSize(int compressionLevel) + { + int level; + nuint memBudget = 0; + for (level = compressionLevel < 1 ? compressionLevel : 1; level <= compressionLevel; level++) + { + /* Ensure monotonically increasing memory usage as compression level increases */ + nuint newMB = ZSTD_estimateCCtxSize_internal(level); + if (newMB > memBudget) + memBudget = newMB; + } + + return memBudget; + } + + public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) + { + if (@params->nbWorkers > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + { + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(@params, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + nuint blockSize = ZSTD_resolveMaxBlockSize(@params->maxBlockSize) < (nuint)1 << (int)cParams.windowLog ? 
ZSTD_resolveMaxBlockSize(@params->maxBlockSize) : (nuint)1 << (int)cParams.windowLog; + nuint inBuffSize = @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ((nuint)1 << (int)cParams.windowLog) + blockSize : 0; + nuint outBuffSize = @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_compressBound(blockSize) + 1 : 0; + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params->useRowMatchFinder, &@params->cParams); + return ZSTD_estimateCCtxSize_usingCCtxParams_internal(&cParams, &@params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, unchecked(0UL - 1), ZSTD_hasExtSeqProd(@params), @params->maxBlockSize); + } + } + + public static nuint ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) + { + ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) + { + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + nuint noRowCCtxSize; + nuint rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + return noRowCCtxSize > rowCCtxSize ? noRowCCtxSize : rowCCtxSize; + } + else + { + return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + } + } + + private static nuint ZSTD_estimateCStreamSize_internal(int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + return ZSTD_estimateCStreamSize_usingCParams(cParams); + } + + /*! 
ZSTD_estimateCStreamSize() : + * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression + * using any compression level up to the max specified one. + * It will also consider src size to be arbitrarily "large", which is a worst case scenario. + * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. + * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. + * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note : CStream size estimation is only correct for single-threaded compression. + * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. + * + * ZSTD_DStream memory budget depends on frame's window Size. + * This information can be passed manually, using ZSTD_estimateDStreamSize, + * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); + * Any frame requesting a window size larger than max specified one will be rejected. + * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), + * an internal ?Dict will be created, which additional size is not estimated here. + * In this case, get total size by adding ZSTD_estimate?DictSize + */ + public static nuint ZSTD_estimateCStreamSize(int compressionLevel) + { + int level; + nuint memBudget = 0; + for (level = compressionLevel < 1 ? 
compressionLevel : 1; level <= compressionLevel; level++) + { + nuint newMB = ZSTD_estimateCStreamSize_internal(level); + if (newMB > memBudget) + memBudget = newMB; + } + + return memBudget; + } + + /* ZSTD_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads (non-blocking mode). + */ + public static ZSTD_frameProgression ZSTD_getFrameProgression(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers > 0) + { + return ZSTDMT_getFrameProgression(cctx->mtctx); + } + + { + ZSTD_frameProgression fp; + nuint buffered = cctx->inBuff == null ? 0 : cctx->inBuffPos - cctx->inToCompress; +#if DEBUG + if (buffered != 0) + assert(cctx->inBuffPos >= cctx->inToCompress); +#endif + assert(buffered <= 1 << 17); + fp.ingested = cctx->consumedSrcSize + buffered; + fp.consumed = cctx->consumedSrcSize; + fp.produced = cctx->producedCSize; + fp.flushed = cctx->producedCSize; + fp.currentJobID = 0; + fp.nbActiveWorkers = 0; + return fp; + } + } + + /*! ZSTD_toFlushNow() + * Only useful for multithreading scenarios currently (nbWorkers >= 1). 
+ */ + public static nuint ZSTD_toFlushNow(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers > 0) + { + return ZSTDMT_toFlushNow(cctx->mtctx); + } + + return 0; + } + + [Conditional("DEBUG")] + private static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) + { + assert(cParams1.windowLog == cParams2.windowLog); + assert(cParams1.chainLog == cParams2.chainLog); + assert(cParams1.hashLog == cParams2.hashLog); + assert(cParams1.searchLog == cParams2.searchLog); + assert(cParams1.minMatch == cParams2.minMatch); + assert(cParams1.targetLength == cParams2.targetLength); + assert(cParams1.strategy == cParams2.strategy); + } + + private static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) + { + int i; + for (i = 0; i < 3; ++i) + bs->rep[i] = repStartValue[i]; + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_none; + bs->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_none; + bs->entropy.fse.matchlength_repeatMode = FSE_repeat.FSE_repeat_none; + bs->entropy.fse.litlength_repeatMode = FSE_repeat.FSE_repeat_none; + } + + /*! ZSTD_invalidateMatchState() + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). 
+ */ + private static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) + { + ZSTD_window_clear(&ms->window); + ms->nextToUpdate = ms->window.dictLimit; + ms->loadedDictEnd = 0; + ms->opt.litLengthSum = 0; + ms->dictMatchState = null; + } + + /* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ + private static ulong ZSTD_bitmix(ulong val, ulong len) + { + val ^= BitOperations.RotateRight(val, 49) ^ BitOperations.RotateRight(val, 24); + val *= 0x9FB21C651E98DF25UL; + val ^= (val >> 35) + len; + val *= 0x9FB21C651E98DF25UL; + return val ^ val >> 28; + } + + /* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ + private static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) + { + ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix(ms->hashSaltEntropy, 4); + } + + private static nuint ZSTD_reset_matchState(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_compressionParameters* cParams, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_compResetPolicy_e crp, ZSTD_indexResetPolicy_e forceResetIndex, ZSTD_resetTarget_e forWho) + { + /* disable chain table allocation for fast or row-based strategies */ + nuint chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, ms->dedicatedDictSearch != 0 && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict ? 1U : 0U) != 0 ? (nuint)1 << (int)cParams->chainLog : 0; + nuint hSize = (nuint)1 << (int)cParams->hashLog; + uint hashLog3 = forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->minMatch == 3 ? 17 < cParams->windowLog ? 17 : cParams->windowLog : 0; + nuint h3Size = hashLog3 != 0 ? 
(nuint)1 << (int)hashLog3 : 0; + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + if (forceResetIndex == ZSTD_indexResetPolicy_e.ZSTDirp_reset) + { + ZSTD_window_init(&ms->window); + ZSTD_cwksp_mark_tables_dirty(ws); + } + + ms->hashLog3 = hashLog3; + ms->lazySkipping = 0; + ZSTD_invalidateMatchState(ms); + assert(ZSTD_cwksp_reserve_failed(ws) == 0); + ZSTD_cwksp_clear_tables(ws); + ms->hashTable = (uint*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(uint)); + ms->chainTable = (uint*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(uint)); + ms->hashTable3 = (uint*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(uint)); + if (ZSTD_cwksp_reserve_failed(ws) != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + if (crp != ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty) + { + ZSTD_cwksp_clean_tables(ws); + } + + if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0) + { + /* Row match finder needs an additional table of hashes ("tags") */ + nuint tagTableSize = hSize; + if (forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx) + { + ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); + ZSTD_advanceHashSalt(ms); + } + else + { + ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); + memset(ms->tagTable, 0, (uint)tagTableSize); + ms->hashSalt = 0; + } + + { + uint rowLog = cParams->searchLog <= 4 ? 4 : cParams->searchLog <= 6 ? 
cParams->searchLog : 6; + assert(cParams->hashLog >= rowLog); + ms->rowHashLog = cParams->hashLog - rowLog; + } + } + + if (forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->strategy >= ZSTD_strategy.ZSTD_btopt) + { + ms->opt.litFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (1 << 8) * sizeof(uint)); + ms->opt.litLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (35 + 1) * sizeof(uint)); + ms->opt.matchLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (52 + 1) * sizeof(uint)); + ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (31 + 1) * sizeof(uint)); + ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, (nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))); + ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t))); + } + + ms->cParams = *cParams; + if (ZSTD_cwksp_reserve_failed(ws) != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + return 0; + } + + private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) + { + return (nuint)(w.nextSrc - w.@base) > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) ? 1 : 0; + } + + /** ZSTD_dictTooBig(): + * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in + * one go generically. So we ensure that in that case we reset the tables to zero, + * so that we can load as much of the dictionary as possible. + */ + private static int ZSTD_dictTooBig(nuint loadedDictSize) + { + return loadedDictSize > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) ? 1 : 0; + } + + /*! ZSTD_resetCCtx_internal() : + * @param loadedDictSize The size of the dictionary to be loaded + * into the context, if any. If no dictionary is used, or the + * dictionary is being attached / copied, then pass 0. + * note : `params` are assumed fully validated at this stage. 
     */
    private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, nuint loadedDictSize, ZSTD_compResetPolicy_e crp, ZSTD_buffered_policy_e zbuff)
    {
        ZSTD_cwksp* ws = &zc->workspace;
        assert(!ERR_isError(ZSTD_checkCParams(@params->cParams)));
        zc->isFirstBlock = 1;
        /* From here on, work on the copy held by the cctx, not on the caller's params. */
        zc->appliedParams = *@params;
        @params = &zc->appliedParams;
        assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto);
        assert(@params->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto);
        assert(@params->ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto);
        assert(@params->maxBlockSize != 0);
        if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable)
        {
            ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &@params->cParams);
            assert(@params->ldmParams.hashLog >= @params->ldmParams.bucketSizeLog);
            assert(@params->ldmParams.hashRateLog < 32);
        }

        {
            /* windowSize = clamp(min(1 << windowLog, pledgedSrcSize), >= 1) */
            nuint windowSize = 1 > (nuint)((ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize ? (ulong)1 << (int)@params->cParams.windowLog : pledgedSrcSize) ? 1 : (nuint)((ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize ? (ulong)1 << (int)@params->cParams.windowLog : pledgedSrcSize);
            nuint blockSize = @params->maxBlockSize < windowSize ? @params->maxBlockSize : windowSize;
            nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, @params->cParams.minMatch, ZSTD_hasExtSeqProd(@params));
            nuint buffOutSize = zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered && @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_compressBound(blockSize) + 1 : 0;
            nuint buffInSize = zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered && @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? windowSize + blockSize : 0;
            nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(@params->ldmParams, blockSize);
            int indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
            int dictTooBig = ZSTD_dictTooBig(loadedDictSize);
            ZSTD_indexResetPolicy_e needsIndexReset = indexTooClose != 0 || dictTooBig != 0 || zc->initialized == 0 ? ZSTD_indexResetPolicy_e.ZSTDirp_reset : ZSTD_indexResetPolicy_e.ZSTDirp_continue;
            /* May return a zstd error code instead of a size; checked just below. */
            nuint neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal(&@params->cParams, &@params->ldmParams, zc->staticSize != 0 ? 1 : 0, @params->useRowMatchFinder, buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(@params), @params->maxBlockSize);
            {
                nuint err_code = neededSpace;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            if (zc->staticSize == 0)
                ZSTD_cwksp_bump_oversized_duration(ws, 0);
            {
                int workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace ? 1 : 0;
                int workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
                int resizeWorkspace = workspaceTooSmall != 0 || workspaceWasteful != 0 ? 1 : 0;
                if (resizeWorkspace != 0)
                {
                    /* A static (caller-provided) workspace can never be reallocated. */
                    if (zc->staticSize != 0)
                    {
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                    }

                    needsIndexReset = ZSTD_indexResetPolicy_e.ZSTDirp_reset;
                    ZSTD_cwksp_free(ws, zc->customMem);
                    {
                        nuint err_code = ZSTD_cwksp_create(ws, neededSpace, zc->customMem);
                        if (ERR_isError(err_code))
                        {
                            return err_code;
                        }
                    }

                    /* Objects (block states, tmp workspace) live at the start of the new workspace. */
                    assert(ZSTD_cwksp_check_available(ws, (nuint)(2 * sizeof(ZSTD_compressedBlockState_t))) != 0);
                    zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(ws, (nuint)sizeof(ZSTD_compressedBlockState_t));
                    if (zc->blockState.prevCBlock == null)
                    {
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                    }

                    zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(ws, (nuint)sizeof(ZSTD_compressedBlockState_t));
                    if (zc->blockState.nextCBlock == null)
                    {
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                    }

                    zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208);
                    if (zc->tmpWorkspace == null)
                    {
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                    }

                    zc->tmpWkspSize = (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208;
                }
            }

            /* From here, reservation order inside the workspace is significant:
             * tables, then aligned allocations, then buffers. */
            ZSTD_cwksp_clear(ws);
            zc->blockState.matchState.cParams = @params->cParams;
            zc->blockState.matchState.prefetchCDictTables = @params->prefetchCDictTables == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0;
            zc->pledgedSrcSizePlusOne = pledgedSrcSize + 1;
            zc->consumedSrcSize = 0;
            zc->producedCSize = 0;
            /* unknown content size (all-ones sentinel) => do not write it in the frame header */
            if (pledgedSrcSize == unchecked(0UL - 1))
                zc->appliedParams.fParams.contentSizeFlag = 0;
            zc->blockSizeMax = blockSize;
            ZSTD_XXH64_reset(&zc->xxhState, 0);
            zc->stage = ZSTD_compressionStage_e.ZSTDcs_init;
            zc->dictID = 0;
            zc->dictContentSize = 0;
            ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
            {
                nuint err_code = ZSTD_reset_matchState(&zc->blockState.matchState, ws, &@params->cParams, @params->useRowMatchFinder, crp, needsIndexReset, ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            zc->seqStore.sequencesStart = (SeqDef_s*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * (nuint)sizeof(SeqDef_s));
            if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable)
            {
                /* TODO: avoid memset? */
                nuint ldmHSize = (nuint)1 << (int)@params->ldmParams.hashLog;
                zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * (nuint)sizeof(ldmEntry_t));
                memset(zc->ldmState.hashTable, 0, (uint)(ldmHSize * (nuint)sizeof(ldmEntry_t)));
                zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * (nuint)sizeof(rawSeq));
                zc->maxNbLdmSequences = maxNbLdmSeq;
                ZSTD_window_init(&zc->ldmState.window);
                zc->ldmState.loadedDictEnd = 0;
            }

            if (ZSTD_hasExtSeqProd(@params) != 0)
            {
                nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
                zc->extSeqBufCapacity = maxNbExternalSeq;
                zc->extSeqBuf = (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence));
            }

            /* +32: slack past the literal buffer — NOTE(review): presumably WILDCOPY
             * overlength for the copy routines; confirm against upstream zstd. */
            zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + 32);
            zc->seqStore.maxNbLit = blockSize;
            zc->bufferedPolicy = zbuff;
            zc->inBuffSize = buffInSize;
            zc->inBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
            zc->outBuffSize = buffOutSize;
            zc->outBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
            if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable)
            {
                /* TODO: avoid memset? */
                nuint numBuckets = (nuint)1 << (int)(@params->ldmParams.hashLog - @params->ldmParams.bucketSizeLog);
                zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
                memset(zc->ldmState.bucketOffsets, 0, (uint)numBuckets);
            }

            ZSTD_referenceExternalSequences(zc, null, 0);
            zc->seqStore.maxNbSeq = maxNbSeq;
            zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte));
            zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte));
            zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte));
            assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace) != 0);
            zc->initialized = 1;
            return 0;
        }
    }

    /* ZSTD_invalidateRepCodes() :
     * ensures next compression will not use repcodes from previous block.
     * Note : only works with regular variant;
     * do not use with extDict variant ! */
    private static void ZSTD_invalidateRepCodes(ZSTD_CCtx_s* cctx)
    {
        int i;
        for (i = 0; i < 3; i++)
            cctx->blockState.prevCBlock->rep[i] = 0;
        assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0);
    }

    /* Attach-vs-copy size cutoff per strategy (indexed by ZSTD_strategy). */
    private static readonly nuint* attachDictSizeCutoffs = GetArrayPointer(new nuint[10] { 8 * (1 << 10), 8 * (1 << 10), 16 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 8 * (1 << 10), 8 * (1 << 10) });
    /* ZSTD_shouldAttachDict() :
     * returns 1 to attach (reference) the CDict in place, 0 to copy its tables.
     * Attaching is chosen for dedicated-dict-search CDicts, or when the pledged
     * source is small/unknown or attach is forced — unless a copy is forced or
     * forceWindow is set. */
    private static int ZSTD_shouldAttachDict(ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize)
    {
        nuint cutoff = attachDictSizeCutoffs[(int)cdict->matchState.cParams.strategy];
        int dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
        return dedicatedDictSearch != 0 || (pledgedSrcSize <= cutoff || pledgedSrcSize == unchecked(0UL - 1) || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach) && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy && @params->forceWindow == 0 ?
1 : 0;
    }

    /* ZSTD_resetCCtx_byAttachingCDict() :
     * resets the cctx and references the CDict's match state in place via
     * dictMatchState — no table copy; only window/dictionary bookkeeping and
     * the entropy block state are updated in the cctx. */
    private static nuint ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff)
    {
        {
            ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
            uint windowLog = @params.cParams.windowLog;
            assert(windowLog != 0);
            if (cdict->matchState.dedicatedDictSearch != 0)
            {
                ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
            }

            @params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, cdict->dictContentSize, ZSTD_CParamMode_e.ZSTD_cpm_attachDict, @params.useRowMatchFinder);
            /* keep the caller's windowLog; everything else follows the CDict */
            @params.cParams.windowLog = windowLog;
            @params.useRowMatchFinder = cdict->useRowMatchFinder;
            {
                nuint err_code = ZSTD_resetCCtx_internal(cctx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, zbuff);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
        }

        {
            uint cdictEnd = (uint)(cdict->matchState.window.nextSrc - cdict->matchState.window.@base);
            uint cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
            if (cdictLen != 0)
            {
                /* empty dictionaries are skipped entirely */
                cctx->blockState.matchState.dictMatchState = &cdict->matchState;
                if (cctx->blockState.matchState.window.dictLimit < cdictEnd)
                {
                    cctx->blockState.matchState.window.nextSrc = cctx->blockState.matchState.window.@base + cdictEnd;
                    ZSTD_window_clear(&cctx->blockState.matchState.window);
                }

                cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
            }
        }

        cctx->dictID = cdict->dictID;
        cctx->dictContentSize = cdict->dictContentSize;
        memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, (uint)sizeof(ZSTD_compressedBlockState_t));
        return 0;
    }

    /* ZSTD_copyCDictTableIntoCCtx() :
     * copies an index table from the CDict into the cctx, stripping the low
     * 8-bit "short cache" tags when the CDict indices carry them. */
    private static void ZSTD_copyCDictTableIntoCCtx(uint* dst, uint* src, nuint tableSize, ZSTD_compressionParameters* cParams)
    {
        if (ZSTD_CDictIndicesAreTagged(cParams) !=
0)
        {
            /* Remove tags from the CDict table if they are present.
             * See docs on "short cache" in zstd_compress_internal.h for context. */
            nuint i;
            for (i = 0; i < tableSize; i++)
            {
                uint taggedIndex = src[i];
                uint index = taggedIndex >> 8;
                dst[i] = index;
            }
        }
        else
        {
            memcpy(dst, src, (uint)(tableSize * sizeof(uint)));
        }
    }

    /* ZSTD_resetCCtx_byCopyingCDict() :
     * resets the cctx and copies the CDict's hash/chain/tag tables and
     * match-state window into it, leaving the cctx independent of the CDict. */
    private static nuint ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff)
    {
        ZSTD_compressionParameters* cdict_cParams = &cdict->matchState.cParams;
        /* dedicated-dict-search CDicts must be attached, never copied */
        assert(cdict->matchState.dedicatedDictSearch == 0);
        {
            uint windowLog = @params.cParams.windowLog;
            assert(windowLog != 0);
            @params.cParams = *cdict_cParams;
            @params.cParams.windowLog = windowLog;
            @params.useRowMatchFinder = cdict->useRowMatchFinder;
            {
                nuint err_code = ZSTD_resetCCtx_internal(cctx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, zbuff);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
            assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
            assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
        }

        ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
        assert(@params.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto);
        {
            /* DDS guaranteed disabled */
            nuint chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) != 0 ? (nuint)1 << (int)cdict_cParams->chainLog : 0;
            nuint hSize = (nuint)1 << (int)cdict_cParams->hashLog;
            ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, hSize, cdict_cParams);
            if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0) != 0)
            {
                ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, cdict->matchState.chainTable, chainSize, cdict_cParams);
            }

            if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder) != 0)
            {
                nuint tagTableSize = hSize;
                memcpy(cctx->blockState.matchState.tagTable, cdict->matchState.tagTable, (uint)tagTableSize);
                cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;
            }
        }

        /* CDicts never use the hash3 table; zero the cctx's copy. */
        assert(cctx->blockState.matchState.hashLog3 <= 31);
        {
            uint h3log = cctx->blockState.matchState.hashLog3;
            nuint h3Size = h3log != 0 ? (nuint)1 << (int)h3log : 0;
            assert(cdict->matchState.hashLog3 == 0);
            memset(cctx->blockState.matchState.hashTable3, 0, (uint)(h3Size * sizeof(uint)));
        }

        ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
        {
            ZSTD_MatchState_t* srcMatchState = &cdict->matchState;
            ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;
            dstMatchState->window = srcMatchState->window;
            dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
            dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
        }

        cctx->dictID = cdict->dictID;
        cctx->dictContentSize = cdict->dictContentSize;
        memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, (uint)sizeof(ZSTD_compressedBlockState_t));
        return 0;
    }

    /* We have a choice between copying the dictionary context into the working
     * context, or referencing the dictionary context from the working context
     * in-place. We decide here which strategy to use.
     */
    private static nuint ZSTD_resetCCtx_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff)
    {
        if (ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0)
        {
            return ZSTD_resetCCtx_byAttachingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff);
        }
        else
        {
            return ZSTD_resetCCtx_byCopyingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff);
        }
    }

    /*! ZSTD_copyCCtx_internal() :
     * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
     * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
     * The "context", in this case, refers to the hash and chain tables,
     * entropy tables, and dictionary references.
     * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
     * @return : 0, or an error code */
    private static nuint ZSTD_copyCCtx_internal(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* srcCCtx, ZSTD_frameParameters fParams, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff)
    {
        if (srcCCtx->stage != ZSTD_compressionStage_e.ZSTDcs_init)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong));
        }

        memcpy(&dstCCtx->customMem, &srcCCtx->customMem, (uint)sizeof(ZSTD_customMem));
        {
            /* Rebuild dst params from src's applied params so both contexts
             * end up with identical table geometry (asserted below). */
            ZSTD_CCtx_params_s @params = dstCCtx->requestedParams;
            @params.cParams = srcCCtx->appliedParams.cParams;
            assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto);
            assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto);
            assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto);
            @params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
            @params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;
            @params.ldmParams = srcCCtx->appliedParams.ldmParams;
            @params.fParams = fParams;
            @params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
            ZSTD_resetCCtx_internal(dstCCtx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, zbuff);
            assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
            assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
            assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
            assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
            assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
        }

        ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
        {
            nuint chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy, srcCCtx->appliedParams.useRowMatchFinder, 0) != 0 ? (nuint)1 << (int)srcCCtx->appliedParams.cParams.chainLog : 0;
            nuint hSize = (nuint)1 << (int)srcCCtx->appliedParams.cParams.hashLog;
            uint h3log = srcCCtx->blockState.matchState.hashLog3;
            nuint h3Size = h3log != 0 ? (nuint)1 << (int)h3log : 0;
            memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, (uint)(hSize * sizeof(uint)));
            memcpy(dstCCtx->blockState.matchState.chainTable, srcCCtx->blockState.matchState.chainTable, (uint)(chainSize * sizeof(uint)));
            memcpy(dstCCtx->blockState.matchState.hashTable3, srcCCtx->blockState.matchState.hashTable3, (uint)(h3Size * sizeof(uint)));
        }

        ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
        {
            ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState;
            ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;
            dstMatchState->window = srcMatchState->window;
            dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
            dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
        }

        dstCCtx->dictID = srcCCtx->dictID;
        dstCCtx->dictContentSize = srcCCtx->dictContentSize;
        memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, (uint)sizeof(ZSTD_compressedBlockState_t));
        return 0;
    }

    /*! ZSTD_copyCCtx() :
     * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
     * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
     * pledgedSrcSize==0 means "unknown".
     * @return : 0, or an error code */
    public static nuint ZSTD_copyCCtx(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* srcCCtx, ulong pledgedSrcSize)
    {
        /*content*/
        ZSTD_frameParameters fParams = new ZSTD_frameParameters
        {
            contentSizeFlag = 1,
            checksumFlag = 0,
            noDictIDFlag = 0
        };
        ZSTD_buffered_policy_e zbuff = srcCCtx->bufferedPolicy;
        if (pledgedSrcSize == 0)
            pledgedSrcSize = unchecked(0UL - 1);
        fParams.contentSizeFlag = pledgedSrcSize != unchecked(0UL - 1) ? 1 : 0;
        return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff);
    }

    /*! ZSTD_reduceTable() :
     * reduce table indexes by `reducerValue`, or squash to zero.
     * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
     * It must be set to a clear 0/1 value, to remove branch during inlining.
     * Presume table size is a multiple of ZSTD_ROWSIZE
     * to help auto-vectorization */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void ZSTD_reduceTable_internal(uint* table, uint size, uint reducerValue, int preserveMark)
    {
        /* 16 == ZSTD_ROWSIZE (see comment above) */
        int nbRows = (int)size / 16;
        int cellNb = 0;
        int rowNb;
        /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
        uint reducerThreshold = reducerValue + 2;
        assert((size & 16 - 1) == 0);
        assert(size < 1U << 31);
        for (rowNb = 0; rowNb < nbRows; rowNb++)
        {
            int column;
            for (column = 0; column < 16; column++)
            {
                uint newVal;
                if (preserveMark != 0 && table[cellNb] == 1)
                {
                    /* keep btlazy2's "unsorted mark" value intact */
                    newVal = 1;
                }
                else if (table[cellNb] < reducerThreshold)
                {
                    newVal = 0;
                }
                else
                {
                    newVal = table[cellNb] - reducerValue;
                }

                table[cellNb] = newVal;
                cellNb++;
            }
        }
    }

    /* reduce without preserving the unsorted mark (all strategies except btlazy2) */
    private static void ZSTD_reduceTable(uint* table, uint size, uint reducerValue)
    {
        ZSTD_reduceTable_internal(table, size, reducerValue, 0);
    }

    /* reduce while preserving the "unsorted mark" (value 1) used by btlazy2 */
    private static void ZSTD_reduceTable_btlazy2(uint* table, uint size, uint reducerValue)
    {
        ZSTD_reduceTable_internal(table, size, reducerValue, 1);
    }

    /*!
 ZSTD_reduceIndex() :
     * rescale all indexes to avoid future overflow (indexes are U32) */
    private static void ZSTD_reduceIndex(ZSTD_MatchState_t* ms, ZSTD_CCtx_params_s* @params, uint reducerValue)
    {
        {
            uint hSize = (uint)1 << (int)@params->cParams.hashLog;
            ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
        }

        if (ZSTD_allocateChainTable(@params->cParams.strategy, @params->useRowMatchFinder, (uint)ms->dedicatedDictSearch) != 0)
        {
            uint chainSize = (uint)1 << (int)@params->cParams.chainLog;
            if (@params->cParams.strategy == ZSTD_strategy.ZSTD_btlazy2)
                ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
            else
                ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
        }

        if (ms->hashLog3 != 0)
        {
            uint h3Size = (uint)1 << (int)ms->hashLog3;
            ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
        }
    }

    /* See doc/zstd_compression_format.md for detailed format description */
    /* ZSTD_seqToCodes() :
     * translates each stored sequence into its LL/OF/ML symbol codes, and
     * returns 1 when any offset code is large enough to require the
     * long-offsets encoding path on the current platform, 0 otherwise. */
    private static int ZSTD_seqToCodes(SeqStore_t* seqStorePtr)
    {
        SeqDef_s* sequences = seqStorePtr->sequencesStart;
        byte* llCodeTable = seqStorePtr->llCode;
        byte* ofCodeTable = seqStorePtr->ofCode;
        byte* mlCodeTable = seqStorePtr->mlCode;
        uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
        uint u;
        int longOffsets = 0;
        assert(nbSeq <= seqStorePtr->maxNbSeq);
        for (u = 0; u < nbSeq; u++)
        {
            uint llv = sequences[u].litLength;
            /* offset code is simply the position of the highest set bit */
            uint ofCode = ZSTD_highbit32(sequences[u].offBase);
            uint mlv = sequences[u].mlBase;
            llCodeTable[u] = (byte)ZSTD_LLcode(llv);
            ofCodeTable[u] = (byte)ofCode;
            mlCodeTable[u] = (byte)ZSTD_MLcode(mlv);
            assert(!(MEM_64bits && ofCode >= (uint)(MEM_32bits ? 25 : 57)));
            if (MEM_32bits && ofCode >= (uint)(MEM_32bits ? 25 : 57))
                longOffsets = 1;
        }

        /* 35 and 52 are the maximum LL/ML codes (cf. the (35+1)/(52+1) frequency
         * table sizes used elsewhere in this file); the "long length" position
         * is forced to the max code. */
        if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength)
            llCodeTable[seqStorePtr->longLengthPos] = 35;
        if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength)
            mlCodeTable[seqStorePtr->longLengthPos] = 52;
        return longOffsets;
    }

    /* ZSTD_useTargetCBlockSize():
     * Returns if target compressed block size param is being used.
     * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
     * Returns 1 if true, 0 otherwise. */
    private static int ZSTD_useTargetCBlockSize(ZSTD_CCtx_params_s* cctxParams)
    {
        return cctxParams->targetCBlockSize != 0 ? 1 : 0;
    }

    /* ZSTD_blockSplitterEnabled():
     * Returns if block splitting param is being used
     * If used, compression will do best effort to split a block in order to improve compression ratio.
     * At the time this function is called, the parameter must be finalized.
     * Returns 1 if true, 0 otherwise. */
    private static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params_s* cctxParams)
    {
        assert(cctxParams->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto);
        return cctxParams->postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0;
    }

    /* ZSTD_buildSequencesStatistics():
     * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
     * Modifies `nextEntropy` to have the appropriate values as a side effect.
     * nbSeq must be greater than 0.
     *
     * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
     */
    private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics(SeqStore_t* seqStorePtr, nuint nbSeq, ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, byte* dst, byte* dstEnd, ZSTD_strategy strategy, uint* countWorkspace, void* entropyWorkspace, nuint entropyWkspSize)
    {
        byte* ostart = dst;
        byte* oend = dstEnd;
        byte* op = ostart;
        uint* CTable_LitLength = nextEntropy->litlengthCTable;
        uint* CTable_OffsetBits = nextEntropy->offcodeCTable;
        uint* CTable_MatchLength = nextEntropy->matchlengthCTable;
        byte* ofCodeTable = seqStorePtr->ofCode;
        byte* llCodeTable = seqStorePtr->llCode;
        byte* mlCodeTable = seqStorePtr->mlCode;
        ZSTD_symbolEncodingTypeStats_t stats;
        System.Runtime.CompilerServices.Unsafe.SkipInit(out stats);
        stats.lastCountSize = 0;
        stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
        assert(op <= oend);
        assert(nbSeq != 0);
        /* literal lengths */
        {
            uint max = 35;
            /* can't fail */
            nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);
            nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
            stats.LLtype = (uint)ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 9, prevEntropy->litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, strategy);
            assert(SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed);
            assert(!(stats.LLtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat.FSE_repeat_none));
            {
                nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_LitLength, 9, (SymbolEncodingType_e)stats.LLtype, countWorkspace, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, 35, prevEntropy->litlengthCTable, sizeof(uint) * 329, entropyWorkspace, entropyWkspSize);
                if (ERR_isError(countSize))
                {
                    stats.size = countSize;
                    return stats;
                }

                if (stats.LLtype == (uint)SymbolEncodingType_e.set_compressed)
                    stats.lastCountSize = countSize;
                op += countSize;
                assert(op <= oend);
            }
        }

        /* offset codes */
        {
            uint max = 31;
            nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);
            /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
            ZSTD_DefaultPolicy_e defaultPolicy = max <= 28 ? ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed : ZSTD_DefaultPolicy_e.ZSTD_defaultDisallowed;
            nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
            stats.Offtype = (uint)ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 8, prevEntropy->offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy);
            assert(!(stats.Offtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat.FSE_repeat_none));
            {
                nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_OffsetBits, 8, (SymbolEncodingType_e)stats.Offtype, countWorkspace, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, 28, prevEntropy->offcodeCTable, sizeof(uint) * 193, entropyWorkspace, entropyWkspSize);
                if (ERR_isError(countSize))
                {
                    stats.size = countSize;
                    return stats;
                }

                if (stats.Offtype == (uint)SymbolEncodingType_e.set_compressed)
                    stats.lastCountSize = countSize;
                op += countSize;
                assert(op <= oend);
            }
        }

        /* match lengths */
        {
            uint max = 52;
            nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);
            nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
            stats.MLtype = (uint)ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 9, prevEntropy->matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, strategy);
            assert(!(stats.MLtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat.FSE_repeat_none));
            {
                nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_MatchLength, 9, (SymbolEncodingType_e)stats.MLtype, countWorkspace, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, 52, prevEntropy->matchlengthCTable, sizeof(uint) * 363, entropyWorkspace, entropyWkspSize);
                if (ERR_isError(countSize))
                {
                    stats.size = countSize;
                    return stats;
                }

                if (stats.MLtype == (uint)SymbolEncodingType_e.set_compressed)
                    stats.lastCountSize = countSize;
                op += countSize;
                assert(op <= oend);
            }
        }

        stats.size = (nuint)(op - ostart);
        return stats;
    }

    /* ZSTD_entropyCompressSeqStore_internal() :
     * entropy-encodes literals, sequence-count header, FSE table descriptions
     * and the sequences bitstream into dst; returns the compressed size or a
     * zstd error code. */
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstCapacity, void* literals, nuint litSize, SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* entropyWorkspace, nuint entropyWkspSize, int bmi2)
    {
        ZSTD_strategy strategy = cctxParams->cParams.strategy;
        /* the count table lives at the start of the entropy workspace */
        uint* count = (uint*)entropyWorkspace;
        uint* CTable_LitLength = nextEntropy->fse.litlengthCTable;
        uint* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
        uint* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
        SeqDef_s* sequences = seqStorePtr->sequencesStart;
        nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
        byte* ofCodeTable = seqStorePtr->ofCode;
        byte* llCodeTable = seqStorePtr->llCode;
        byte* mlCodeTable = seqStorePtr->mlCode;
        byte* ostart = (byte*)dst;
        byte* oend = ostart + dstCapacity;
        byte* op = ostart;
        nuint lastCountSize;
        int longOffsets = 0;
        entropyWorkspace = count + (52 + 1);
        entropyWkspSize -= (52 + 1) * sizeof(uint);
        assert(entropyWkspSize >= (8 << 10) + 512);
        {
            nuint
numSequences = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
            /* Base suspicion of uncompressibility on ratio of literals to sequences */
            int suspectUncompressible = numSequences == 0 || litSize / numSequences >= 20 ? 1 : 0;
            nuint cSize = ZSTD_compressLiterals(op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, ZSTD_literalsCompressionIsDisabled(cctxParams), suspectUncompressible, bmi2);
            {
                nuint err_code = cSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            assert(cSize <= dstCapacity);
            op += cSize;
        }

        if (oend - op < 3 + 1)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        /* sequence-count header: 1 byte if < 128, 2 bytes if < 0x7F00,
         * else 0xFF followed by (nbSeq - 0x7F00) as little-endian 16-bit */
        if (nbSeq < 128)
        {
            *op++ = (byte)nbSeq;
        }
        else if (nbSeq < 0x7F00)
        {
            op[0] = (byte)((nbSeq >> 8) + 0x80);
            op[1] = (byte)nbSeq;
            op += 2;
        }
        else
        {
            op[0] = 0xFF;
            MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00));
            op += 3;
        }

        assert(op <= oend);
        if (nbSeq == 0)
        {
            /* no sequences => entropy tables are carried over unchanged */
            memcpy(&nextEntropy->fse, &prevEntropy->fse, (uint)sizeof(ZSTD_fseCTables_t));
            return (nuint)(op - ostart);
        }

        {
            byte* seqHead = op++;
            /* build stats for sequences */
            ZSTD_symbolEncodingTypeStats_t stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, &prevEntropy->fse, &nextEntropy->fse, op, oend, strategy, count, entropyWorkspace, entropyWkspSize);
            {
                nuint err_code = stats.size;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            /* symbol-encoding-types byte: LL in bits 6-7, OF in 4-5, ML in 2-3 */
            *seqHead = (byte)((stats.LLtype << 6) + (stats.Offtype << 4) + (stats.MLtype << 2));
            lastCountSize = stats.lastCountSize;
            op += stats.size;
            longOffsets = stats.longOffsets;
        }

        {
            nuint bitstreamSize = ZSTD_encodeSequences(op, (nuint)(oend - op), CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2);
            {
                nuint err_code = bitstreamSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            op += bitstreamSize;
            assert(op <= oend);
            /* NOTE(review): a last FSE count + bitstream shorter than 4 bytes is
             * rejected (return 0 => caller falls back to an uncompressed block);
             * presumably mirrors the upstream zstd ambiguity workaround — confirm
             * against upstream zstd_compress.c. */
            if (lastCountSize != 0 && lastCountSize + bitstreamSize < 4)
            {
                assert(lastCountSize + bitstreamSize == 3);
                return 0;
            }
        }

        return (nuint)(op - ostart);
    }

    /* ZSTD_entropyCompressSeqStore_wExtLitBuffer() :
     * wraps the internal encoder and converts "not worth compressing" outcomes
     * (no gain vs. blockSize, or dst too small while a raw block would fit)
     * into a 0 return, which callers treat as "emit the block uncompressed". */
    private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer(void* dst, nuint dstCapacity, void* literals, nuint litSize, nuint blockSize, SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* entropyWorkspace, nuint entropyWkspSize, int bmi2)
    {
        nuint cSize = ZSTD_entropyCompressSeqStore_internal(dst, dstCapacity, literals, litSize, seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2);
        if (cSize == 0)
            return 0;
        if (cSize == unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) && blockSize <= dstCapacity)
        {
            return 0;
        }

        {
            nuint err_code = cSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        {
            /* require a minimum gain, otherwise prefer the raw block */
            nuint maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);
            if (cSize >= maxCSize)
                return 0;
        }

        assert(cSize < 1 << 17);
        return cSize;
    }

    /* convenience overload: takes literals straight from the seq store */
    private static nuint ZSTD_entropyCompressSeqStore(SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, nuint srcSize, void* entropyWorkspace, nuint entropyWkspSize, int bmi2)
    {
        return ZSTD_entropyCompressSeqStore_wExtLitBuffer(dst, dstCapacity, seqStorePtr->litStart, (nuint)(seqStorePtr->lit - seqStorePtr->litStart), srcSize, seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2);
    }

    /* Block-compressor dispatch table, indexed [dictMode][strategy];
     * row order matches ZSTD_dictMode_e, column order matches ZSTD_strategy. */
    private static readonly ZSTD_BlockCompressor_f[][] blockCompressor = new ZSTD_BlockCompressor_f[4][]
    {
        new ZSTD_BlockCompressor_f[10]
        {
            ZSTD_compressBlock_fast,
            ZSTD_compressBlock_fast,
            ZSTD_compressBlock_doubleFast,
            ZSTD_compressBlock_greedy,
ZSTD_compressBlock_lazy, + ZSTD_compressBlock_lazy2, + ZSTD_compressBlock_btlazy2, + ZSTD_compressBlock_btopt, + ZSTD_compressBlock_btultra, + ZSTD_compressBlock_btultra2 + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_doubleFast_extDict, + ZSTD_compressBlock_greedy_extDict, + ZSTD_compressBlock_lazy_extDict, + ZSTD_compressBlock_lazy2_extDict, + ZSTD_compressBlock_btlazy2_extDict, + ZSTD_compressBlock_btopt_extDict, + ZSTD_compressBlock_btultra_extDict, + ZSTD_compressBlock_btultra_extDict + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_doubleFast_dictMatchState, + ZSTD_compressBlock_greedy_dictMatchState, + ZSTD_compressBlock_lazy_dictMatchState, + ZSTD_compressBlock_lazy2_dictMatchState, + ZSTD_compressBlock_btlazy2_dictMatchState, + ZSTD_compressBlock_btopt_dictMatchState, + ZSTD_compressBlock_btultra_dictMatchState, + ZSTD_compressBlock_btultra_dictMatchState + }, + new ZSTD_BlockCompressor_f[10] + { + null, + null, + null, + ZSTD_compressBlock_greedy_dedicatedDictSearch, + ZSTD_compressBlock_lazy_dedicatedDictSearch, + ZSTD_compressBlock_lazy2_dedicatedDictSearch, + null, + null, + null, + null + } + }; + private static readonly ZSTD_BlockCompressor_f[][] rowBasedBlockCompressors = new ZSTD_BlockCompressor_f[4][] + { + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_row, + ZSTD_compressBlock_lazy_row, + ZSTD_compressBlock_lazy2_row + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_extDict_row, + ZSTD_compressBlock_lazy_extDict_row, + ZSTD_compressBlock_lazy2_extDict_row + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_dictMatchState_row, + ZSTD_compressBlock_lazy_dictMatchState_row, + ZSTD_compressBlock_lazy2_dictMatchState_row + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_dedicatedDictSearch_row, + 
ZSTD_compressBlock_lazy_dedicatedDictSearch_row, + ZSTD_compressBlock_lazy2_dedicatedDictSearch_row + } + }; + /* ZSTD_selectBlockCompressor() : + * Not static, but internal use only (used by long distance matcher) + * assumption : strat is a valid strategy */ + private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) + { + ZSTD_BlockCompressor_f selectedCompressor; + assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); + if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder) != 0) + { + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_strategy.ZSTD_greedy]; + } + else + { + selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; + } + + assert(selectedCompressor != null); + return selectedCompressor; + } + + private static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, byte* anchor, nuint lastLLSize) + { + memcpy(seqStorePtr->lit, anchor, (uint)lastLLSize); + seqStorePtr->lit += lastLLSize; + } + + private static void ZSTD_resetSeqStore(SeqStore_t* ssPtr) + { + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; + ssPtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; + } + + /* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. 
*/ + private static nuint ZSTD_postProcessSequenceProducerResult(ZSTD_Sequence* outSeqs, nuint nbExternalSeqs, nuint outSeqsCapacity, nuint srcSize) + { + if (nbExternalSeqs > outSeqsCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + } + + if (nbExternalSeqs == 0 && srcSize > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + } + + if (srcSize == 0) + { + outSeqs[0] = new ZSTD_Sequence(); + return 1; + } + + { + ZSTD_Sequence lastSeq = outSeqs[nbExternalSeqs - 1]; + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) + { + return nbExternalSeqs; + } + + if (nbExternalSeqs == outSeqsCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + } + + outSeqs[nbExternalSeqs] = new ZSTD_Sequence(); + return nbExternalSeqs + 1; + } + } + + /* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */ + private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seqBufSize) + { + nuint matchLenSum, litLenSum, i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) + { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } + + return litLenSum + matchLenSum; + } + + /** + * Function to validate sequences produced by a block compressor. 
+ */ + private static void ZSTD_validateSeqStore(SeqStore_t* seqStore, ZSTD_compressionParameters* cParams) + { + } + + private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSize) + { + ZSTD_MatchState_t* ms = &zc->blockState.matchState; + assert(srcSize <= 1 << 17); + ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); + if (srcSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1) + { + if (zc->appliedParams.cParams.strategy >= ZSTD_strategy.ZSTD_btopt) + { + ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); + } + else + { + ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); + } + + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress; + } + + ZSTD_resetSeqStore(&zc->seqStore); + ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; + ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; + assert(ms->dictMatchState == null || ms->loadedDictEnd == ms->window.dictLimit); + { + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + uint curr = (uint)(istart - @base); +#if DEBUG + if (sizeof(nint) == 8) + assert(istart - @base < (nint)unchecked((uint)-1)); +#endif + if (curr > ms->nextToUpdate + 384) + ms->nextToUpdate = curr - (192 < curr - ms->nextToUpdate - 384 ? 
192 : curr - ms->nextToUpdate - 384); + } + + { + ZSTD_dictMode_e dictMode = ZSTD_matchState_dictMode(ms); + nuint lastLLSize; + { + int i; + for (i = 0; i < 3; ++i) + zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; + } + + if (zc->externSeqStore.pos < zc->externSeqStore.size) + { + assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable); + if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + } + + lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, zc->appliedParams.useRowMatchFinder, src, srcSize); + assert(zc->externSeqStore.pos <= zc->externSeqStore.size); + } + else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + RawSeqStore_t ldmSeqStore = kNullRawSeqStore; + if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + } + + ldmSeqStore.seq = zc->ldmSequences; + ldmSeqStore.capacity = zc->maxNbLdmSequences; + { + /* Updates ldmSeqStore.size */ + nuint err_code = ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, src, srcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + lastLLSize = ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, zc->appliedParams.useRowMatchFinder, src, srcSize); + assert(ldmSeqStore.pos == ldmSeqStore.size); + } + else if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) + { + assert(zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)); + assert(zc->appliedParams.extSeqProdFunc != null); + { + uint windowSize = (uint)1 << (int)zc->appliedParams.cParams.windowLog; + nuint nbExternalSeqs = ((delegate* managed)zc->appliedParams.extSeqProdFunc)(zc->appliedParams.extSeqProdState, zc->extSeqBuf, zc->extSeqBufCapacity, src, 
srcSize, null, 0, zc->appliedParams.compressionLevel, windowSize); + nuint nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(zc->extSeqBuf, nbExternalSeqs, zc->extSeqBufCapacity, srcSize); + if (!ERR_isError(nbPostProcessedSeqs)) + { + ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition + { + idx = 0, + posInSequence = 0, + posInSrc = 0 + }; + nuint seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); + if (seqLenSum > srcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + } + + { + nuint err_code = ZSTD_transferSequences_wBlockDelim(zc, &seqPos, zc->extSeqBuf, nbPostProcessedSeqs, src, srcSize, zc->appliedParams.searchForExternalRepcodes); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ms->ldmSeqStore = null; + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; + } + + if (zc->appliedParams.enableMatchFinderFallback == 0) + { + return nbPostProcessedSeqs; + } + + { + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); + ms->ldmSeqStore = null; + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } + } + } + else + { + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); + ms->ldmSeqStore = null; + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } + + { + byte* lastLiterals = (byte*)src + srcSize - lastLLSize; + ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); + } + } + + ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; + } + + private static nuint ZSTD_copyBlockSequences(SeqCollector* seqCollector, SeqStore_t* seqStore, uint* prevRepcodes) + { + SeqDef_s* inSeqs = 
seqStore->sequencesStart; + nuint nbInSequences = (nuint)(seqStore->sequences - inSeqs); + nuint nbInLiterals = (nuint)(seqStore->lit - seqStore->litStart); + ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; + nuint nbOutSequences = nbInSequences + 1; + nuint nbOutLiterals = 0; + repcodes_s repcodes; + nuint i; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + if (nbOutSequences > seqCollector->maxSequences - seqCollector->seqIndex) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + memcpy(&repcodes, prevRepcodes, (uint)sizeof(repcodes_s)); + for (i = 0; i < nbInSequences; ++i) + { + uint rawOffset; + outSeqs[i].litLength = inSeqs[i].litLength; + outSeqs[i].matchLength = (uint)(inSeqs[i].mlBase + 3); + outSeqs[i].rep = 0; + if (i == seqStore->longLengthPos) + { + if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) + { + outSeqs[i].litLength += 0x10000; + } + else if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) + { + outSeqs[i].matchLength += 0x10000; + } + } + + if (1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3) + { + assert(1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3); + uint repcode = inSeqs[i].offBase; + assert(repcode > 0); + outSeqs[i].rep = repcode; + if (outSeqs[i].litLength != 0) + { + rawOffset = repcodes.rep[repcode - 1]; + } + else + { + if (repcode == 3) + { + assert(repcodes.rep[0] > 1); + rawOffset = repcodes.rep[0] - 1; + } + else + { + rawOffset = repcodes.rep[repcode]; + } + } + } + else + { + assert(inSeqs[i].offBase > 3); + rawOffset = inSeqs[i].offBase - 3; + } + + outSeqs[i].offset = rawOffset; + ZSTD_updateRep(repcodes.rep, inSeqs[i].offBase, inSeqs[i].litLength == 0 ? 
1U : 0U); + nbOutLiterals += outSeqs[i].litLength; + } + + assert(nbInLiterals >= nbOutLiterals); + { + nuint lastLLSize = nbInLiterals - nbOutLiterals; + outSeqs[nbInSequences].litLength = (uint)lastLLSize; + outSeqs[nbInSequences].matchLength = 0; + outSeqs[nbInSequences].offset = 0; + assert(nbOutSequences == nbInSequences + 1); + } + + seqCollector->seqIndex += nbOutSequences; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + return 0; + } + + /*! ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated + * from a buffer of srcSize bytes + * + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ + public static nuint ZSTD_sequenceBound(nuint srcSize) + { + nuint maxNbSeq = srcSize / 3 + 1; + nuint maxNbDelims = srcSize / (1 << 10) + 1; + return maxNbSeq + maxNbDelims; + } + + /*! ZSTD_generateSequences() : + * WARNING: This function is meant for debugging and informational purposes ONLY! + * Its implementation is flawed, and it will be deleted in a future version. + * It is not guaranteed to succeed, as there are several cases where it will give + * up and fail. You should NOT use this function in production code. + * + * This function is deprecated, and will be removed in a future version. + * + * Generate sequences using ZSTD_compress2(), given a source buffer. + * + * @param zc The compression context to be used for ZSTD_compress2(). Set any + * compression parameters you need on this context. + * @param outSeqs The output sequences buffer of size @p outSeqsSize + * @param outSeqsCapacity The size of the output sequences buffer. + * ZSTD_sequenceBound(srcSize) is an upper bound on the number + * of sequences that can be generated. + * @param src The source buffer to generate sequences from of size @p srcSize. + * @param srcSize The size of the source buffer. 
+ * + * Each block will end with a dummy sequence + * with offset == 0, matchLength == 0, and litLength == length of last literals. + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) + * simply acts as a block delimiter. + * + * @returns The number of sequences generated, necessarily less than + * ZSTD_sequenceBound(srcSize), or an error code that can be checked + * with ZSTD_isError(). + */ + public static nuint ZSTD_generateSequences(ZSTD_CCtx_s* zc, ZSTD_Sequence* outSeqs, nuint outSeqsSize, void* src, nuint srcSize) + { + nuint dstCapacity = ZSTD_compressBound(srcSize); + /* Make C90 happy. */ + void* dst; + SeqCollector seqCollector; + { + int targetCBlockSize; + { + nuint err_code = ZSTD_CCtx_getParameter(zc, ZSTD_cParameter.ZSTD_c_targetCBlockSize, &targetCBlockSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (targetCBlockSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + } + + { + int nbWorkers; + { + nuint err_code = ZSTD_CCtx_getParameter(zc, ZSTD_cParameter.ZSTD_c_nbWorkers, &nbWorkers); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (nbWorkers != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + } + + dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); + if (dst == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + seqCollector.collectSequences = 1; + seqCollector.seqStart = outSeqs; + seqCollector.seqIndex = 0; + seqCollector.maxSequences = outSeqsSize; + zc->seqCollector = seqCollector; + { + nuint ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); + ZSTD_customFree(dst, ZSTD_defaultCMem); + { + nuint err_code = ret; + if (ERR_isError(err_code)) + { + return err_code; + } + } + } + + assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); + return zc->seqCollector.seqIndex; + } + + /*! 
ZSTD_mergeBlockDelimiters() : + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals + * by merging them into the literals of the next sequence. + * + * As such, the final generated result has no explicit representation of block boundaries, + * and the final last literals segment is not represented in the sequences. + * + * The output of this function can be fed into ZSTD_compressSequences() with CCtx + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters + * @return : number of sequences left after merging + */ + public static nuint ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, nuint seqsSize) + { + nuint @in = 0; + nuint @out = 0; + for (; @in < seqsSize; ++@in) + { + if (sequences[@in].offset == 0 && sequences[@in].matchLength == 0) + { + if (@in != seqsSize - 1) + { + sequences[@in + 1].litLength += sequences[@in].litLength; + } + } + else + { + sequences[@out] = sequences[@in]; + ++@out; + } + } + + return @out; + } + + /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */ + private static int ZSTD_isRLE(byte* src, nuint length) + { + byte* ip = src; + byte value = ip[0]; + nuint valueST = (nuint)(value * 0x0101010101010101UL); + nuint unrollSize = (nuint)(sizeof(nuint) * 4); + nuint unrollMask = unrollSize - 1; + nuint prefixLength = length & unrollMask; + nuint i; + if (length == 1) + return 1; + if (prefixLength != 0 && ZSTD_count(ip + 1, ip, ip + prefixLength) != prefixLength - 1) + { + return 0; + } + + for (i = prefixLength; i != length; i += unrollSize) + { + nuint u; + for (u = 0; u < unrollSize; u += (nuint)sizeof(nuint)) + { + if (MEM_readST(ip + i + u) != valueST) + { + return 0; + } + } + } + + return 1; + } + + /* Returns true if the given block may be RLE. + * This is just a heuristic based on the compressibility. + * It may return both false positives and false negatives. 
+ */ + private static int ZSTD_maybeRLE(SeqStore_t* seqStore) + { + nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); + nuint nbLits = (nuint)(seqStore->lit - seqStore->litStart); + return nbSeqs < 4 && nbLits < 10 ? 1 : 0; + } + + private static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* bs) + { + ZSTD_compressedBlockState_t* tmp = bs->prevCBlock; + bs->prevCBlock = bs->nextCBlock; + bs->nextCBlock = tmp; + } + + /* Writes the block header */ + private static void writeBlockHeader(void* op, nuint cSize, nuint blockSize, uint lastBlock) + { + uint cBlockHeader = cSize == 1 ? lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + } + + /** ZSTD_buildBlockEntropyStats_literals() : + * Builds entropy for the literals. + * Stores literals block type (raw, rle, compressed, repeat) and + * huffman description table to hufMetadata. 
+ * Requires ENTROPY_WORKSPACE_SIZE workspace + * @return : size of huffman description table, or an error code + */ + private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSize, ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_hufCTablesMetadata_t* hufMetadata, int literalsCompressionIsDisabled, void* workspace, nuint wkspSize, int hufFlags) + { + byte* wkspStart = (byte*)workspace; + byte* wkspEnd = wkspStart + wkspSize; + byte* countWkspStart = wkspStart; + uint* countWksp = (uint*)workspace; + const nuint countWkspSize = (255 + 1) * sizeof(uint); + byte* nodeWksp = countWkspStart + countWkspSize; + nuint nodeWkspSize = (nuint)(wkspEnd - nodeWksp); + uint maxSymbolValue = 255; + uint huffLog = 11; + HUF_repeat repeat = prevHuf->repeatMode; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + if (literalsCompressionIsDisabled != 0) + { + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; + } + + { + nuint minLitSize = (nuint)(prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 
6 : 63); + if (srcSize <= minLitSize) + { + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; + } + } + + { + nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, (byte*)src, srcSize, workspace, wkspSize); + { + nuint err_code = largest; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (largest == srcSize) + { + hufMetadata->hType = SymbolEncodingType_e.set_rle; + return 0; + } + + if (largest <= (srcSize >> 7) + 4) + { + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; + } + } + + if (repeat == HUF_repeat.HUF_repeat_check && HUF_validateCTable(&prevHuf->CTable.e0, countWksp, maxSymbolValue) == 0) + { + repeat = HUF_repeat.HUF_repeat_none; + } + + memset(&nextHuf->CTable.e0, 0, sizeof(ulong) * 257); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, &nextHuf->CTable.e0, countWksp, hufFlags); + assert(huffLog <= 11); + { + nuint maxBits = HUF_buildCTable_wksp(&nextHuf->CTable.e0, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); + { + nuint err_code = maxBits; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + huffLog = (uint)maxBits; + } + + { + nuint newCSize = HUF_estimateCompressedSize(&nextHuf->CTable.e0, countWksp, maxSymbolValue); + nuint hSize = HUF_writeCTable_wksp(hufMetadata->hufDesBuffer, sizeof(byte) * 128, &nextHuf->CTable.e0, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); + if (repeat != HUF_repeat.HUF_repeat_none) + { + nuint oldCSize = HUF_estimateCompressedSize(&prevHuf->CTable.e0, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) + { + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + hufMetadata->hType = SymbolEncodingType_e.set_repeat; + return 0; + } + } + + if (newCSize + hSize >= srcSize) + { + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; + } + + hufMetadata->hType = 
SymbolEncodingType_e.set_compressed; + nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; + return hSize; + } + } + + /* ZSTD_buildDummySequencesStatistics(): + * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, + * and updates nextEntropy to the appropriate repeatMode. + */ + private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) + { + ZSTD_symbolEncodingTypeStats_t stats = new ZSTD_symbolEncodingTypeStats_t + { + LLtype = (uint)SymbolEncodingType_e.set_basic, + Offtype = (uint)SymbolEncodingType_e.set_basic, + MLtype = (uint)SymbolEncodingType_e.set_basic, + size = 0, + lastCountSize = 0, + longOffsets = 0 + }; + nextEntropy->litlength_repeatMode = FSE_repeat.FSE_repeat_none; + nextEntropy->offcode_repeatMode = FSE_repeat.FSE_repeat_none; + nextEntropy->matchlength_repeatMode = FSE_repeat.FSE_repeat_none; + return stats; + } + + /** ZSTD_buildBlockEntropyStats_sequences() : + * Builds entropy for the sequences. + * Stores symbol compression modes and fse table to fseMetadata. + * Requires ENTROPY_WORKSPACE_SIZE wksp. + * @return : size of fse tables or error code */ + private static nuint ZSTD_buildBlockEntropyStats_sequences(SeqStore_t* seqStorePtr, ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize) + { + ZSTD_strategy strategy = cctxParams->cParams.strategy; + nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + byte* ostart = fseMetadata->fseTablesBuffer; + byte* oend = ostart + sizeof(byte) * 133; + byte* op = ostart; + uint* countWorkspace = (uint*)workspace; + uint* entropyWorkspace = countWorkspace + (52 + 1); + nuint entropyWorkspaceSize = wkspSize - (52 + 1) * sizeof(uint); + ZSTD_symbolEncodingTypeStats_t stats; + stats = nbSeq != 0 ? 
ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, prevEntropy, nextEntropy, op, oend, strategy, countWorkspace, entropyWorkspace, entropyWorkspaceSize) : ZSTD_buildDummySequencesStatistics(nextEntropy); + { + nuint err_code = stats.size; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + fseMetadata->llType = (SymbolEncodingType_e)stats.LLtype; + fseMetadata->ofType = (SymbolEncodingType_e)stats.Offtype; + fseMetadata->mlType = (SymbolEncodingType_e)stats.MLtype; + fseMetadata->lastCountSize = stats.lastCountSize; + return stats.size; + } + + /** ZSTD_buildBlockEntropyStats() : + * Builds entropy for the block. + * Requires workspace size ENTROPY_WORKSPACE_SIZE + * @return : 0 on success, or an error code + * Note : also employed in superblock + */ + private static nuint ZSTD_buildBlockEntropyStats(SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize) + { + nuint litSize = (nuint)(seqStorePtr->lit - seqStorePtr->litStart); + int huf_useOptDepth = cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; + int hufFlags = huf_useOptDepth != 0 ? 
(int)HUF_flags_e.HUF_flags_optimalDepth : 0; + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_literalsCompressionIsDisabled(cctxParams), workspace, wkspSize, hufFlags); + { + nuint err_code = entropyMetadata->hufMetadata.hufDesSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, &prevEntropy->fse, &nextEntropy->fse, cctxParams, &entropyMetadata->fseMetadata, workspace, wkspSize); + { + nuint err_code = entropyMetadata->fseMetadata.fseTablesSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /* Returns the size estimate for the literals section (header + content) of a block */ + private static nuint ZSTD_estimateBlockSize_literal(byte* literals, nuint litSize, ZSTD_hufCTables_t* huf, ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, nuint wkspSize, int writeEntropy) + { + uint* countWksp = (uint*)workspace; + uint maxSymbolValue = 255; + nuint literalSectionHeaderSize = (nuint)(3 + (litSize >= 1 * (1 << 10) ? 1 : 0) + (litSize >= 16 * (1 << 10) ? 1 : 0)); + uint singleStream = litSize < 256 ? 
1U : 0U; + if (hufMetadata->hType == SymbolEncodingType_e.set_basic) + return litSize; + else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) + return 1; + else if (hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat) + { + nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, literals, litSize, workspace, wkspSize); + if (ERR_isError(largest)) + return litSize; + { + nuint cLitSizeEstimate = HUF_estimateCompressedSize(&huf->CTable.e0, countWksp, maxSymbolValue); + if (writeEntropy != 0) + cLitSizeEstimate += hufMetadata->hufDesSize; + if (singleStream == 0) + cLitSizeEstimate += 6; + return cLitSizeEstimate + literalSectionHeaderSize; + } + } + + assert(0 != 0); + return 0; + } + + /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ + private static nuint ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, byte* codeTable, nuint nbSeq, uint maxCode, uint* fseCTable, byte* additionalBits, short* defaultNorm, uint defaultNormLog, uint defaultMax, void* workspace, nuint wkspSize) + { + uint* countWksp = (uint*)workspace; + byte* ctp = codeTable; + byte* ctStart = ctp; + byte* ctEnd = ctStart + nbSeq; + nuint cSymbolTypeSizeEstimateInBits = 0; + uint max = maxCode; + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); + if (type == SymbolEncodingType_e.set_basic) + { + assert(max <= defaultMax); + cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); + } + else if (type == SymbolEncodingType_e.set_rle) + { + cSymbolTypeSizeEstimateInBits = 0; + } + else if (type == SymbolEncodingType_e.set_compressed || type == SymbolEncodingType_e.set_repeat) + { + cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); + } + + if (ERR_isError(cSymbolTypeSizeEstimateInBits)) + { + return nbSeq * 10; + } + + while (ctp < ctEnd) + { + if (additionalBits != null) + 
cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; + else + cSymbolTypeSizeEstimateInBits += *ctp; + ctp++; + } + + return cSymbolTypeSizeEstimateInBits >> 3; + } + + /* Returns the size estimate for the sequences section (header + content) of a block */ + private static nuint ZSTD_estimateBlockSize_sequences(byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize, int writeEntropy) + { + /* seqHead */ + nuint sequencesSectionHeaderSize = (nuint)(1 + 1 + (nbSeq >= 128 ? 1 : 0) + (nbSeq >= 0x7F00 ? 1 : 0)); + nuint cSeqSizeEstimate = 0; + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, 31, fseTables->offcodeCTable, null, OF_defaultNorm, OF_defaultNormLog, 28, workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, 35, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, 35, workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, 52, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, 52, workspace, wkspSize); + if (writeEntropy != 0) + cSeqSizeEstimate += fseMetadata->fseTablesSize; + return cSeqSizeEstimate + sequencesSectionHeaderSize; + } + + /* Returns the size estimate for a given stream of literals, of, ll, ml */ + private static nuint ZSTD_estimateBlockSize(byte* literals, nuint litSize, byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize, int writeLitEntropy, int writeSeqEntropy) + { + nuint literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); + nuint seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, 
llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); + return seqSize + literalsSize + ZSTD_blockHeaderSize; + } + + /* Builds entropy statistics and uses them for blocksize estimation. + * + * @return: estimated compressed size of the seqStore, or a zstd error. + */ + private static nuint ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx_s* zc) + { + ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; + { + nuint err_code = ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_estimateBlockSize(seqStore->litStart, (nuint)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (nuint)(seqStore->sequences - seqStore->sequencesStart), &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize, entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 
1 : 0, 1);
    }

    /* Returns literals bytes represented in a seqStore */
    private static nuint ZSTD_countSeqStoreLiteralsBytes(SeqStore_t* seqStore)
    {
        nuint literalsBytes = 0;
        nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart);
        nuint i;
        for (i = 0; i < nbSeqs; ++i)
        {
            SeqDef_s seq = seqStore->sequencesStart[i];
            literalsBytes += seq.litLength;
            /* At the flagged long-length position the stored 16-bit litLength field wrapped,
             * so the true literal length is 0x10000 larger */
            if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength)
            {
                literalsBytes += 0x10000;
            }
        }

        return literalsBytes;
    }

    /* Returns match bytes represented in a seqStore */
    private static nuint ZSTD_countSeqStoreMatchBytes(SeqStore_t* seqStore)
    {
        nuint matchBytes = 0;
        nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart);
        nuint i;
        for (i = 0; i < nbSeqs; ++i)
        {
            SeqDef_s seq = seqStore->sequencesStart[i];
            /* + 3 : mlBase stores matchlength minus the minimum match length (MINMATCH == 3 in zstd) */
            matchBytes += (nuint)(seq.mlBase + 3);
            /* Same wrap-correction as for literals, but for the match-length field */
            if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength)
            {
                matchBytes += 0x10000;
            }
        }

        return matchBytes;
    }

    /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
     * Stores the result in resultSeqStore.
     */
    private static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, SeqStore_t* originalSeqStore, nuint startIdx, nuint endIdx)
    {
        *resultSeqStore = *originalSeqStore;
        if (startIdx > 0)
        {
            /* Temporarily set `sequences` to the chunk start so the literals-count below
             * measures only the sequences preceding the chunk; litStart is advanced past them */
            resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
            resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
        }

        /* Keep the long-length flag only if its position falls inside [startIdx, endIdx); rebase it */
        if (originalSeqStore->longLengthType != ZSTD_longLengthType_e.ZSTD_llt_none)
        {
            if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx)
            {
                resultSeqStore->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none;
            }
            else
            {
                resultSeqStore->longLengthPos -= (uint)startIdx;
            }
        }

        resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
        resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
        if (endIdx == (nuint)(originalSeqStore->sequences - originalSeqStore->sequencesStart))
        {
            /* Last chunk: it must consume exactly the remaining literals */
            assert(resultSeqStore->lit == originalSeqStore->lit);
        }
        else
        {
            nuint literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
            resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
        }

        /* Per-sequence code tables advance in lockstep with sequencesStart */
        resultSeqStore->llCode += startIdx;
        resultSeqStore->mlCode += startIdx;
        resultSeqStore->ofCode += startIdx;
    }

    /**
     * Returns the raw offset represented by the combination of offBase, ll0, and repcode history.
     * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
     */
    private static uint ZSTD_resolveRepcodeToRawOffset(uint* rep, uint offBase, uint ll0)
    {
        assert(1 <= offBase && offBase <= 3);
        /* [ 0 - 3 ] */
        uint adjustedRepCode = offBase - 1 + ll0;
        assert(1 <= offBase && offBase <= 3);
        if (adjustedRepCode == 3)
        {
            /* Repcode 3 with ll0 set maps to rep[0] - 1 (only reachable when ll0 != 0) */
            assert(ll0 != 0);
            return rep[0] - 1;
        }

        return rep[adjustedRepCode];
    }

    /**
     * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
     * due to emission of RLE/raw blocks that disturb the offset history,
     * and replaces any repcodes within the seqStore that may be invalid.
     *
     * dRepcodes are updated as would be on the decompression side.
     * cRepcodes are updated exactly in accordance with the seqStore.
     *
     * Note : this function assumes seq->offBase respects the following numbering scheme :
     *        0 : invalid
     *        1-3 : repcode 1-3
     *        4+ : real_offset+3
     */
    private static void ZSTD_seqStore_resolveOffCodes(repcodes_s* dRepcodes, repcodes_s* cRepcodes, SeqStore_t* seqStore, uint nbSeq)
    {
        uint idx = 0;
        /* The sequence at the long-literal-length position has a wrapped litLength field,
         * so litLength == 0 there does NOT mean "no literals" — exclude it from the ll0 test */
        uint longLitLenIdx = seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;
        for (; idx < nbSeq; ++idx)
        {
            SeqDef_s* seq = seqStore->sequencesStart + idx;
            uint ll0 = seq->litLength == 0 && idx != longLitLenIdx ? 1U : 0U;
            uint offBase = seq->offBase;
            assert(offBase > 0);
            if (1 <= offBase && offBase <= 3)
            {
                uint dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0);
                uint cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);
                /* Histories diverged: rewrite the repcode as an explicit offset (+3 per the scheme above) */
                if (dRawOffset != cRawOffset)
                {
                    assert(cRawOffset > 0);
                    seq->offBase = cRawOffset + 3;
                }
            }

            /* dRep follows the possibly-rewritten offBase; cRep follows the original one */
            ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0);
            ZSTD_updateRep(cRepcodes->rep, offBase, ll0);
        }
    }

    /* ZSTD_compressSeqStore_singleBlock():
     * Compresses a seqStore into a block with a block header, into the buffer dst.
     *
     * Returns the total size of that block (including header) or a ZSTD error code.
     */
    private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore_t* seqStore, repcodes_s* dRep, repcodes_s* cRep, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock, uint isPartition)
    {
        /* Below this compressed size the block is worth checking for RLE emission */
        const uint rleMaxLength = 25;
        byte* op = (byte*)dst;
        byte* ip = (byte*)src;
        nuint cSize;
        nuint cSeqsSize;
        /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
        repcodes_s dRepOriginal = *dRep;
        if (isPartition != 0)
            ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (uint)(seqStore->sequences - seqStore->sequencesStart));
        if (dstCapacity < ZSTD_blockHeaderSize)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        /* Entropy-compress past the (not-yet-written) block header */
        cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, srcSize, zc->tmpWorkspace, zc->tmpWkspSize, zc->bmi2);
        {
            nuint err_code = cSeqsSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        /* cSeqsSize == 1 is the internal sentinel for "emit as RLE"; never on the first block */
        if (zc->isFirstBlock == 0 && cSeqsSize < rleMaxLength && ZSTD_isRLE((byte*)src, srcSize) != 0)
        {
            cSeqsSize = 1;
        }

        /* Sequence-collection mode: record sequences (with the pre-resolve repcodes) and emit nothing */
        if (zc->seqCollector.collectSequences != 0)
        {
            {
                nuint err_code = ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
            return 0;
        }

        if (cSeqsSize == 0)
        {
            /* Not compressible: raw block; the decoder never saw a compressed block, so roll dRep back */
            cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
            {
                nuint err_code = cSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            *dRep = dRepOriginal;
        }
        else if (cSeqsSize == 1)
        {
            /* RLE block; likewise the decoder's repcode history is untouched */
            cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
            {
                nuint err_code = cSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            *dRep = dRepOriginal;
        }
        else
        {
            /* Real compressed block: commit entropy tables/repcodes and back-fill the header */
            ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
            writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
            cSize = ZSTD_blockHeaderSize + cSeqsSize;
        }

        /* A "valid" offcode table may contain zero-probability symbols after this block: downgrade to check */
        if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid)
            zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check;
        return cSize;
    }

    /* Helper function to perform the recursive search for block splits.
     * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
     * If advantageous to split, then we recurse down the two sub-blocks.
     * If not, or if an error occurred in estimation, then we do not recurse.
     *
     * Note: The recursion depth is capped by a heuristic minimum number of sequences,
     * defined by MIN_SEQUENCES_BLOCK_SPLITTING.
     * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
     * In practice, recursion depth usually doesn't go beyond 4.
     *
     * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS.
     * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
     * maximum of 128 KB, this value is actually impossible to reach.
     */
    private static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, nuint startIdx, nuint endIdx, ZSTD_CCtx_s* zc, SeqStore_t* origSeqStore)
    {
        SeqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
        SeqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
        SeqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
        nuint estimatedOriginalSize;
        nuint estimatedFirstHalfSize;
        nuint estimatedSecondHalfSize;
        nuint midIdx = (startIdx + endIdx) / 2;
        assert(endIdx >= startIdx);
        /* 300 == MIN_SEQUENCES_BLOCK_SPLITTING, 196 == ZSTD_MAX_NB_BLOCK_SPLITS (see comment above) */
        if (endIdx - startIdx < 300 || splits->idx >= 196)
        {
            return;
        }

        ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
        ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
        ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
        estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
        estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
        estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
        /* Estimation errors are non-fatal: simply stop splitting this range */
        if (ERR_isError(estimatedOriginalSize) || ERR_isError(estimatedFirstHalfSize) || ERR_isError(estimatedSecondHalfSize))
        {
            return;
        }

        if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize)
        {
            /* In-order recursion so splitLocations ends up sorted ascending */
            ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
            splits->splitLocations[splits->idx] = (uint)midIdx;
            splits->idx++;
            ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
        }
    }

    /* Base recursive function.
     * Populates a table with intra-block partition indices that can improve compression ratio.
     *
     * @return: number of splits made (which equals the size of the partition table - 1).
     */
    private static nuint ZSTD_deriveBlockSplits(ZSTD_CCtx_s* zc, uint* partitions, uint nbSeq)
    {
        seqStoreSplits splits;
        splits.splitLocations = partitions;
        splits.idx = 0;
        /* Too few sequences to be worth estimating */
        if (nbSeq <= 4)
        {
            return 0;
        }

        ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
        /* Terminate the partition table with the end index */
        splits.splitLocations[splits.idx] = nbSeq;
        return splits.idx;
    }

    /* ZSTD_compressBlock_splitBlock():
     * Attempts to split a given block into multiple blocks to improve compression ratio.
     *
     * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
     */
    private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint blockSize, uint lastBlock, uint nbSeq)
    {
        nuint cSize = 0;
        byte* ip = (byte*)src;
        byte* op = (byte*)dst;
        nuint i = 0;
        nuint srcBytesTotal = 0;
        /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
        uint* partitions = zc->blockSplitCtx.partitions;
        SeqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
        SeqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
        nuint numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
        /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
         * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
         * separate repcode histories that simulate repcode history on compression and decompression side,
         * and use the histories to determine whether we must replace a particular repcode with its raw offset.
         *
         * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
         *    or RLE. This allows us to retrieve the offset value that an invalid repcode references within
         *    a nocompress/RLE block.
         * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
         *    the replacement offset value rather than the original repcode to update the repcode history.
+ * dRep also will be the final repcode history sent to the next block. + * + * See ZSTD_seqStore_resolveOffCodes() for more details. + */ + repcodes_s dRep; + repcodes_s cRep; + memcpy(dRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + memcpy(cRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + *nextSeqStore = new SeqStore_t(); + if (numSplits == 0) + { + nuint cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, &dRep, &cRep, op, dstCapacity, ip, blockSize, lastBlock, 0); + { + nuint err_code = cSizeSingleBlock; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(zc->blockSizeMax <= 1 << 17); + assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); + return cSizeSingleBlock; + } + + ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); + for (i = 0; i <= numSplits; ++i) + { + nuint cSizeChunk; + uint lastPartition = i == numSplits ? 1U : 0U; + uint lastBlockEntireSrc = 0; + nuint srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); + srcBytesTotal += srcBytes; + if (lastPartition != 0) + { + srcBytes += blockSize - srcBytesTotal; + lastBlockEntireSrc = lastBlock; + } + else + { + ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i + 1]); + } + + cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, &dRep, &cRep, op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1); + { + nuint err_code = cSizeChunk; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ip += srcBytes; + op += cSizeChunk; + dstCapacity -= cSizeChunk; + cSize += cSizeChunk; + *currSeqStore = *nextSeqStore; + assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); + } + + memcpy(zc->blockState.prevCBlock->rep, dRep.rep, (uint)sizeof(repcodes_s)); + return cSize; + } + + private static nuint ZSTD_compressBlock_splitBlock(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint 
srcSize, uint lastBlock) + { + uint nbSeq; + nuint cSize; + assert(zc->appliedParams.postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable); + { + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); + { + nuint err_code = bss; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) + { + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + if (zc->seqCollector.collectSequences != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + } + + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return cSize; + } + + nbSeq = (uint)(zc->seqStore.sequences - zc->seqStore.sequencesStart); + } + + cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return cSize; + } + + private static nuint ZSTD_compressBlock_internal(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint frame) + { + /* This is an estimated upper bound for the length of an rle block. + * This isn't the actual upper bound. + * Finding the real threshold needs further investigation. 
+ */ + const uint rleMaxLength = 25; + nuint cSize; + byte* ip = (byte*)src; + byte* op = (byte*)dst; + { + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); + { + nuint err_code = bss; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) + { + if (zc->seqCollector.collectSequences != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + } + + cSize = 0; + goto @out; + } + } + + if (zc->seqCollector.collectSequences != 0) + { + { + nuint err_code = ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return 0; + } + + cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, dst, dstCapacity, srcSize, zc->tmpWorkspace, zc->tmpWkspSize, zc->bmi2); + if (frame != 0 && zc->isFirstBlock == 0 && cSize < rleMaxLength && ZSTD_isRLE(ip, srcSize) != 0) + { + cSize = 1; + op[0] = ip[0]; + } + + @out: + if (!ERR_isError(cSize) && cSize > 1) + { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + } + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + return cSize; + } + + private static nuint ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, nuint bss, uint lastBlock) + { + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress) + { + if (zc->isFirstBlock == 0 && ZSTD_maybeRLE(&zc->seqStore) != 0 && ZSTD_isRLE((byte*)src, srcSize) != 0) + { + return ZSTD_rleCompressBlock(dst, dstCapacity, *(byte*)src, srcSize, lastBlock); + } + + { + nuint cSize = ZSTD_compressSuperBlock(zc, dst, 
dstCapacity, src, srcSize, lastBlock); + if (cSize != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))) + { + nuint maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) + { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return cSize; + } + } + } + } + + return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + } + + private static nuint ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + { + nuint cSize = 0; + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); + { + nuint err_code = bss; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + return cSize; + } + + private static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* ip, void* iend) + { + uint cycleLog = ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy); + uint maxDist = (uint)1 << (int)@params->cParams.windowLog; + if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend) != 0) + { + uint correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); + ZSTD_cwksp_mark_tables_dirty(ws); + ZSTD_reduceIndex(ms, @params, correction); + ZSTD_cwksp_mark_tables_clean(ws); + if (ms->nextToUpdate < correction) + ms->nextToUpdate = 0; + else + ms->nextToUpdate -= correction; + 
ms->loadedDictEnd = 0; + ms->dictMatchState = null; + } + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_splitLevels => new int[10] + { + 0, + 0, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 4 + }; + private static int* splitLevels => (int*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_splitLevels)); +#else + + private static readonly int* splitLevels = GetArrayPointer(new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }); +#endif + private static nuint ZSTD_optimalBlockSize(ZSTD_CCtx_s* cctx, void* src, nuint srcSize, nuint blockSizeMax, int splitLevel, ZSTD_strategy strat, long savings) + { + if (srcSize < 128 * (1 << 10) || blockSizeMax < 128 * (1 << 10)) + return srcSize < blockSizeMax ? srcSize : blockSizeMax; + if (savings < 3) + { + return 128 * (1 << 10); + } + + if (splitLevel == 1) + return 128 * (1 << 10); + if (splitLevel == 0) + { + assert(ZSTD_strategy.ZSTD_fast <= strat && strat <= ZSTD_strategy.ZSTD_btultra2); + splitLevel = splitLevels[(int)strat]; + } + else + { + assert(2 <= splitLevel && splitLevel <= 6); + splitLevel -= 2; + } + + return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); + } + + /*! ZSTD_compress_frameChunk() : + * Compress a chunk of data into one or multiple blocks. + * All blocks will be terminated, all input will be consumed. + * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
+ * Frame is supposed already started (header already produced) + * @return : compressed size, or an error code + */ + private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastFrameChunk) + { + nuint blockSizeMax = cctx->blockSizeMax; + nuint remaining = srcSize; + byte* ip = (byte*)src; + byte* ostart = (byte*)dst; + byte* op = ostart; + uint maxDist = (uint)1 << (int)cctx->appliedParams.cParams.windowLog; + long savings = (long)cctx->consumedSrcSize - (long)cctx->producedCSize; + assert(cctx->appliedParams.cParams.windowLog <= (uint)(sizeof(nuint) == 4 ? 30 : 31)); + if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0) + ZSTD_XXH64_update(&cctx->xxhState, src, srcSize); + while (remaining != 0) + { + ZSTD_MatchState_t* ms = &cctx->blockState.matchState; + nuint blockSize = ZSTD_optimalBlockSize(cctx, ip, remaining, blockSizeMax, cctx->appliedParams.preBlockSplitter_level, cctx->appliedParams.cParams.strategy, savings); + uint lastBlock = lastFrameChunk & (uint)(blockSize == remaining ? 
1 : 0); + assert(blockSize <= remaining); + if (dstCapacity < ZSTD_blockHeaderSize + (nuint)(1 + 1) + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); + ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + if (ms->nextToUpdate < ms->window.lowLimit) + ms->nextToUpdate = ms->window.lowLimit; + { + nuint cSize; + if (ZSTD_useTargetCBlockSize(&cctx->appliedParams) != 0) + { + cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(cSize > 0); + assert(cSize <= blockSize + ZSTD_blockHeaderSize); + } + else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams) != 0) + { + cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(cSize > 0 || cctx->seqCollector.collectSequences == 1); + } + else + { + cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize, 1); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (cSize == 0) + { + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + } + else + { + uint cBlockHeader = cSize == 1 ? 
lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + cSize += ZSTD_blockHeaderSize; + } + } + + savings += (long)blockSize - (long)cSize; + ip += blockSize; + assert(remaining >= blockSize); + remaining -= blockSize; + op += cSize; + assert(dstCapacity >= cSize); + dstCapacity -= cSize; + cctx->isFirstBlock = 0; + } + } + + if (lastFrameChunk != 0 && op > ostart) + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ending; + return (nuint)(op - ostart); + } + + private static nuint ZSTD_writeFrameHeader(void* dst, nuint dstCapacity, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, uint dictID) + { + byte* op = (byte*)dst; + /* 0-3 */ + uint dictIDSizeCodeLength = (uint)((dictID > 0 ? 1 : 0) + (dictID >= 256 ? 1 : 0) + (dictID >= 65536 ? 1 : 0)); + /* 0-3 */ + uint dictIDSizeCode = @params->fParams.noDictIDFlag != 0 ? 0 : dictIDSizeCodeLength; + uint checksumFlag = @params->fParams.checksumFlag > 0 ? 1U : 0U; + uint windowSize = (uint)1 << (int)@params->cParams.windowLog; + uint singleSegment = @params->fParams.contentSizeFlag != 0 && windowSize >= pledgedSrcSize ? 1U : 0U; + byte windowLogByte = (byte)(@params->cParams.windowLog - 10 << 3); + uint fcsCode = (uint)(@params->fParams.contentSizeFlag != 0 ? (pledgedSrcSize >= 256 ? 1 : 0) + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) + (pledgedSrcSize >= 0xFFFFFFFFU ? 
1 : 0) : 0); + byte frameHeaderDescriptionByte = (byte)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6)); + nuint pos = 0; + assert(!(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1))); + if (dstCapacity < 18) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (@params->format == ZSTD_format_e.ZSTD_f_zstd1) + { + MEM_writeLE32(dst, 0xFD2FB528); + pos = 4; + } + + op[pos++] = frameHeaderDescriptionByte; + if (singleSegment == 0) + op[pos++] = windowLogByte; + switch (dictIDSizeCode) + { + default: + assert(0 != 0); + goto case 0; + case 0: + break; + case 1: + op[pos] = (byte)dictID; + pos++; + break; + case 2: + MEM_writeLE16(op + pos, (ushort)dictID); + pos += 2; + break; + case 3: + MEM_writeLE32(op + pos, dictID); + pos += 4; + break; + } + + switch (fcsCode) + { + default: + assert(0 != 0); + goto case 0; + case 0: + if (singleSegment != 0) + op[pos++] = (byte)pledgedSrcSize; + break; + case 1: + MEM_writeLE16(op + pos, (ushort)(pledgedSrcSize - 256)); + pos += 2; + break; + case 2: + MEM_writeLE32(op + pos, (uint)pledgedSrcSize); + pos += 4; + break; + case 3: + MEM_writeLE64(op + pos, pledgedSrcSize); + pos += 8; + break; + } + + return pos; + } + + /* ZSTD_writeSkippableFrame_advanced() : + * Writes out a skippable frame with the specified magic number variant (16 are supported), + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data. + * + * Returns the total number of bytes written, or a ZSTD error code. 
+ */ + public static nuint ZSTD_writeSkippableFrame(void* dst, nuint dstCapacity, void* src, nuint srcSize, uint magicVariant) + { + byte* op = (byte*)dst; + if (dstCapacity < srcSize + 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (srcSize > 0xFFFFFFFF) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (magicVariant > 15) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + MEM_writeLE32(op, 0x184D2A50 + magicVariant); + MEM_writeLE32(op + 4, (uint)srcSize); + memcpy(op + 8, src, (uint)srcSize); + return srcSize + 8; + } + + /* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapacity` is too small (stage == ZSTD_compressionStage_e.ZSTDcs_init); + assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable); + cctx->externSeqStore.seq = seq; + cctx->externSeqStore.size = nbSeq; + cctx->externSeqStore.capacity = nbSeq; + cctx->externSeqStore.pos = 0; + cctx->externSeqStore.posInSequence = 0; + } + + private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint frame, uint lastFrameChunk) + { + ZSTD_MatchState_t* ms = &cctx->blockState.matchState; + nuint fhSize = 0; + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + if (frame != 0 && cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) + { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, cctx->pledgedSrcSizePlusOne - 1, cctx->dictID); + { + nuint err_code = fhSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(fhSize <= dstCapacity); + dstCapacity -= fhSize; + 
dst = (sbyte*)dst + fhSize; + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; + } + + if (srcSize == 0) + return fhSize; + if (ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous) == 0) + { + ms->forceNonContiguous = 0; + ms->nextToUpdate = ms->window.dictLimit; + } + + if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_window_update(&cctx->ldmState.window, src, srcSize, 0); + } + + if (frame == 0) + { + ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams, src, (byte*)src + srcSize); + } + + { + nuint cSize = frame != 0 ? ZSTD_compress_frameChunk(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + cctx->consumedSrcSize += srcSize; + cctx->producedCSize += cSize + fhSize; + assert(!(cctx->appliedParams.fParams.contentSizeFlag != 0 && cctx->pledgedSrcSizePlusOne == 0)); + if (cctx->pledgedSrcSizePlusOne != 0) + { + if (cctx->consumedSrcSize + 1 > cctx->pledgedSrcSizePlusOne) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + } + + return cSize + fhSize; + } + } + + private static nuint ZSTD_compressContinue_public(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); + } + + /* NOTE: Must just wrap ZSTD_compressContinue_public() */ + public static nuint ZSTD_compressContinue(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); + } + + private static nuint ZSTD_getBlockSize_deprecated(ZSTD_CCtx_s* cctx) + { + ZSTD_compressionParameters cParams = cctx->appliedParams.cParams; + assert(ZSTD_checkCParams(cParams) == 0); + return cctx->appliedParams.maxBlockSize < (nuint)1 << 
(int)cParams.windowLog ? cctx->appliedParams.maxBlockSize : (nuint)1 << (int)cParams.windowLog; + } + + /* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ + public static nuint ZSTD_getBlockSize(ZSTD_CCtx_s* cctx) + { + return ZSTD_getBlockSize_deprecated(cctx); + } + + /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ + private static nuint ZSTD_compressBlock_deprecated(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + { + nuint blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); + if (srcSize > blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + } + + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); + } + + /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ + public static nuint ZSTD_compressBlock(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); + } + + /*! ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ + private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* src, nuint srcSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + { + byte* ip = (byte*)src; + byte* iend = ip + srcSize; + int loadLdmDict = @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null ? 1 : 0; + ZSTD_assertEqualCParams(@params->cParams, ms->cParams); + { + /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. + * Dictionaries right at the edge will immediately trigger overflow + * correction, but I don't want to insert extra constraints here. + */ + uint maxDictSize = (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) - 2; + int CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&@params->cParams); + if (CDictTaggedIndices != 0 && tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) + { + /* Some dictionary matchfinders in zstd use "short cache", + * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each + * CDict hashtable entry as a tag rather than as part of an index. + * When short cache is used, we need to truncate the dictionary + * so that its indices don't overlap with the tag. */ + const uint shortCacheMaxDictSize = (1U << 32 - 8) - 2; + maxDictSize = maxDictSize < shortCacheMaxDictSize ? maxDictSize : shortCacheMaxDictSize; + assert(loadLdmDict == 0); + } + + if (srcSize > maxDictSize) + { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } + } + + if (srcSize > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20))) + { + assert(ZSTD_window_isEmpty(ms->window) != 0); +#if DEBUG + if (loadLdmDict != 0) + assert(ZSTD_window_isEmpty(ls->window) != 0); +#endif + } + + ZSTD_window_update(&ms->window, src, srcSize, 0); + if (loadLdmDict != 0) + { + ZSTD_window_update(&ls->window, src, srcSize, 0); + ls->loadedDictEnd = @params->forceWindow != 0 ? 0 : (uint)(iend - ls->window.@base); + ZSTD_ldm_fillHashTable(ls, ip, iend, &@params->ldmParams); + } + + { + uint maxDictSize = 1U << (int)((@params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 ? @params->cParams.hashLog + 3 : @params->cParams.chainLog + 1) < 31 ? @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 ? @params->cParams.hashLog + 3 : @params->cParams.chainLog + 1 : 31); + if (srcSize > maxDictSize) + { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } + } + + ms->nextToUpdate = (uint)(ip - ms->window.@base); + ms->loadedDictEnd = @params->forceWindow != 0 ? 
0 : (uint)(iend - ms->window.@base); + ms->forceNonContiguous = @params->deterministicRefPrefix; + if (srcSize <= 8) + return 0; + ZSTD_overflowCorrectIfNeeded(ms, ws, @params, ip, iend); + switch (@params->cParams.strategy) + { + case ZSTD_strategy.ZSTD_fast: + ZSTD_fillHashTable(ms, iend, dtlm, tfp); + break; + case ZSTD_strategy.ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + assert(srcSize >= 8); + if (ms->dedicatedDictSearch != 0) + { + assert(ms->chainTable != null); + ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend - 8); + } + else + { + assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + if (@params->useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + nuint tagTableSize = (nuint)1 << (int)@params->cParams.hashLog; + memset(ms->tagTable, 0, (uint)tagTableSize); + ZSTD_row_update(ms, iend - 8); + } + else + { + ZSTD_insertAndFindFirstIndex(ms, iend - 8); + } + } + + break; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + assert(srcSize >= 8); + ZSTD_updateTree(ms, iend - 8, iend); + break; + default: + assert(0 != 0); + break; + } + + ms->nextToUpdate = (uint)(iend - ms->window.@base); + return 0; + } + + /* Dictionaries that assign zero probability to symbols that show up causes problems + * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check + * and only dictionaries with 100% valid symbols can be assumed valid. 
+ */ + private static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, uint dictMaxSymbolValue, uint maxSymbolValue) + { + uint s; + if (dictMaxSymbolValue < maxSymbolValue) + { + return FSE_repeat.FSE_repeat_check; + } + + for (s = 0; s <= maxSymbolValue; ++s) + { + if (normalizedCounter[s] == 0) + { + return FSE_repeat.FSE_repeat_check; + } + } + + return FSE_repeat.FSE_repeat_valid; + } + + /* ZSTD_loadCEntropy() : + * dict : must point at beginning of a valid zstd dictionary. + * return : size of dictionary header (size of magic number + dict ID + entropy tables) + * assumptions : magic number supposed already checked + * and dictSize >= 8 */ + private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, void* dict, nuint dictSize) + { + short* offcodeNCount = stackalloc short[32]; + uint offcodeMaxValue = 31; + /* skip magic num and dict ID */ + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + dictPtr += 8; + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_check; + { + uint maxSymbolValue = 255; + uint hasZeroWeights = 1; + nuint hufHeaderSize = HUF_readCTable(&bs->entropy.huf.CTable.e0, &maxSymbolValue, dictPtr, (nuint)(dictEnd - dictPtr), &hasZeroWeights); + if (hasZeroWeights == 0 && maxSymbolValue == 255) + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_valid; + if (ERR_isError(hufHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dictPtr += hufHeaderSize; + } + + { + uint offcodeLog; + nuint offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(offcodeHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (offcodeLog > 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, 
31, offcodeLog, workspace, (8 << 10) + 512))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dictPtr += offcodeHeaderSize; + } + + { + short* matchlengthNCount = stackalloc short[53]; + uint matchlengthMaxValue = 52, matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(matchlengthHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (matchlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, (8 << 10) + 512))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, 52); + dictPtr += matchlengthHeaderSize; + } + + { + short* litlengthNCount = stackalloc short[36]; + uint litlengthMaxValue = 35, litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(litlengthHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (litlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, (8 << 10) + 512))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, 35); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr + 12 > dictEnd) + { + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + bs->rep[0] = MEM_readLE32(dictPtr + 0); + bs->rep[1] = MEM_readLE32(dictPtr + 4); + bs->rep[2] = MEM_readLE32(dictPtr + 8); + dictPtr += 12; + { + nuint dictContentSize = (nuint)(dictEnd - dictPtr); + uint offcodeMax = 31; + if (dictContentSize <= unchecked((uint)-1) - 128 * (1 << 10)) + { + /* The maximum offset that must be supported */ + uint maxOffset = (uint)dictContentSize + 128 * (1 << 10); + offcodeMax = ZSTD_highbit32(maxOffset); + } + + bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, offcodeMax < 31 ? offcodeMax : 31); + { + uint u; + for (u = 0; u < 3; u++) + { + if (bs->rep[u] == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (bs->rep[u] > dictContentSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + } + } + } + + return (nuint)(dictPtr - (byte*)dict); + } + + /* Dictionary format : + * See : + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format + */ + /*! ZSTD_loadZstdDictionary() : + * @return : dictID, or an error code + * assumptions : magic number supposed already checked + * dictSize supposed >= 8 + */ + private static nuint ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* dict, nuint dictSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void* workspace) + { + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + nuint dictID; + nuint eSize; + assert(dictSize >= 8); + assert(MEM_readLE32(dictPtr) == 0xEC30A437); + dictID = @params->fParams.noDictIDFlag != 0 ? 
0 : MEM_readLE32(dictPtr + 4); + eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize); + { + nuint err_code = eSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + dictPtr += eSize; + { + nuint dictContentSize = (nuint)(dictEnd - dictPtr); + { + nuint err_code = ZSTD_loadDictionaryContent(ms, null, ws, @params, dictPtr, dictContentSize, dtlm, tfp); + if (ERR_isError(err_code)) + { + return err_code; + } + } + } + + return dictID; + } + + /** ZSTD_compress_insertDictionary() : + * @return : dictID, or an error code */ + private static nuint ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_MatchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void* workspace) + { + if (dict == null || dictSize < 8) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + } + + return 0; + } + + ZSTD_reset_compressedBlockState(bs); + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) + return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); + if (MEM_readLE32(dict) != 0xEC30A437) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_auto) + { + return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); + } + + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + } + + assert(0 != 0); + } + + return ZSTD_loadZstdDictionary(bs, ms, ws, @params, dict, dictSize, dtlm, tfp, workspace); + } + + /*! 
ZSTD_compressBegin_internal() : + * Assumption : either @dict OR @cdict (or none) is non-NULL, never both + * @return : 0, or an error code */ + private static nuint ZSTD_compressBegin_internal(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + { + nuint dictContentSize = cdict != null ? cdict->dictContentSize : dictSize; + assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); + assert(!(dict != null && cdict != null)); + if (cdict != null && cdict->dictContentSize > 0 && (pledgedSrcSize < 128 * (1 << 10) || pledgedSrcSize < cdict->dictContentSize * 6UL || pledgedSrcSize == unchecked(0UL - 1) || cdict->compressionLevel == 0) && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceLoad) + { + return ZSTD_resetCCtx_usingCDict(cctx, cdict, @params, pledgedSrcSize, zbuff); + } + + { + nuint err_code = ZSTD_resetCCtx_internal(cctx, @params, pledgedSrcSize, dictContentSize, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, zbuff); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint dictID = cdict != null ? 
ZSTD_compress_insertDictionary(cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, cctx->tmpWorkspace) : ZSTD_compress_insertDictionary(cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, dictContentType, dtlm, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, cctx->tmpWorkspace); + { + nuint err_code = dictID; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(dictID <= 0xffffffff); + cctx->dictID = (uint)dictID; + cctx->dictContentSize = dictContentSize; + } + + return 0; + } + + /* ZSTD_compressBegin_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. */ + private static nuint ZSTD_compressBegin_advanced_internal(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize) + { + { + /* compression parameters verification and optimization */ + nuint err_code = ZSTD_checkCParams(@params->cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, dtlm, cdict, @params, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered); + } + + /*! 
ZSTD_compressBegin_advanced() : + * @return : 0, or an error code */ + public static nuint ZSTD_compressBegin_advanced(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_parameters @params, ulong pledgedSrcSize) + { + ZSTD_CCtx_params_s cctxParams; + ZSTD_CCtxParams_init_internal(&cctxParams, &@params, 0); + return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &cctxParams, pledgedSrcSize); + } + + private static nuint ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, int compressionLevel) + { + ZSTD_CCtx_params_s cctxParams; + { + ZSTD_parameters @params = ZSTD_getParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + ZSTD_CCtxParams_init_internal(&cctxParams, &@params, compressionLevel == 0 ? 3 : compressionLevel); + } + + return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &cctxParams, unchecked(0UL - 1), ZSTD_buffered_policy_e.ZSTDb_not_buffered); + } + + public static nuint ZSTD_compressBegin_usingDict(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, int compressionLevel) + { + return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); + } + + /*===== Buffer-less streaming compression functions =====*/ + public static nuint ZSTD_compressBegin(ZSTD_CCtx_s* cctx, int compressionLevel) + { + return ZSTD_compressBegin_usingDict_deprecated(cctx, null, 0, compressionLevel); + } + + /*! ZSTD_writeEpilogue() : + * Ends a frame. 
+ * @return : nb of bytes written into dst (or an error code) */ + private static nuint ZSTD_writeEpilogue(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity) + { + byte* ostart = (byte*)dst; + byte* op = ostart; + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) + { + nuint fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); + { + nuint err_code = fhSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; + } + + if (cctx->stage != ZSTD_compressionStage_e.ZSTDcs_ending) + { + /* last block */ + uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1) + 0; + if (dstCapacity < 3) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + } + + if (cctx->appliedParams.fParams.checksumFlag != 0) + { + uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState); + if (dstCapacity < 4) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE32(op, checksum); + op += 4; + } + + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_created; + return (nuint)(op - ostart); + } + + /** ZSTD_CCtx_trace() : + * Trace the end of a compression call. 
+ */ + private static void ZSTD_CCtx_trace(ZSTD_CCtx_s* cctx, nuint extraCSize) + { + } + + private static nuint ZSTD_compressEnd_public(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint endResult; + nuint cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + endResult = ZSTD_writeEpilogue(cctx, (sbyte*)dst + cSize, dstCapacity - cSize); + { + nuint err_code = endResult; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(!(cctx->appliedParams.fParams.contentSizeFlag != 0 && cctx->pledgedSrcSizePlusOne == 0)); + if (cctx->pledgedSrcSizePlusOne != 0) + { + if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + } + + ZSTD_CCtx_trace(cctx, endResult); + return cSize + endResult; + } + + /* NOTE: Must just wrap ZSTD_compressEnd_public() */ + public static nuint ZSTD_compressEnd(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); + } + + /*! ZSTD_compress_advanced() : + * Note : this function is now DEPRECATED. + * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. + * This prototype will generate compilation warnings. 
*/ + public static nuint ZSTD_compress_advanced(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_parameters @params) + { + { + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, 0); + return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); + } + + /* Internal */ + private static nuint ZSTD_compress_advanced_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_CCtx_params_s* @params) + { + { + nuint err_code = ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, @params, srcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); + } + + /************************** + * Simple dictionary API + ***************************/ + /*! ZSTD_compress_usingDict() : + * Compression at an explicit compression level using a Dictionary. + * A dictionary can be any arbitrary data segment (also called a prefix), + * or a buffer with specified information (see zdict.h). + * Note : This function loads the dictionary, resulting in significant startup delay. + * It's intended for a dictionary used only once. + * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ + public static nuint ZSTD_compress_usingDict(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, int compressionLevel) + { + { + ZSTD_parameters @params = ZSTD_getParams_internal(compressionLevel, srcSize, dict != null ? 
dictSize : 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + assert(@params.fParams.contentSizeFlag == 1); + ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, compressionLevel == 0 ? 3 : compressionLevel); + } + + return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); + } + + /*! ZSTD_compressCCtx() : + * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. + * Important : in order to mirror `ZSTD_compress()` behavior, + * this function compresses at the requested compression level, + * __ignoring any other advanced parameter__ . + * If any advanced parameter was set using the advanced API, + * they will all be reset. Only @compressionLevel remains. + */ + public static nuint ZSTD_compressCCtx(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, int compressionLevel) + { + assert(cctx != null); + return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, null, 0, compressionLevel); + } + + /*************************************** + * Simple Core API + ***************************************/ + /*! ZSTD_compress() : + * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data. + * @return : compressed size written into `dst` (<= `dstCapacity), + * or an error code if it fails (which can be tested using ZSTD_isError()). */ + public static nuint ZSTD_compress(void* dst, nuint dstCapacity, void* src, nuint srcSize, int compressionLevel) + { + nuint result; + ZSTD_CCtx_s ctxBody; + ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); + result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_freeCCtxContent(&ctxBody); + return result; + } + + /*! 
ZSTD_estimateCDictSize_advanced() : + * Estimate amount of memory that will be needed to create a dictionary with following arguments */ + public static nuint ZSTD_estimateCDictSize_advanced(nuint dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) + { + return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), 1, 0) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))); + } + + /*! ZSTD_estimate?DictSize() : + * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). + * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). + * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. + */ + public static nuint ZSTD_estimateCDictSize(nuint dictSize, int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy); + } + + public static nuint ZSTD_sizeof_CDict(ZSTD_CDict_s* cdict) + { + if (cdict == null) + return 0; + return (nuint)(cdict->workspace.workspace == cdict ? 
0 : sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_sizeof(&cdict->workspace); + } + + private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuffer, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params_s @params) + { + assert(ZSTD_checkCParams(@params.cParams) == 0); + cdict->matchState.cParams = @params.cParams; + cdict->matchState.dedicatedDictSearch = @params.enableDedicatedDictSearch; + if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef || dictBuffer == null || dictSize == 0) + { + cdict->dictContent = dictBuffer; + } + else + { + void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))); + if (internalBuffer == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + cdict->dictContent = internalBuffer; + memcpy(internalBuffer, dictBuffer, (uint)dictSize); + } + + cdict->dictContentSize = dictSize; + cdict->dictContentType = dictContentType; + cdict->entropyWorkspace = (uint*)ZSTD_cwksp_reserve_object(&cdict->workspace, (8 << 10) + 512); + ZSTD_reset_compressedBlockState(&cdict->cBlockState); + { + nuint err_code = ZSTD_reset_matchState(&cdict->matchState, &cdict->workspace, &@params.cParams, @params.useRowMatchFinder, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, ZSTD_indexResetPolicy_e.ZSTDirp_reset, ZSTD_resetTarget_e.ZSTD_resetTarget_CDict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + @params.compressionLevel = 3; + @params.fParams.contentSizeFlag = 1; + { + nuint dictID = ZSTD_compress_insertDictionary(&cdict->cBlockState, &cdict->matchState, null, &cdict->workspace, &@params, cdict->dictContent, cdict->dictContentSize, dictContentType, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict, cdict->entropyWorkspace); + { + nuint err_code = dictID; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(dictID <= 
unchecked((uint)-1)); + cdict->dictID = (uint)dictID; + } + } + + return 0; + } + + private static ZSTD_CDict_s* ZSTD_createCDict_advanced_internal(nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_compressionParameters cParams, ZSTD_paramSwitch_e useRowMatchFinder, int enableDedicatedDictSearch, ZSTD_customMem customMem) + { + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + return null; + { + nuint workspaceSize = ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, 0) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))); + void* workspace = ZSTD_customMalloc(workspaceSize, customMem); + ZSTD_cwksp ws; + ZSTD_CDict_s* cdict; + if (workspace == null) + { + ZSTD_customFree(workspace, customMem); + return null; + } + + ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc); + cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); + assert(cdict != null); + ZSTD_cwksp_move(&cdict->workspace, &ws); + cdict->customMem = customMem; + cdict->compressionLevel = 0; + cdict->useRowMatchFinder = useRowMatchFinder; + return cdict; + } + } + + public static ZSTD_CDict_s* ZSTD_createCDict_advanced(void* dictBuffer, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) + { + ZSTD_CCtx_params_s cctxParams; + cctxParams = new ZSTD_CCtx_params_s(); + ZSTD_CCtxParams_init(&cctxParams, 0); + cctxParams.cParams = cParams; + cctxParams.customMem = customMem; + return ZSTD_createCDict_advanced2(dictBuffer, dictSize, dictLoadMethod, dictContentType, &cctxParams, customMem); + } + + /* + * This API is temporary and is expected to change or disappear in the 
future! + */ + public static ZSTD_CDict_s* ZSTD_createCDict_advanced2(void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params_s* originalCctxParams, ZSTD_customMem customMem) + { + ZSTD_CCtx_params_s cctxParams = *originalCctxParams; + ZSTD_compressionParameters cParams; + ZSTD_CDict_s* cdict; + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + return null; + if (cctxParams.enableDedicatedDictSearch != 0) + { + cParams = ZSTD_dedicatedDictSearch_getCParams(cctxParams.compressionLevel, dictSize); + ZSTD_overrideCParams(&cParams, &cctxParams.cParams); + } + else + { + cParams = ZSTD_getCParamsFromCCtxParams(&cctxParams, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + } + + if (ZSTD_dedicatedDictSearch_isSupported(&cParams) == 0) + { + cctxParams.enableDedicatedDictSearch = 0; + cParams = ZSTD_getCParamsFromCCtxParams(&cctxParams, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + } + + cctxParams.cParams = cParams; + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cdict = ZSTD_createCDict_advanced_internal(dictSize, dictLoadMethod, cctxParams.cParams, cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch, customMem); + if (cdict == null || ERR_isError(ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, cctxParams))) + { + ZSTD_freeCDict(cdict); + return null; + } + + return cdict; + } + + /*! ZSTD_createCDict() : + * When compressing multiple messages or blocks using the same dictionary, + * it's recommended to digest the dictionary only once, since it's a costly operation. + * ZSTD_createCDict() will create a state from digesting a dictionary. + * The resulting state can be used for future compression operations with very limited startup cost. 
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. + * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. + * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, + * in which case the only thing that it transports is the @compressionLevel. + * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, + * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ + public static ZSTD_CDict_s* ZSTD_createCDict(void* dict, nuint dictSize, int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto, cParams, ZSTD_defaultCMem); + if (cdict != null) + cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; + return cdict; + } + + /*! ZSTD_createCDict_byReference() : + * Create a digested dictionary for compression + * Dictionary content is just referenced, not duplicated. + * As a consequence, `dictBuffer` **must** outlive CDict, + * and its content must remain unmodified throughout the lifetime of CDict. 
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ + public static ZSTD_CDict_s* ZSTD_createCDict_byReference(void* dict, nuint dictSize, int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto, cParams, ZSTD_defaultCMem); + if (cdict != null) + cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; + return cdict; + } + + /*! ZSTD_freeCDict() : + * Function frees memory allocated by ZSTD_createCDict(). + * If a NULL pointer is passed, no operation is performed. */ + public static nuint ZSTD_freeCDict(ZSTD_CDict_s* cdict) + { + if (cdict == null) + return 0; + { + ZSTD_customMem cMem = cdict->customMem; + int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); + ZSTD_cwksp_free(&cdict->workspace, cMem); + if (cdictInWorkspace == 0) + { + ZSTD_customFree(cdict, cMem); + } + + return 0; + } + } + + /*! ZSTD_initStaticCDict_advanced() : + * Generate a digested dictionary in provided memory area. + * workspace: The memory area to emplace the dictionary into. + * Provided pointer must 8-bytes aligned. + * It must outlive dictionary usage. + * workspaceSize: Use ZSTD_estimateCDictSize() + * to determine how large workspace must be. + * cParams : use ZSTD_getCParams() to transform a compression level + * into its relevant cParams. + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) + * Note : there is no corresponding "free" function. + * Since workspace was allocated externally, it must be freed externally. 
+ */
+ public static ZSTD_CDict_s* ZSTD_initStaticCDict(void* workspace, nuint workspaceSize, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams)
+ {
+     ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams);
+     /* enableDedicatedDictSearch */
+     nuint matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, 1, 0);
+     /* Required size = CDict header
+      *               + (unless byRef) pointer-aligned copy of the dictionary
+      *               + entropy-tables scratch ((8 << 10) + 512 — mirrors upstream zstd's
+      *                 HUF workspace size; NOTE(review): confirm against upstream constant)
+      *               + match-state tables. */
+     nuint neededSize = ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + matchStateSize;
+     ZSTD_CDict_s* cdict;
+     ZSTD_CCtx_params_s @params;
+     /* caller contract: workspace must be 8-byte aligned */
+     if (((nuint)workspace & 7) != 0)
+         return null;
+     {
+         /* Carve the CDict header itself out of the caller-provided workspace,
+          * then hand ownership of the workspace over to that CDict. */
+         ZSTD_cwksp ws;
+         ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc);
+         cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s));
+         if (cdict == null)
+             return null;
+         ZSTD_cwksp_move(&cdict->workspace, &ws);
+     }
+
+     if (workspaceSize < neededSize)
+         return null;
+     ZSTD_CCtxParams_init(&@params, 0);
+     @params.cParams = cParams;
+     @params.useRowMatchFinder = useRowMatchFinder;
+     cdict->useRowMatchFinder = useRowMatchFinder;
+     cdict->compressionLevel = 0; /* 0 = "no level": parameters came from explicit cParams, not a compression level */
+     if (ERR_isError(ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, @params)))
+         return null;
+     return cdict;
+ }
+
+ /*! ZSTD_getCParamsFromCDict() :
+  * as the name implies */
+ private static ZSTD_compressionParameters ZSTD_getCParamsFromCDict(ZSTD_CDict_s* cdict)
+ {
+     assert(cdict != null);
+     return cdict->matchState.cParams;
+ }
+
+ /*! ZSTD_getDictID_fromCDict() :
+  * Provides the dictID of the dictionary loaded into `cdict`.
+  * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+  * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ public static uint ZSTD_getDictID_fromCDict(ZSTD_CDict_s* cdict)
+ {
+     if (cdict == null)
+         return 0;
+     return cdict->dictID;
+ }
+
+ /* ZSTD_compressBegin_usingCDict_internal() :
+  * Implementation of various ZSTD_compressBegin_usingCDict* functions.
+  */
+ private static nuint ZSTD_compressBegin_usingCDict_internal(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize)
+ {
+     ZSTD_CCtx_params_s cctxParams;
+     if (cdict == null)
+     {
+         return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong));
+     }
+
+     {
+         ZSTD_parameters @params;
+         @params.fParams = fParams;
+         /* Keep the cParams baked into the CDict when the source is small relative to the
+          * dictionary (< 128 KiB, or < 6x dictContentSize), when the source size is unknown
+          * (0UL - 1 == ZSTD_CONTENTSIZE_UNKNOWN), or when the CDict carries no level (0);
+          * otherwise re-derive cParams from the CDict's level for this specific source size. */
+         @params.cParams = pledgedSrcSize < 128 * (1 << 10) || pledgedSrcSize < cdict->dictContentSize * 6UL || pledgedSrcSize == unchecked(0UL - 1) || cdict->compressionLevel == 0 ? ZSTD_getCParamsFromCDict(cdict) : ZSTD_getCParams(cdict->compressionLevel, pledgedSrcSize, cdict->dictContentSize);
+         ZSTD_CCtxParams_init_internal(&cctxParams, &@params, cdict->compressionLevel);
+     }
+
+     if (pledgedSrcSize != unchecked(0UL - 1))
+     {
+         /* Raise windowLog to at least cover the (512 KiB-capped) known source size,
+          * so attaching a dictionary never shrinks the window below what the source needs. */
+         uint limitedSrcSize = (uint)(pledgedSrcSize < 1U << 19 ? pledgedSrcSize : 1U << 19);
+         uint limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+         cctxParams.cParams.windowLog = cctxParams.cParams.windowLog > limitedSrcLog ? cctxParams.cParams.windowLog : limitedSrcLog;
+     }
+
+     return ZSTD_compressBegin_internal(cctx, null, 0, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, cdict, &cctxParams, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered);
+ }
+
+ /* ZSTD_compressBegin_usingCDict_advanced() :
+  * This function is DEPRECATED.
+  * cdict must be != NULL */
+ public static nuint ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize)
+ {
+     return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
+ }
+
+ /* ZSTD_compressBegin_usingCDict() :
+  * cdict must be != NULL */
+ private static nuint ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict)
+ {
+     /*content*/
+     /* Default frame parameters: no content size, no checksum, dictID written. */
+     ZSTD_frameParameters fParams = new ZSTD_frameParameters
+     {
+         contentSizeFlag = 0,
+         checksumFlag = 0,
+         noDictIDFlag = 0
+     };
+     return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, unchecked(0UL - 1));
+ }
+
+ /* Public entry point; kept as a thin shim over the _deprecated implementation
+  * to mirror upstream zstd's API layout. */
+ public static nuint ZSTD_compressBegin_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict)
+ {
+     return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict);
+ }
+
+ /*! ZSTD_compress_usingCDict_internal():
+  * Implementation of various ZSTD_compress_usingCDict* functions.
+  */
+ private static nuint ZSTD_compress_usingCDict_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams)
+ {
+     {
+         /* will check if cdict != NULL */
+         /* srcSize doubles as the pledged source size: the whole input is compressed in one call. */
+         nuint err_code = ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize);
+         if (ERR_isError(err_code))
+         {
+             return err_code;
+         }
+     }
+
+     return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
+ }
+
+ /*! ZSTD_compress_usingCDict_advanced():
+  * This function is DEPRECATED.
+  */
+ public static nuint ZSTD_compress_usingCDict_advanced(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams)
+ {
+     return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+ }
+
+ /*! ZSTD_compress_usingCDict() :
+  * Compression using a digested Dictionary.
+  * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
+  * Note that compression parameters are decided at CDict creation time
+  * while frame parameters are hardcoded */
+ public static nuint ZSTD_compress_usingCDict(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict)
+ {
+     /*content*/
+     /* Unlike the Begin variant above, contentSizeFlag == 1: srcSize is known, so it is written into the frame header. */
+     ZSTD_frameParameters fParams = new ZSTD_frameParameters
+     {
+         contentSizeFlag = 1,
+         checksumFlag = 0,
+         noDictIDFlag = 0
+     };
+     return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+ }
+
+ /* ******************************************************************
+ *  Streaming
+ ********************************************************************/
+ public static ZSTD_CCtx_s* ZSTD_createCStream()
+ {
+     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+ }
+
+ public static ZSTD_CCtx_s* ZSTD_initStaticCStream(void* workspace, nuint workspaceSize)
+ {
+     return ZSTD_initStaticCCtx(workspace, workspaceSize);
+ }
+
+ /* A CStream IS a CCtx in this port (same struct); these are aliases for API parity with zstd. */
+ public static ZSTD_CCtx_s* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+ {
+     return ZSTD_createCCtx_advanced(customMem);
+ }
+
+ public static nuint ZSTD_freeCStream(ZSTD_CCtx_s* zcs)
+ {
+     return ZSTD_freeCCtx(zcs);
+ }
+
+ /*====== Initialization ======*/
+ /* Recommended input buffer size: 1 << 17 = 128 KiB (one full block). */
+ public static nuint ZSTD_CStreamInSize()
+ {
+     return 1 << 17;
+ }
+
+ /* Recommended output buffer size: worst-case compressed size of one 128 KiB block,
+  * plus one block header, plus 4 bytes (presumably the optional frame checksum — confirm vs upstream). */
+ public static nuint ZSTD_CStreamOutSize()
+ {
+     return ZSTD_compressBound(1 << 17) + ZSTD_blockHeaderSize + 4;
+ }
+
+ private static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize)
+ {
+     if (cdict != null && ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0)
+         return ZSTD_CParamMode_e.ZSTD_cpm_attachDict;
+     else
+         return ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict;
+ }
+
+ /* ZSTD_resetCStream():
+  * pledgedSrcSize == 0 means "unknown" */
+ public static nuint ZSTD_resetCStream(ZSTD_CCtx_s* zcs, ulong pss)
+ {
+     /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. + */ + ulong pledgedSrcSize = pss == 0 ? unchecked(0UL - 1) : pss; + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! ZSTD_initCStream_internal() : + * Note : for lib/compress only. Used by zstdmt_compress.c. + * Assumption 1 : params are valid + * Assumption 2 : either dict, or cdict, is defined, not both */ + private static nuint ZSTD_initCStream_internal(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); + zcs->requestedParams = *@params; + assert(!(dict != null && cdict != null)); + if (dict != null) + { + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + else + { + /* Dictionary is cleared if !cdict */ + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /* ZSTD_initCStream_usingCDict_advanced() : + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ + public static nuint ZSTD_initCStream_usingCDict_advanced(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, 
ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zcs->requestedParams.fParams = fParams; + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /* note : cdict must outlive compression session */ + public static nuint ZSTD_initCStream_usingCDict(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* cdict) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /* ZSTD_initCStream_advanced() : + * pledgedSrcSize must be exact. + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ + public static nuint ZSTD_initCStream_advanced(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, ZSTD_parameters @params, ulong pss) + { + /* for compatibility with older programs relying on this behavior. + * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. + * This line will be removed in the future. + */ + ulong pledgedSrcSize = pss == 0 && @params.fParams.contentSizeFlag == 0 ? 
unchecked(0UL - 1) : pss; + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &@params); + { + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! ZSTD_initCStream_usingDict() : + * This function is DEPRECATED, and is equivalent to: + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + * + * Creates of an internal CDict (incompatible with static CCtx), except if + * dict == NULL or dictSize < 8, in which case no dict is used. + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. + * This prototype will generate compilation warnings. + */ + public static nuint ZSTD_initCStream_usingDict(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, int compressionLevel) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! 
ZSTD_initCStream_srcSize() : + * This function is DEPRECATED, and equivalent to: + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + * + * pledgedSrcSize must be correct. If it is not known at init time, use + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, + * "0" also disables frame content size field. It may be enabled in the future. + * This prototype will generate compilation warnings. + */ + public static nuint ZSTD_initCStream_srcSize(ZSTD_CCtx_s* zcs, int compressionLevel, ulong pss) + { + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. + */ + ulong pledgedSrcSize = pss == 0 ? unchecked(0UL - 1) : pss; + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, null); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*! + * Equivalent to: + * + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. 
+ */ + public static nuint ZSTD_initCStream(ZSTD_CCtx_s* zcs, int compressionLevel) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, null); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return 0; + } + + /*====== Compression ======*/ + private static nuint ZSTD_nextInputSizeHint(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + return cctx->blockSizeMax - cctx->stableIn_notConsumed; + } + + assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); + { + nuint hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize == 0) + hintInSize = cctx->blockSizeMax; + return hintInSize; + } + } + + /** ZSTD_compressStream_generic(): + * internal function for all *compressStream*() variants + * @return : hint size for next input to complete ongoing block */ + private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective flushMode) + { + assert(input != null); + sbyte* istart = (sbyte*)input->src; + sbyte* iend = istart != null ? istart + input->size : istart; + sbyte* ip = istart != null ? istart + input->pos : istart; + assert(output != null); + sbyte* ostart = (sbyte*)output->dst; + sbyte* oend = ostart != null ? ostart + output->size : ostart; + sbyte* op = ostart != null ? 
ostart + output->pos : ostart; + uint someMoreWork = 1; + assert(zcs != null); + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + assert(input->pos >= zcs->stableIn_notConsumed); + input->pos -= zcs->stableIn_notConsumed; + if (ip != null) + ip -= zcs->stableIn_notConsumed; + zcs->stableIn_notConsumed = 0; + } + +#if DEBUG + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + assert(zcs->inBuff != null); + assert(zcs->inBuffSize > 0); + } +#endif + +#if DEBUG + if (zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + assert(zcs->outBuff != null); + assert(zcs->outBuffSize > 0); + } +#endif + +#if DEBUG + if (input->src == null) + assert(input->size == 0); +#endif + assert(input->pos <= input->size); +#if DEBUG + if (output->dst == null) + assert(output->size == 0); +#endif + assert(output->pos <= output->size); + assert((uint)flushMode <= (uint)ZSTD_EndDirective.ZSTD_e_end); + while (someMoreWork != 0) + { + switch (zcs->streamStage) + { + case ZSTD_cStreamStage.zcss_init: + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_init_missing)); + case ZSTD_cStreamStage.zcss_load: + if (flushMode == ZSTD_EndDirective.ZSTD_e_end && ((nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) && zcs->inBuffPos == 0) + { + /* shortcut to compression pass directly into output buffer */ + nuint cSize = ZSTD_compressEnd_public(zcs, op, (nuint)(oend - op), ip, (nuint)(iend - ip)); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ip = iend; + op += cSize; + zcs->frameEnded = 1; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + someMoreWork = 0; + break; + } + + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + nuint toLoad = zcs->inBuffTarget - zcs->inBuffPos; + nuint loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, 
toLoad, ip, (nuint)(iend - ip)); + zcs->inBuffPos += loaded; + if (ip != null) + ip += loaded; + if (flushMode == ZSTD_EndDirective.ZSTD_e_continue && zcs->inBuffPos < zcs->inBuffTarget) + { + someMoreWork = 0; + break; + } + + if (flushMode == ZSTD_EndDirective.ZSTD_e_flush && zcs->inBuffPos == zcs->inToCompress) + { + someMoreWork = 0; + break; + } + } + else + { + assert(zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + if (flushMode == ZSTD_EndDirective.ZSTD_e_continue && (nuint)(iend - ip) < zcs->blockSizeMax) + { + zcs->stableIn_notConsumed = (nuint)(iend - ip); + ip = iend; + someMoreWork = 0; + break; + } + + if (flushMode == ZSTD_EndDirective.ZSTD_e_flush && ip == iend) + { + someMoreWork = 0; + break; + } + } + + { + int inputBuffered = zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? 1 : 0; + void* cDst; + nuint cSize; + nuint oSize = (nuint)(oend - op); + nuint iSize = inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) : zcs->blockSizeMax; + if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + cDst = op; + else + { + cDst = zcs->outBuff; + oSize = zcs->outBuffSize; + } + + if (inputBuffered != 0) + { + uint lastBlock = flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend ? 1U : 0U; + cSize = lastBlock != 0 ? 
ZSTD_compressEnd_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zcs->frameEnded = lastBlock; + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; + if (zcs->inBuffTarget > zcs->inBuffSize) + { + zcs->inBuffPos = 0; + zcs->inBuffTarget = zcs->blockSizeMax; + } + +#if DEBUG + if (lastBlock == 0) + assert(zcs->inBuffTarget <= zcs->inBuffSize); +#endif + zcs->inToCompress = zcs->inBuffPos; + } + else + { + uint lastBlock = flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend ? 1U : 0U; + cSize = lastBlock != 0 ? ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); + if (ip != null) + ip += iSize; + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zcs->frameEnded = lastBlock; +#if DEBUG + if (lastBlock != 0) + assert(ip == iend); +#endif + } + + if (cDst == op) + { + op += cSize; + if (zcs->frameEnded != 0) + { + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + } + + break; + } + + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->streamStage = ZSTD_cStreamStage.zcss_flush; + } + + goto case ZSTD_cStreamStage.zcss_flush; + case ZSTD_cStreamStage.zcss_flush: + assert(zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); + { + nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + nuint flushed = ZSTD_limitCopy(op, (nuint)(oend - op), zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + if (flushed != 0) + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush != flushed) + { + assert(op == oend); + someMoreWork = 0; + break; + } + + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + if (zcs->frameEnded != 0) + { + someMoreWork = 0; + 
ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + break; + } + + zcs->streamStage = ZSTD_cStreamStage.zcss_load; + break; + } + + default: + assert(0 != 0); + break; + } + } + + input->pos = (nuint)(ip - istart); + output->pos = (nuint)(op - ostart); + if (zcs->frameEnded != 0) + return 0; + return ZSTD_nextInputSizeHint(zcs); + } + + private static nuint ZSTD_nextInputSizeHint_MTorST(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers >= 1) + { + assert(cctx->mtctx != null); + return ZSTDMT_nextInputSizeHint(cctx->mtctx); + } + + return ZSTD_nextInputSizeHint(cctx); + } + + /*! + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for + * the next read size (if non-zero and not an error). ZSTD_compressStream2() + * returns the minimum nb of bytes left to flush (if non-zero and not an error). + */ + public static nuint ZSTD_compressStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) + { + { + nuint err_code = ZSTD_compressStream2(zcs, output, input, ZSTD_EndDirective.ZSTD_e_continue); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_nextInputSizeHint_MTorST(zcs); + } + + /* After a compression call set the expected input/output buffer. + * This is validated at the start of the next compression call. + */ + private static void ZSTD_setBufferExpectations(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + cctx->expectedInBuffer = *input; + } + + if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + cctx->expectedOutBufferSize = output->size - output->pos; + } + } + + /* Validate that the input/output buffers match the expectations set by + * ZSTD_setBufferExpectations. 
+ */ + private static nuint ZSTD_checkBufferStability(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + ZSTD_inBuffer_s expect = cctx->expectedInBuffer; + if (expect.src != input->src || expect.pos != input->pos) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + } + + if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + nuint outBufferSize = output->size - output->pos; + if (cctx->expectedOutBufferSize != outBufferSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + } + + return 0; + } + + /* + * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. + * Otherwise, it's ignored. + * @return: 0 on success, or a ZSTD_error code otherwise. + */ + private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndDirective endOp, nuint inSize) + { + ZSTD_CCtx_params_s @params = cctx->requestedParams; + ZSTD_prefixDict_s prefixDict = cctx->prefixDict; + { + /* Init the local dict if present. */ + nuint err_code = ZSTD_initLocalDict(cctx); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + cctx->prefixDict = new ZSTD_prefixDict_s(); + assert(prefixDict.dict == null || cctx->cdict == null); + if (cctx->cdict != null && cctx->localDict.cdict == null) + { + @params.compressionLevel = cctx->cdict->compressionLevel; + } + + if (endOp == ZSTD_EndDirective.ZSTD_e_end) + cctx->pledgedSrcSizePlusOne = inSize + 1; + { + nuint dictSize = prefixDict.dict != null ? prefixDict.dictSize : cctx->cdict != null ? 
cctx->cdict->dictContentSize : 0; + ZSTD_CParamMode_e mode = ZSTD_getCParamMode(cctx->cdict, &@params, cctx->pledgedSrcSizePlusOne - 1); + @params.cParams = ZSTD_getCParamsFromCCtxParams(&@params, cctx->pledgedSrcSizePlusOne - 1, dictSize, mode); + } + + @params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(@params.postBlockSplitter, &@params.cParams); + @params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(@params.ldmParams.enableLdm, &@params.cParams); + @params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params.useRowMatchFinder, &@params.cParams); + @params.validateSequences = ZSTD_resolveExternalSequenceValidation(@params.validateSequences); + @params.maxBlockSize = ZSTD_resolveMaxBlockSize(@params.maxBlockSize); + @params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(@params.searchForExternalRepcodes, @params.compressionLevel); + if (ZSTD_hasExtSeqProd(&@params) != 0 && @params.nbWorkers >= 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + } + + if (cctx->pledgedSrcSizePlusOne - 1 <= 512 * (1 << 10)) + { + @params.nbWorkers = 0; + } + + if (@params.nbWorkers > 0) + { + if (cctx->mtctx == null) + { + cctx->mtctx = ZSTDMT_createCCtx_advanced((uint)@params.nbWorkers, cctx->customMem, cctx->pool); + if (cctx->mtctx == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + } + + { + nuint err_code = ZSTDMT_initCStream_internal(cctx->mtctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, cctx->cdict, @params, cctx->pledgedSrcSizePlusOne - 1); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + cctx->dictID = cctx->cdict != null ? cctx->cdict->dictID : 0; + cctx->dictContentSize = cctx->cdict != null ? 
cctx->cdict->dictContentSize : prefixDict.dictSize; + cctx->consumedSrcSize = 0; + cctx->producedCSize = 0; + cctx->streamStage = ZSTD_cStreamStage.zcss_load; + cctx->appliedParams = @params; + } + else + { + ulong pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; + assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); + { + nuint err_code = ZSTD_compressBegin_internal(cctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, cctx->cdict, &@params, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_buffered); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(cctx->appliedParams.nbWorkers == 0); + cctx->inToCompress = 0; + cctx->inBuffPos = 0; + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + cctx->inBuffTarget = cctx->blockSizeMax + (nuint)(cctx->blockSizeMax == pledgedSrcSize ? 1 : 0); + } + else + { + cctx->inBuffTarget = 0; + } + + cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; + cctx->streamStage = ZSTD_cStreamStage.zcss_load; + cctx->frameEnded = 0; + } + + return 0; + } + + /* @return provides a minimum amount of data remaining to be flushed from internal buffers + */ + public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + { + if (output->pos > output->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (input->pos > input->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if ((uint)endOp > (uint)ZSTD_EndDirective.ZSTD_e_end) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + assert(cctx != null); + if (cctx->streamStage == ZSTD_cStreamStage.zcss_init) + { + /* no obligation to start from pos==0 */ + nuint inputSize = input->size - input->pos; + nuint totalInputSize = inputSize + cctx->stableIn_notConsumed; + if 
(cctx->requestedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable && endOp == ZSTD_EndDirective.ZSTD_e_continue && totalInputSize < 1 << 17) + { + if (cctx->stableIn_notConsumed != 0) + { + if (input->src != cctx->expectedInBuffer.src) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + } + + if (input->pos != cctx->expectedInBuffer.size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + } + } + + input->pos = input->size; + cctx->expectedInBuffer = *input; + cctx->stableIn_notConsumed += inputSize; + return (nuint)(cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2); + } + + { + nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_setBufferExpectations(cctx, output, input); + } + + { + /* end of transparent initialization stage */ + nuint err_code = ZSTD_checkBufferStability(cctx, output, input, endOp); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (cctx->appliedParams.nbWorkers > 0) + { + nuint flushMin; + if (cctx->cParamsChanged != 0) + { + ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); + cctx->cParamsChanged = 0; + } + + if (cctx->stableIn_notConsumed != 0) + { + assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + assert(input->pos >= cctx->stableIn_notConsumed); + input->pos -= cctx->stableIn_notConsumed; + cctx->stableIn_notConsumed = 0; + } + + for (; ; ) + { + nuint ipos = input->pos; + nuint opos = output->pos; + flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); + cctx->consumedSrcSize += input->pos - ipos; + cctx->producedCSize += output->pos - opos; + if (ERR_isError(flushMin) || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0) + { + if (flushMin == 0) + ZSTD_CCtx_trace(cctx, 0); + ZSTD_CCtx_reset(cctx, 
ZSTD_ResetDirective.ZSTD_reset_session_only); + } + + { + nuint err_code = flushMin; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (endOp == ZSTD_EndDirective.ZSTD_e_continue) + { + if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size) + break; + } + else + { + assert(endOp == ZSTD_EndDirective.ZSTD_e_flush || endOp == ZSTD_EndDirective.ZSTD_e_end); + if (flushMin == 0 || output->pos == output->size) + break; + } + } + + assert(endOp == ZSTD_EndDirective.ZSTD_e_continue || flushMin == 0 || output->pos == output->size); + ZSTD_setBufferExpectations(cctx, output, input); + return flushMin; + } + + { + nuint err_code = ZSTD_compressStream_generic(cctx, output, input, endOp); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_setBufferExpectations(cctx, output, input); + return cctx->outBuffContentSize - cctx->outBuffFlushedSize; + } + + /*! ZSTD_compressStream2_simpleArgs() : + * Same as ZSTD_compressStream2(), + * but using only integral types as arguments. + * This variant might be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. + */ + public static nuint ZSTD_compressStream2_simpleArgs(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, nuint* dstPos, void* src, nuint srcSize, nuint* srcPos, ZSTD_EndDirective endOp) + { + ZSTD_outBuffer_s output; + ZSTD_inBuffer_s input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { + nuint cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } + } + + /*! ZSTD_compress2() : + * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. + * (note that this entry point doesn't even expose a compression level parameter). + * ZSTD_compress2() always starts a new frame. 
 * Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
 * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
 * - The function is always blocking, returns when compression is completed.
 * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
 * enough space to successfully compress the data, though it is possible it fails for other reasons.
 * @return : compressed size written into `dst` (<= `dstCapacity),
 * or an error code if it fails (which can be tested using ZSTD_isError()).
 */
public static nuint ZSTD_compress2(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize)
{
    ZSTD_bufferMode_e originalInBufferMode = cctx->requestedParams.inBufferMode;
    ZSTD_bufferMode_e originalOutBufferMode = cctx->requestedParams.outBufferMode;
    ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_session_only);
    /* single-shot call: buffers won't move, so force the stable-buffer fast path */
    cctx->requestedParams.inBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable;
    cctx->requestedParams.outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable;
    {
        nuint oPos = 0;
        nuint iPos = 0;
        nuint result = ZSTD_compressStream2_simpleArgs(cctx, dst, dstCapacity, &oPos, src, srcSize, &iPos, ZSTD_EndDirective.ZSTD_e_end);
        /* restore the caller's buffer modes before any return path below */
        cctx->requestedParams.inBufferMode = originalInBufferMode;
        cctx->requestedParams.outBufferMode = originalOutBufferMode;
        {
            nuint err_code = result;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        if (result != 0)
        {
            /* non-zero remainder: the frame could not be completed in one call */
            assert(oPos == dstCapacity);
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        assert(iPos == srcSize);
        return oPos;
    }
}

/* ZSTD_validateSequence() :
 * @offBase : must use the format required by ZSTD_storeSeq()
 * @returns a ZSTD error code if sequence is not valid
 */
private static nuint ZSTD_validateSequence(uint offBase, uint matchLength, uint minMatch, nuint posInSrc, uint windowLog, nuint dictSize, int useSequenceProducer)
{
    uint windowSize = 1U << (int)windowLog;
    /* posInSrc represents the amount of data the decoder would decode up to this point.
     * As long as the amount of data decoded is less than or equal to window size, offsets may be
     * larger than the total length of output decoded in order to reference the dict, even larger than
     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
     */
    nuint offsetBound = posInSrc > windowSize ? windowSize : posInSrc + dictSize;
    nuint matchLenLowerBound = (nuint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4);
    {
        assert(offsetBound > 0);
        /* offBase carries a +3 bias over the raw offset (codes 1..3 are repcodes) */
        if (offBase > offsetBound + 3)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        }
    }

    if (matchLength < matchLenLowerBound)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    return 0;
}

/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
private static uint ZSTD_finalizeOffBase(uint rawOffset, uint* rep, uint ll0)
{
    assert(rawOffset > 0);
    /* default: not a repcode — encode the raw offset with the +3 bias */
    uint offBase = rawOffset + 3;
    if (ll0 == 0 && rawOffset == rep[0])
    {
        assert(1 >= 1);
        assert(1 <= 3);
        offBase = 1;
    }
    else if (rawOffset == rep[1])
    {
        /* with ll0 set, repcode indices shift down by one */
        assert(2 - ll0 >= 1);
        assert(2 - ll0 <= 3);
        offBase = 2 - ll0;
    }
    else if (rawOffset == rep[2])
    {
        assert(3 - ll0 >= 1);
        assert(3 - ll0 <= 3);
        offBase = 3 - ll0;
    }
    else if (ll0 != 0 && rawOffset == rep[0] - 1)
    {
        /* special case: rep[0]-1 is representable as code 3 when litLength == 0 */
        assert(3 >= 1);
        assert(3 <= 3);
        offBase = 3;
    }

    return offBase;
}

/* This function scans through an array of ZSTD_Sequence,
 * storing the sequences it reads, until it reaches a block delimiter.
 * Note that the block delimiter includes the last literals of the block.
 * @blockSize must be == sum(sequence_lengths).
 * @returns @blockSize on success, and a ZSTD_error otherwise.
 */
private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_SequencePosition* seqPos, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint blockSize, ZSTD_paramSwitch_e externalRepSearch)
{
    uint idx = seqPos->idx;
    uint startIdx = idx;
    byte* ip = (byte*)src;
    byte* iend = ip + blockSize;
    repcodes_s updatedRepcodes;
    uint dictSize;
    if (cctx->cdict != null)
    {
        dictSize = (uint)cctx->cdict->dictContentSize;
    }
    else if (cctx->prefixDict.dict != null)
    {
        dictSize = (uint)cctx->prefixDict.dictSize;
    }
    else
    {
        dictSize = 0;
    }

    memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s));
    /* a (matchLength==0, offset==0) entry marks the block delimiter */
    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx)
    {
        uint litLength = inSeqs[idx].litLength;
        uint matchLength = inSeqs[idx].matchLength;
        uint offBase;
        if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable)
        {
            /* repcode search disabled: always emit the raw-offset encoding */
            assert(inSeqs[idx].offset > 0);
            offBase = inSeqs[idx].offset + 3;
        }
        else
        {
            uint ll0 = litLength == 0 ? 1U : 0U;
            offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
        }

        if (cctx->appliedParams.validateSequences != 0)
        {
            seqPos->posInSrc += litLength + matchLength;
            {
                nuint err_code = ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams));
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }
        }

        if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        }

        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
        ip += matchLength + litLength;
    }

    if (idx == inSeqsSize)
    {
        /* no block delimiter found: invalid input in this mode */
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    assert(externalRepSearch != ZSTD_paramSwitch_e.ZSTD_ps_auto);
    assert(idx >= startIdx);
    if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable && idx != startIdx)
    {
        /* repcodes were not tracked above: rebuild history from the last raw offsets */
        uint* rep = updatedRepcodes.rep;
        /* index of last non-block-delimiter sequence */
        uint lastSeqIdx = idx - 1;
        if (lastSeqIdx >= startIdx + 2)
        {
            rep[2] = inSeqs[lastSeqIdx - 2].offset;
            rep[1] = inSeqs[lastSeqIdx - 1].offset;
            rep[0] = inSeqs[lastSeqIdx].offset;
        }
        else if (lastSeqIdx == startIdx + 1)
        {
            rep[2] = rep[0];
            rep[1] = inSeqs[lastSeqIdx - 1].offset;
            rep[0] = inSeqs[lastSeqIdx].offset;
        }
        else
        {
            assert(lastSeqIdx == startIdx);
            rep[2] = rep[1];
            rep[1] = rep[0];
            rep[0] = inSeqs[lastSeqIdx].offset;
        }
    }

    memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s));
    if (inSeqs[idx].litLength != 0)
    {
        /* the delimiter entry carries the block's trailing literals */
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
        ip += inSeqs[idx].litLength;
        seqPos->posInSrc += inSeqs[idx].litLength;
    }

    if (ip != iend)
    {
        /* sum of sequence lengths must match @blockSize exactly */
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    seqPos->idx = idx + 1;
    return blockSize;
}

/*
 * This function attempts to scan through @blockSize bytes in @src
 * represented by the sequences in @inSeqs,
 * storing any (partial) sequences.
 *
 * Occasionally, we may want to reduce the actual number of bytes consumed from @src
 * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH.
 *
 * @returns the number of bytes consumed from @src, necessarily <= @blockSize.
 * Otherwise, it may return a ZSTD error if something went wrong.
 */
private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_SequencePosition* seqPos, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint blockSize, ZSTD_paramSwitch_e externalRepSearch)
{
    uint idx = seqPos->idx;
    uint startPosInSequence = seqPos->posInSequence;
    uint endPosInSequence = seqPos->posInSequence + (uint)blockSize;
    nuint dictSize;
    byte* istart = (byte*)src;
    byte* ip = istart;
    /* May be adjusted if we decide to process fewer than blockSize bytes */
    byte* iend = istart + blockSize;
    repcodes_s updatedRepcodes;
    uint bytesAdjustment = 0;
    uint finalMatchSplit = 0;
    if (cctx->cdict != null)
    {
        dictSize = cctx->cdict->dictContentSize;
    }
    else if (cctx->prefixDict.dict != null)
    {
        dictSize = cctx->prefixDict.dictSize;
    }
    else
    {
        dictSize = 0;
    }

    memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s));
    while (endPosInSequence != 0 && idx < inSeqsSize && finalMatchSplit == 0)
    {
        ZSTD_Sequence currSeq = inSeqs[idx];
        uint litLength = currSeq.litLength;
        uint matchLength = currSeq.matchLength;
        uint rawOffset = currSeq.offset;
        uint offBase;
        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength)
        {
            /* the current sequence fits entirely within this block */
            if (startPosInSequence >= litLength)
            {
                /* resuming mid-match from a previous partial consumption */
                startPosInSequence -= litLength;
                litLength = 0;
                matchLength -= startPosInSequence;
            }
            else
            {
                /* resuming mid-literals */
                litLength -= startPosInSequence;
            }

            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
            startPosInSequence = 0;
        }
        else
        {
            /* the sequence straddles the block boundary */
            if (endPosInSequence > litLength)
            {
                uint firstHalfMatchLength;
                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch)
                {
                    /* Only ever split the match if it is larger than the block size */
                    uint secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch)
                    {
                        /* shrink this block so the remaining half stays >= minMatch */
                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
                        firstHalfMatchLength -= bytesAdjustment;
                    }

                    matchLength = firstHalfMatchLength;
                    finalMatchSplit = 1;
                }
                else
                {
                    /* don't split: only consume up to the end of the literals, stop here */
                    bytesAdjustment = endPosInSequence - currSeq.litLength;
                    endPosInSequence = currSeq.litLength;
                    break;
                }
            }
            else
            {
                break;
            }
        }

        {
            uint ll0 = litLength == 0 ? 1U : 0U;
            offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0);
            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
        }

        if (cctx->appliedParams.validateSequences != 0)
        {
            seqPos->posInSrc += litLength + matchLength;
            {
                nuint err_code = ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams));
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }
        }

        if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        }

        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
        ip += matchLength + litLength;
        if (finalMatchSplit == 0)
            idx++;
    }

    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
    seqPos->idx = idx;
    seqPos->posInSequence = endPosInSequence;
    memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s));
    iend -= bytesAdjustment;
    if (ip != iend)
    {
        /* Store any last literals */
        uint lastLLSize = (uint)(iend - ip);
        assert(ip <= iend);
        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
        seqPos->posInSrc += lastLLSize;
    }

    return (nuint)(iend - istart);
}

/* Selects the sequence-copier callback matching @mode.
 * NOTE(review): the `delegate* managed` casts below appear to have lost their
 * generic parameter lists during extraction — confirm against the original file. */
private static void* ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
    assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, (int)mode) != 0);
    if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters)
    {
        return (delegate* managed)(&ZSTD_transferSequences_wBlockDelim);
    }

    assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters);
    return (delegate* managed)(&ZSTD_transferSequences_noDelim);
}

/* Discover the size of next block by searching for the delimiter.
 * Note that a block delimiter **must** exist in this mode,
 * otherwise it's an input error.
 * The block size retrieved will be later compared to ensure it remains within bounds */
private static nuint blockSize_explicitDelimiter(ZSTD_Sequence* inSeqs, nuint inSeqsSize, ZSTD_SequencePosition seqPos)
{
    int end = 0;
    nuint blockSize = 0;
    nuint spos = seqPos.idx;
    assert(spos <= inSeqsSize);
    while (spos < inSeqsSize)
    {
        /* offset == 0 marks the delimiter entry */
        end = inSeqs[spos].offset == 0 ? 1 : 0;
        blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength;
        if (end != 0)
        {
            /* a delimiter must not carry a match */
            if (inSeqs[spos].matchLength != 0)
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
            break;
        }

        spos++;
    }

    if (end == 0)
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    return blockSize;
}

/* Determines how many source bytes the next block should cover, honoring the
 * delimiter mode: capped at @blockSize, and never beyond @remaining. */
private static nuint determine_blockSize(ZSTD_sequenceFormat_e mode, nuint blockSize, nuint remaining, ZSTD_Sequence* inSeqs, nuint inSeqsSize, ZSTD_SequencePosition seqPos)
{
    if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters)
    {
        return remaining < blockSize ? remaining : blockSize;
    }

    assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters);
    {
        nuint explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
        {
            nuint err_code = explicitBlockSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        if (explicitBlockSize > blockSize)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        if (explicitBlockSize > remaining)
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        return explicitBlockSize;
    }
}

/* Compress all provided sequences, block-by-block.
 *
 * Returns the cumulative size of all compressed blocks (including their headers),
 * otherwise a ZSTD error.
 */
private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint srcSize)
{
    nuint cSize = 0;
    nuint remaining = srcSize;
    ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition
    {
        idx = 0,
        posInSequence = 0,
        posInSrc = 0
    };
    byte* ip = (byte*)src;
    byte* op = (byte*)dst;
    void* sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
    if (remaining == 0)
    {
        /* last block */
        uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1);
        if (dstCapacity < 4)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        /* 32-bit store of a 24-bit header: needs 4 writable bytes, advances 3 */
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (remaining != 0)
    {
        nuint compressedSeqsSize;
        nuint cBlockSize;
        nuint blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, cctx->blockSizeMax, remaining, inSeqs, inSeqsSize, seqPos);
        uint lastBlock = blockSize == remaining ? 1U : 0U;
        {
            nuint err_code = blockSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        assert(blockSize <= remaining);
        ZSTD_resetSeqStore(&cctx->seqStore);
        /* NOTE(review): generic args of this function-pointer cast appear lost in extraction */
        blockSize = ((delegate* managed)sequenceCopier)(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes);
        {
            nuint err_code = blockSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        /* blocks below this size are cheaper stored raw than compressed */
        if (blockSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1)
        {
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            {
                nuint err_code = cBlockSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            cSize += cBlockSize;
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            continue;
        }

        if (dstCapacity < ZSTD_blockHeaderSize)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, blockSize, cctx->tmpWorkspace, cctx->tmpWkspSize, cctx->bmi2);
        {
            nuint err_code = compressedSeqsSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        if (cctx->isFirstBlock == 0 && ZSTD_maybeRLE(&cctx->seqStore) != 0 && ZSTD_isRLE(ip, blockSize) != 0)
        {
            /* force the RLE path below (sentinel value 1) */
            compressedSeqsSize = 1;
        }

        if (compressedSeqsSize == 0)
        {
            /* not compressible: emit a raw block */
            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
            {
                nuint err_code = cBlockSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }
        }
        else if (compressedSeqsSize == 1)
        {
            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
            {
                nuint err_code = cBlockSize;
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }
        }
        else
        {
            uint cBlockHeader;
            /* compressed block: commit repcodes/entropy tables for the next block */
            ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check;
            cBlockHeader = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
        }

        cSize += cBlockSize;
        if (lastBlock != 0)
        {
            break;
        }
        else
        {
            ip += blockSize;
            op += cBlockSize;
            remaining -= blockSize;
            dstCapacity -= cBlockSize;
            cctx->isFirstBlock = 0;
        }
    }

    return cSize;
}

/*! ZSTD_compressSequences() :
 * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
 * @src contains the entire input (not just the literals).
 * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
 * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
 * The entire source is compressed into a single frame.
 *
 * The compression behavior changes based on cctx params. In particular:
 * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
 * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
 * the block size derived from the cctx, and sequences may be split. This is the default setting.
 *
 * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
 * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
 *
 * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes
 * using the advanced parameter ZSTD_c_repcodeResolution.
 Repcodes will improve compression ratio, though the benefit
 * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.
 * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).
 * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
 *
 * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
 * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
 * specifics regarding offset/matchlength requirements) and then bail out and return an error.
 *
 * In addition to the two adjustable experimental params, there are other important cctx params.
 * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
 * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
 * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
 * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
 *
 * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.
 * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,
 * and cannot emit an RLE block that disagrees with the repcode history.
 * @return : final compressed size, or a ZSTD error code.
 */
public static nuint ZSTD_compressSequences(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint srcSize)
{
    byte* op = (byte*)dst;
    nuint cSize = 0;
    assert(cctx != null);
    {
        nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, ZSTD_EndDirective.ZSTD_e_end, srcSize);
        if (ERR_isError(err_code))
        {
            return err_code;
        }
    }

    {
        nuint frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
        op += frameHeaderSize;
        assert(frameHeaderSize <= dstCapacity);
        dstCapacity -= frameHeaderSize;
        cSize += frameHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0)
    {
        /* checksum covers the original (uncompressed) content */
        ZSTD_XXH64_update(&cctx->xxhState, src, srcSize);
    }

    {
        nuint cBlocksSize = ZSTD_compressSequences_internal(cctx, op, dstCapacity, inSeqs, inSeqsSize, src, srcSize);
        {
            nuint err_code = cBlocksSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        cSize += cBlocksSize;
        assert(cBlocksSize <= dstCapacity);
        dstCapacity -= cBlocksSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag != 0)
    {
        /* frame epilogue: low 32 bits of the XXH64 digest */
        uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState);
        if (dstCapacity < 4)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        MEM_writeLE32((sbyte*)dst + cSize, checksum);
        cSize += 4;
    }

    return cSize;
}

/* Converts raw ZSTD_Sequence entries to the internal seqDef form without repcode search.
 * @return 0 when no length overflowed 16 bits; otherwise an encoded position:
 * n+1 for a long match at index n, or n+nbSequences+1 for long literals at index n. */
private static nuint convertSequences_noRepcodes(SeqDef_s* dstSeqs, ZSTD_Sequence* inSeqs, nuint nbSequences)
{
    nuint longLen = 0;
    nuint n;
    for (n = 0; n < nbSequences; n++)
    {
        assert(inSeqs[n].offset > 0);
        dstSeqs[n].offBase = inSeqs[n].offset + 3;
        dstSeqs[n].litLength = (ushort)inSeqs[n].litLength;
        dstSeqs[n].mlBase = (ushort)(inSeqs[n].matchLength - 3);
        if (inSeqs[n].matchLength > 65535 + 3)
        {
            /* at most one long-length sequence per block is representable */
            assert(longLen == 0);
            longLen = n + 1;
        }

        if (inSeqs[n].litLength > 65535)
        {
            assert(longLen == 0);
            longLen = n + nbSequences + 1;
        }
    }

    return longLen;
}

/*
 * Precondition: Sequences must end on an explicit Block Delimiter
 * @return: 0 on success, or an error code.
 * Note: Sequence validation functionality has been disabled (removed).
 * This is helpful to generate a lean main pipeline, improving performance.
 * It may be re-inserted later.
 */
private static nuint ZSTD_convertBlockSequences(ZSTD_CCtx_s* cctx, ZSTD_Sequence* inSeqs, nuint nbSequences, int repcodeResolution)
{
    repcodes_s updatedRepcodes;
    nuint seqNb = 0;
    if (nbSequences >= cctx->seqStore.maxNbSeq)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s));
    assert(nbSequences >= 1);
    /* last entry must be the block delimiter */
    assert(inSeqs[nbSequences - 1].matchLength == 0);
    assert(inSeqs[nbSequences - 1].offset == 0);
    if (repcodeResolution == 0)
    {
        nuint longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences - 1);
        cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences - 1;
        if (longl != 0)
        {
            /* decode the overflow position encoded by convertSequences_noRepcodes */
            assert(cctx->seqStore.longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none);
            if (longl <= nbSequences - 1)
            {
                cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength;
                cctx->seqStore.longLengthPos = (uint)(longl - 1);
            }
            else
            {
                assert(longl <= 2 * (nbSequences - 1));
                cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength;
                cctx->seqStore.longLengthPos = (uint)(longl - (nbSequences - 1) - 1);
            }
        }
    }
    else
    {
        for (seqNb = 0; seqNb < nbSequences - 1; seqNb++)
        {
            uint litLength = inSeqs[seqNb].litLength;
            uint matchLength = inSeqs[seqNb].matchLength;
            uint ll0 = litLength == 0 ?
                1U : 0U;
            uint offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0);
            ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength);
            ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
        }
    }

    if (repcodeResolution == 0 && nbSequences > 1)
    {
        /* repcodes were not maintained above: reconstruct them from the tail offsets */
        uint* rep = updatedRepcodes.rep;
        if (nbSequences >= 4)
        {
            /* index of last full sequence */
            uint lastSeqIdx = (uint)nbSequences - 2;
            rep[2] = inSeqs[lastSeqIdx - 2].offset;
            rep[1] = inSeqs[lastSeqIdx - 1].offset;
            rep[0] = inSeqs[lastSeqIdx].offset;
        }
        else if (nbSequences == 3)
        {
            rep[2] = rep[0];
            rep[1] = inSeqs[0].offset;
            rep[0] = inSeqs[1].offset;
        }
        else
        {
            assert(nbSequences == 2);
            rep[2] = rep[1];
            rep[1] = rep[0];
            rep[0] = inSeqs[0].offset;
        }
    }

    memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s));
    return 0;
}

/* Scans sequences up to and including the next block delimiter, summing sizes.
 * If no delimiter is found, the returned summary's nbSequences holds an error code. */
private static BlockSummary ZSTD_get1BlockSummary(ZSTD_Sequence* seqs, nuint nbSeqs)
{
    nuint totalMatchSize = 0;
    nuint litSize = 0;
    nuint n;
    assert(seqs != null);
    for (n = 0; n < nbSeqs; n++)
    {
        totalMatchSize += seqs[n].matchLength;
        litSize += seqs[n].litLength;
        if (seqs[n].matchLength == 0)
        {
            assert(seqs[n].offset == 0);
            break;
        }
    }

    if (n == nbSeqs)
    {
        /* no block delimiter found */
        BlockSummary bs;
        System.Runtime.CompilerServices.Unsafe.SkipInit(out bs);
        bs.nbSequences = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        return bs;
    }

    {
        BlockSummary bs;
        bs.nbSequences = n + 1;
        bs.blockSize = litSize + totalMatchSize;
        bs.litSize = litSize;
        return bs;
    }
}

/* Compresses blocks described by (sequences, literals); core of
 * ZSTD_compressSequencesAndLiterals(). Every block must be compressible:
 * no raw/RLE fallback is possible because the full source is not available. */
private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint nbSequences, void* literals, nuint litSize, nuint srcSize)
{
    nuint remaining = srcSize;
    nuint cSize = 0;
    byte* op = (byte*)dst;
    int repcodeResolution = cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0;
    assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto);
    if (nbSequences == 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    if (nbSequences == 1 && inSeqs[0].litLength == 0)
    {
        /* last block */
        uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1);
        if (dstCapacity < 3)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        MEM_writeLE24(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
        cSize += ZSTD_blockHeaderSize;
    }

    while (nbSequences != 0)
    {
        nuint compressedSeqsSize, cBlockSize, conversionStatus;
        BlockSummary block = ZSTD_get1BlockSummary(inSeqs, nbSequences);
        uint lastBlock = block.nbSequences == nbSequences ? 1U : 0U;
        {
            nuint err_code = block.nbSequences;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        assert(block.nbSequences <= nbSequences);
        if (block.litSize > litSize)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
        }

        ZSTD_resetSeqStore(&cctx->seqStore);
        conversionStatus = ZSTD_convertBlockSequences(cctx, inSeqs, block.nbSequences, repcodeResolution);
        {
            nuint err_code = conversionStatus;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        inSeqs += block.nbSequences;
        nbSequences -= block.nbSequences;
        remaining -= block.blockSize;
        if (dstCapacity < ZSTD_blockHeaderSize)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, literals, block.litSize, &cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, cctx->tmpWorkspace, cctx->tmpWkspSize, cctx->bmi2);
        {
            nuint err_code = compressedSeqsSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        /* an "expanded" result is treated as incompressible */
        if (compressedSeqsSize > cctx->blockSizeMax)
            compressedSeqsSize = 0;
        litSize -= block.litSize;
        literals = (sbyte*)literals + block.litSize;
        if (compressedSeqsSize == 0)
        {
            /* raw/RLE fallback impossible here: the full source isn't available */
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock));
        }
        else
        {
            uint cBlockHeader;
            assert(compressedSeqsSize > 1);
            ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid)
                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check;
            cBlockHeader = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(compressedSeqsSize << 3);
            MEM_writeLE24(op, cBlockHeader);
            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
        }

        cSize += cBlockSize;
        op += cBlockSize;
        dstCapacity -= cBlockSize;
        cctx->isFirstBlock = 0;
        if (lastBlock != 0)
        {
            assert(nbSequences == 0);
            break;
        }
    }

    if (litSize != 0)
    {
        /* @litSize must equal the sum of all litLength fields */
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    if (remaining != 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid));
    }

    return cSize;
}

/*! ZSTD_compressSequencesAndLiterals() :
 * This is a variant of ZSTD_compressSequences() which,
 * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),
 * aka all the literals, already extracted and laid out into a single continuous buffer.
 * This can be useful if the process generating the sequences also happens to generate the buffer of literals,
 * thus skipping an extraction + caching stage.
 * It's a speed optimization, useful when the right conditions are met,
 * but it also features the following limitations:
 * - Only supports explicit delimiter mode
 * - Currently does not support Sequences validation (so input Sequences are trusted)
 * - Not compatible with frame checksum, which must be disabled
 * - If any block is incompressible, will fail and return an error
 * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.
 * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
 * @litBufCapacity must be at least 8 bytes larger than @litSize.
 * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
 * @return : final compressed size, or a ZSTD error code.
 */
public static nuint ZSTD_compressSequencesAndLiterals(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* literals, nuint litSize, nuint litCapacity, nuint decompressedSize)
{
    byte* op = (byte*)dst;
    nuint cSize = 0;
    assert(cctx != null);
    if (litCapacity < litSize)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall));
    }

    {
        nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, ZSTD_EndDirective.ZSTD_e_end, decompressedSize);
        if (ERR_isError(err_code))
        {
            return err_code;
        }
    }

    /* reject configurations this variant does not support (see doc comment above) */
    if (cctx->appliedParams.blockDelimiters == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported));
    }

    if (cctx->appliedParams.validateSequences != 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported));
    }

    if (cctx->appliedParams.fParams.checksumFlag != 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported));
    }

    {
        nuint frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, decompressedSize, cctx->dictID);
        op += frameHeaderSize;
        assert(frameHeaderSize <= dstCapacity);
        dstCapacity -= frameHeaderSize;
        cSize += frameHeaderSize;
    }

    {
        nuint cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, op, dstCapacity, inSeqs, inSeqsSize, literals, litSize, decompressedSize);
        {
            nuint err_code = cBlocksSize;
            if (ERR_isError(err_code))
            {
                return err_code;
            }
        }

        cSize += cBlocksSize;
        assert(cBlocksSize <= dstCapacity);
        dstCapacity -= cBlocksSize;
    }

    return cSize;
}

/*====== Finalize ======*/
/* Returns the input buffer to present when flushing/ending the stream:
 * the caller's remembered buffer in stable-input mode, else an empty one. */
private static ZSTD_inBuffer_s inBuffer_forEndFlush(ZSTD_CCtx_s* zcs)
{
    ZSTD_inBuffer_s nullInput = new ZSTD_inBuffer_s
    {
        src = null,
        size = 0,
        pos = 0
    };
    int stableInput = zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ? 1 : 0;
    return stableInput != 0 ? zcs->expectedInBuffer : nullInput;
}

/*! ZSTD_flushStream() :
 * @return : amount of data remaining to flush */
public static nuint ZSTD_flushStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output)
{
    ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs);
    /* do not ingest any new input while flushing */
    input.size = input.pos;
    return ZSTD_compressStream2(zcs, output, &input, ZSTD_EndDirective.ZSTD_e_flush);
}

/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
public static nuint ZSTD_endStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output)
{
    ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs);
    nuint remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_EndDirective.ZSTD_e_end);
    {
        nuint err_code = remainingToFlush;
        if (ERR_isError(err_code))
        {
            return err_code;
        }
    }

    if (zcs->appliedParams.nbWorkers > 0)
        return remainingToFlush;
    {
        /* single-thread mode: add the not-yet-written epilogue to the estimate
         * (3-byte last-block header, plus 4-byte checksum if enabled) */
        nuint lastBlockSize = (nuint)(zcs->frameEnded != 0 ? 0 : 3);
        nuint checksumSize = (nuint)(zcs->frameEnded != 0 ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
        nuint toFlush = remainingToFlush + lastBlockSize + checksumSize;
        return toFlush;
    }
}

/* Highest advertised compression level. */
public static int ZSTD_maxCLevel()
{
    return 22;
}

/* Lowest (fastest) advertised compression level. */
public static int ZSTD_minCLevel()
{
    return -(1 << 17);
}

/* Default compression level. */
public static int ZSTD_defaultCLevel()
{
    return 3;
}

/* Derives compression parameters tuned for dedicated-dictionary search
 * from a compression level and dictionary size. */
private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int compressionLevel, nuint dictSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict);
    switch (cParams.strategy)
    {
        case ZSTD_strategy.ZSTD_fast:
        case ZSTD_strategy.ZSTD_dfast:
            break;
        case ZSTD_strategy.ZSTD_greedy:
        case ZSTD_strategy.ZSTD_lazy:
        case ZSTD_strategy.ZSTD_lazy2:
            /* lazy strategies get a larger hash table for dedicated dict search */
            cParams.hashLog += 2;
            break;
        case ZSTD_strategy.ZSTD_btlazy2:
        case ZSTD_strategy.ZSTD_btopt:
        case ZSTD_strategy.ZSTD_btultra:
        case ZSTD_strategy.ZSTD_btultra2:
            break;
    }

    return cParams;
}

/* Returns 1 when the given cparams are compatible with dedicated-dictionary search, else 0. */
private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParameters* cParams)
{
    return cParams->strategy >= ZSTD_strategy.ZSTD_greedy && cParams->strategy <= ZSTD_strategy.ZSTD_lazy2 && cParams->hashLog > cParams->chainLog && cParams->chainLog <= 24 ? 1 : 0;
}

/**
 * Reverses the adjustment applied to cparams when enabling dedicated dict
 * search. This is used to recover the params set to be used in the working
 * context. (Otherwise, those tables would also grow.)
+ */ + private static void ZSTD_dedicatedDictSearch_revertCParams(ZSTD_compressionParameters* cParams) + { + switch (cParams->strategy) + { + case ZSTD_strategy.ZSTD_fast: + case ZSTD_strategy.ZSTD_dfast: + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + cParams->hashLog -= 2; + if (cParams->hashLog < 6) + { + cParams->hashLog = 6; + } + + break; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + break; + } + } + + private static ulong ZSTD_getCParamRowSize(ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + { + switch (mode) + { + case ZSTD_CParamMode_e.ZSTD_cpm_unknown: + case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: + case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: + break; + case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: + dictSize = 0; + break; + default: + assert(0 != 0); + break; + } + + { + int unknown = srcSizeHint == unchecked(0UL - 1) ? 1 : 0; + nuint addedSize = (nuint)(unknown != 0 && dictSize > 0 ? 500 : 0); + return unknown != 0 && dictSize == 0 ? unchecked(0UL - 1) : srcSizeHint + dictSize + addedSize; + } + } + + /*! ZSTD_getCParams_internal() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. + * Use dictSize == 0 for unknown or unused. + * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */ + private static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + { + ulong rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); + uint tableID = (uint)((rSize <= 256 * (1 << 10) ? 1 : 0) + (rSize <= 128 * (1 << 10) ? 1 : 0) + (rSize <= 16 * (1 << 10) ? 
1 : 0)); + int row; + if (compressionLevel == 0) + row = 3; + else if (compressionLevel < 0) + row = 0; + else if (compressionLevel > 22) + row = 22; + else + row = compressionLevel; + { + ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; + if (compressionLevel < 0) + { + int clampedCompressionLevel = ZSTD_minCLevel() > compressionLevel ? ZSTD_minCLevel() : compressionLevel; + cp.targetLength = (uint)-clampedCompressionLevel; + } + + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_paramSwitch_e.ZSTD_ps_auto); + } + } + + /*! ZSTD_getCParams() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Size values are optional, provide 0 if not known or unused */ + public static ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, ulong srcSizeHint, nuint dictSize) + { + if (srcSizeHint == 0) + srcSizeHint = unchecked(0UL - 1); + return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown); + } + + /*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ + private static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + { + ZSTD_parameters @params; + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); + @params = new ZSTD_parameters + { + cParams = cParams + }; + @params.fParams.contentSizeFlag = 1; + return @params; + } + + /*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
+ * Fields of `ZSTD_frameParameters` are set to default values */ + public static ZSTD_parameters ZSTD_getParams(int compressionLevel, ulong srcSizeHint, nuint dictSize) + { + if (srcSizeHint == 0) + srcSizeHint = unchecked(0UL - 1); + return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown); + } + + /*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions. It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. */ + public static void ZSTD_registerSequenceProducer(ZSTD_CCtx_s* zc, void* extSeqProdState, void* extSeqProdFunc) + { + assert(zc != null); + ZSTD_CCtxParams_registerSequenceProducer(&zc->requestedParams, extSeqProdState, extSeqProdFunc); + } + + /*! ZSTD_CCtxParams_registerSequenceProducer() : + * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. + * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), + * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx(). 
+ * + * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() + * is required, then this function is for you. Otherwise, you probably don't need it. + * + * See tests/zstreamtest.c for example usage. */ + public static void ZSTD_CCtxParams_registerSequenceProducer(ZSTD_CCtx_params_s* @params, void* extSeqProdState, void* extSeqProdFunc) + { + assert(@params != null); + if (extSeqProdFunc != null) + { + @params->extSeqProdFunc = extSeqProdFunc; + @params->extSeqProdState = extSeqProdState; + } + else + { + @params->extSeqProdFunc = null; + @params->extSeqProdState = null; + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs new file mode 100644 index 000000000..03da49072 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs @@ -0,0 +1,1113 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /** + * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences + * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. 
+ */
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t* seqStore, SeqDef_s* seq)
+ {
+ ZSTD_SequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = (uint)(seq->mlBase + 3);
+ if (seqStore->longLengthPos == (uint)(seq - seqStore->sequencesStart))
+ {
+ if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength)
+ {
+ seqLen.litLength += 0x10000;
+ }
+
+ if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength)
+ {
+ seqLen.matchLength += 0x10000;
+ }
+ }
+
+ return seqLen;
+ }
+
+ private static readonly RawSeqStore_t kNullRawSeqStore = new RawSeqStore_t(seq: null, pos: 0, posInSequence: 0, size: 0, capacity: 0);
+#if NET7_0_OR_GREATER
+ private static ReadOnlySpan<byte> Span_LL_Code => new byte[64]
+ {
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 16,
+ 17,
+ 17,
+ 18,
+ 18,
+ 19,
+ 19,
+ 20,
+ 20,
+ 20,
+ 20,
+ 21,
+ 21,
+ 21,
+ 21,
+ 22,
+ 22,
+ 22,
+ 22,
+ 22,
+ 22,
+ 22,
+ 22,
+ 23,
+ 23,
+ 23,
+ 23,
+ 23,
+ 23,
+ 23,
+ 23,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24,
+ 24
+ };
+ private static byte* LL_Code => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_Code));
+#else
+
+ private static readonly byte* LL_Code = GetArrayPointer(new byte[64] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24 });
+#endif
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static uint ZSTD_LLcode(uint litLength)
+ {
+ const uint LL_deltaCode = 19;
+ return litLength > 63 ?
ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+ }
+
+#if NET7_0_OR_GREATER
+ private static ReadOnlySpan<byte> Span_ML_Code => new byte[128]
+ {
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 30,
+ 31,
+ 32,
+ 32,
+ 33,
+ 33,
+ 34,
+ 34,
+ 35,
+ 35,
+ 36,
+ 36,
+ 36,
+ 36,
+ 37,
+ 37,
+ 37,
+ 37,
+ 38,
+ 38,
+ 38,
+ 38,
+ 38,
+ 38,
+ 38,
+ 38,
+ 39,
+ 39,
+ 39,
+ 39,
+ 39,
+ 39,
+ 39,
+ 39,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 40,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 41,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42,
+ 42
+ };
+ private static byte* ML_Code => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_Code));
+#else
+
+ private static readonly byte* ML_Code = GetArrayPointer(new byte[128] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 });
+#endif
+ /* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ * because it's the format it's stored in seqStore->sequences */
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static uint ZSTD_MLcode(uint mlBase)
+ {
+ const uint ML_deltaCode = 36;
+ return mlBase > 127 ?
ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; + } + + /* ZSTD_cParam_withinBounds: + * @return 1 if value is within cParam bounds, + * 0 otherwise */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); + if (ERR_isError(bounds.error)) + return 0; + if (value < bounds.lowerBound) + return 0; + if (value > bounds.upperBound) + return 0; + return 1; + } + + /* ZSTD_selectAddr: + * @return index >= lowLimit ? candidate : backup, + * tries to force branchless codegen. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_selectAddr(uint index, uint lowLimit, byte* candidate, byte* backup) + { + return index >= lowLimit ? candidate : backup; + } + + /* ZSTD_noCompressBlock() : + * Writes uncompressed block to dst buffer from given src. + * Returns the size of the block */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_noCompressBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + { + uint cBlockHeader24 = lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE24(dst, cBlockHeader24); + memcpy((byte*)dst + ZSTD_blockHeaderSize, src, (uint)srcSize); + return ZSTD_blockHeaderSize + srcSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_rleCompressBlock(void* dst, nuint dstCapacity, byte src, nuint srcSize, uint lastBlock) + { + byte* op = (byte*)dst; + uint cBlockHeader = lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(srcSize << 3); + if (dstCapacity < 4) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE24(op, cBlockHeader); + op[3] = src; + return 4; + } + + /* 
ZSTD_minGain() : + * minimum compression required + * to generate a compress block or a compressed literals section. + * note : use same formula for both situations */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_minGain(nuint srcSize, ZSTD_strategy strat) + { + uint minlog = strat >= ZSTD_strategy.ZSTD_btultra ? (uint)strat - 1 : 6; + assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); + return (srcSize >> (int)minlog) + 2; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_literalsCompressionIsDisabled(ZSTD_CCtx_params_s* cctxParams) + { + switch (cctxParams->literalCompressionMode) + { + case ZSTD_paramSwitch_e.ZSTD_ps_enable: + return 0; + case ZSTD_paramSwitch_e.ZSTD_ps_disable: + return 1; + default: + assert(0 != 0); + goto case ZSTD_paramSwitch_e.ZSTD_ps_auto; + case ZSTD_paramSwitch_e.ZSTD_ps_auto: + return cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast && cctxParams->cParams.targetLength > 0 ? 1 : 0; + } + } + + /*! ZSTD_safecopyLiterals() : + * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. + * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single + * large copies. + */ + private static void ZSTD_safecopyLiterals(byte* op, byte* ip, byte* iend, byte* ilimit_w) + { + assert(iend > ilimit_w); + if (ip <= ilimit_w) + { + ZSTD_wildcopy(op, ip, (nint)(ilimit_w - ip), ZSTD_overlap_e.ZSTD_no_overlap); + op += ilimit_w - ip; + ip = ilimit_w; + } + + while (ip < iend) + *op++ = *ip++; + } + + /*! ZSTD_storeSeqOnly() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * Literals themselves are not copied, but @litPtr is updated. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). 
+ * @matchLength : must be >= MINMATCH + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, nuint litLength, uint offBase, nuint matchLength) + { + assert((nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + assert(litLength <= 1 << 17); + if (litLength > 0xFFFF) + { + assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); + seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; + seqStorePtr->longLengthPos = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + + seqStorePtr->sequences[0].litLength = (ushort)litLength; + seqStorePtr->sequences[0].offBase = offBase; + assert(matchLength <= 1 << 17); + assert(matchLength >= 3); + { + nuint mlBase = matchLength - 3; + if (mlBase > 0xFFFF) + { + assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); + seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; + seqStorePtr->longLengthPos = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + + seqStorePtr->sequences[0].mlBase = (ushort)mlBase; + } + + seqStorePtr->sequences++; + } + + /*! ZSTD_storeSeq() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). + * @matchLength : must be >= MINMATCH + * Allowed to over-read literals up to litLimit. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_storeSeq(SeqStore_t* seqStorePtr, nuint litLength, byte* literals, byte* litLimit, uint offBase, nuint matchLength) + { + byte* litLimit_w = litLimit - 32; + byte* litEnd = literals + litLength; + assert((nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + assert(seqStorePtr->maxNbLit <= 128 * (1 << 10)); + assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); + assert(literals + litLength <= litLimit); + if (litEnd <= litLimit_w) + { + ZSTD_copy16(seqStorePtr->lit, literals); + if (litLength > 16) + { + ZSTD_wildcopy(seqStorePtr->lit + 16, literals + 16, (nint)litLength - 16, ZSTD_overlap_e.ZSTD_no_overlap); + } + } + else + { + ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); + } + + seqStorePtr->lit += litLength; + ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); + } + + /* ZSTD_updateRep() : + * updates in-place @rep (array of repeat offsets) + * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateRep(uint* rep, uint offBase, uint ll0) + { + if (offBase > 3) + { + rep[2] = rep[1]; + rep[1] = rep[0]; + assert(offBase > 3); + rep[0] = offBase - 3; + } + else + { + assert(1 <= offBase && offBase <= 3); + uint repCode = offBase - 1 + ll0; + if (repCode > 0) + { + uint currentOffset = repCode == 3 ? rep[0] - 1 : rep[repCode]; + rep[2] = repCode >= 2 ? 
rep[1] : rep[2]; + rep[1] = rep[0]; + rep[0] = currentOffset; + } + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static repcodes_s ZSTD_newRep(uint* rep, uint offBase, uint ll0) + { + repcodes_s newReps; + memcpy(&newReps, rep, (uint)sizeof(repcodes_s)); + ZSTD_updateRep(newReps.rep, offBase, ll0); + return newReps; + } + + /*-************************************* + * Match length counter + ***************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_count(byte* pIn, byte* pMatch, byte* pInLimit) + { + byte* pStart = pIn; + byte* pInLoopLimit = pInLimit - (sizeof(nuint) - 1); + if (pIn < pInLoopLimit) + { + { + nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff != 0) + return ZSTD_NbCommonBytes(diff); + } + + pIn += sizeof(nuint); + pMatch += sizeof(nuint); + while (pIn < pInLoopLimit) + { + nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff == 0) + { + pIn += sizeof(nuint); + pMatch += sizeof(nuint); + continue; + } + + pIn += ZSTD_NbCommonBytes(diff); + return (nuint)(pIn - pStart); + } + } + + if (MEM_64bits && pIn < pInLimit - 3 && MEM_read32(pMatch) == MEM_read32(pIn)) + { + pIn += 4; + pMatch += 4; + } + + if (pIn < pInLimit - 1 && MEM_read16(pMatch) == MEM_read16(pIn)) + { + pIn += 2; + pMatch += 2; + } + + if (pIn < pInLimit && *pMatch == *pIn) + pIn++; + return (nuint)(pIn - pStart); + } + + /** ZSTD_count_2segments() : + * can count match length with `ip` & `match` in 2 different segments. + * convention : on reaching mEnd, match count continue starting from iStart + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_count_2segments(byte* ip, byte* match, byte* iEnd, byte* mEnd, byte* iStart) + { + byte* vEnd = ip + (mEnd - match) < iEnd ? 
ip + (mEnd - match) : iEnd; + nuint matchLength = ZSTD_count(ip, match, vEnd); + if (match + matchLength != mEnd) + return matchLength; + return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); + } + + private const uint prime3bytes = 506832829U; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_hash3(uint u, uint h, uint s) + { + assert(h <= 32); + return ((u << 32 - 24) * prime3bytes ^ s) >> (int)(32 - h); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash3Ptr(void* ptr, uint h) + { + return ZSTD_hash3(MEM_readLE32(ptr), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash3PtrS(void* ptr, uint h, uint s) + { + return ZSTD_hash3(MEM_readLE32(ptr), h, s); + } + + private const uint prime4bytes = 2654435761U; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_hash4(uint u, uint h, uint s) + { + assert(h <= 32); + return (u * prime4bytes ^ s) >> (int)(32 - h); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash4Ptr(void* ptr, uint h) + { + return ZSTD_hash4(MEM_readLE32(ptr), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash4PtrS(void* ptr, uint h, uint s) + { + return ZSTD_hash4(MEM_readLE32(ptr), h, s); + } + + private const ulong prime5bytes = 889523592379UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 40) * prime5bytes ^ s) >> (int)(64 - h)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5Ptr(void* p, uint h) + { + return ZSTD_hash5(MEM_readLE64(p), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash5(MEM_readLE64(p), h, s); + } + + private const ulong 
prime6bytes = 227718039650203UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 48) * prime6bytes ^ s) >> (int)(64 - h)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6Ptr(void* p, uint h) + { + return ZSTD_hash6(MEM_readLE64(p), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash6(MEM_readLE64(p), h, s); + } + + private const ulong prime7bytes = 58295818150454627UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash7(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 56) * prime7bytes ^ s) >> (int)(64 - h)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash7Ptr(void* p, uint h) + { + return ZSTD_hash7(MEM_readLE64(p), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash7PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash7(MEM_readLE64(p), h, s); + } + + private const ulong prime8bytes = 0xCF1BBCDCB7A56463UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)((u * prime8bytes ^ s) >> (int)(64 - h)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8Ptr(void* p, uint h) + { + return ZSTD_hash8(MEM_readLE64(p), h, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash8(MEM_readLE64(p), h, s); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hashPtr(void* p, uint hBits, uint mls) + { + assert(hBits <= 32); + if (mls == 5) + return ZSTD_hash5Ptr(p, hBits); + if (mls == 6) + return 
ZSTD_hash6Ptr(p, hBits); + if (mls == 7) + return ZSTD_hash7Ptr(p, hBits); + if (mls == 8) + return ZSTD_hash8Ptr(p, hBits); + return ZSTD_hash4Ptr(p, hBits); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hashPtrSalted(void* p, uint hBits, uint mls, ulong hashSalt) + { + assert(hBits <= 32); + if (mls == 5) + return ZSTD_hash5PtrS(p, hBits, hashSalt); + if (mls == 6) + return ZSTD_hash6PtrS(p, hBits, hashSalt); + if (mls == 7) + return ZSTD_hash7PtrS(p, hBits, hashSalt); + if (mls == 8) + return ZSTD_hash8PtrS(p, hBits, hashSalt); + return ZSTD_hash4PtrS(p, hBits, (uint)hashSalt); + } + + /** ZSTD_ipow() : + * Return base^exponent. + */ + private static ulong ZSTD_ipow(ulong @base, ulong exponent) + { + ulong power = 1; + while (exponent != 0) + { + if ((exponent & 1) != 0) + power *= @base; + exponent >>= 1; + @base *= @base; + } + + return power; + } + + /** ZSTD_rollingHash_append() : + * Add the buffer to the hash value. + */ + private static ulong ZSTD_rollingHash_append(ulong hash, void* buf, nuint size) + { + byte* istart = (byte*)buf; + nuint pos; + for (pos = 0; pos < size; ++pos) + { + hash *= prime8bytes; + hash += (ulong)(istart[pos] + 10); + } + + return hash; + } + + /** ZSTD_rollingHash_compute() : + * Compute the rolling hash value of the buffer. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_compute(void* buf, nuint size) + { + return ZSTD_rollingHash_append(0, buf, size); + } + + /** ZSTD_rollingHash_primePower() : + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash + * over a window of length bytes. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_primePower(uint length) + { + return ZSTD_ipow(prime8bytes, length - 1); + } + + /** ZSTD_rollingHash_rotate() : + * Rotate the rolling hash by one byte. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_rotate(ulong hash, byte toRemove, byte toAdd, ulong primePower) + { + hash -= (ulong)(toRemove + 10) * primePower; + hash *= prime8bytes; + hash += (ulong)(toAdd + 10); + return hash; + } + + /** + * ZSTD_window_clear(): + * Clears the window containing the history by simply setting it to empty. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_window_clear(ZSTD_window_t* window) + { + nuint endT = (nuint)(window->nextSrc - window->@base); + uint end = (uint)endT; + window->lowLimit = end; + window->dictLimit = end; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_isEmpty(ZSTD_window_t window) + { + return window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 ? 1U : 0U; + } + + /** + * ZSTD_window_hasExtDict(): + * Returns non-zero if the window has a non-empty extDict. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_hasExtDict(ZSTD_window_t window) + { + return window.lowLimit < window.dictLimit ? 1U : 0U; + } + + /** + * ZSTD_matchState_dictMode(): + * Inspects the provided matchState and figures out what dictMode should be + * passed to the compressor. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTD_dictMode_e ZSTD_matchState_dictMode(ZSTD_MatchState_t* ms) + { + return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict : ms->dictMatchState != null ? ms->dictMatchState->dedicatedDictSearch != 0 ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch : ZSTD_dictMode_e.ZSTD_dictMatchState : ZSTD_dictMode_e.ZSTD_noDict; + } + + /** + * ZSTD_window_canOverflowCorrect(): + * Returns non-zero if the indices are large enough for overflow correction + * to work correctly without impacting compression ratio. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_canOverflowCorrect(ZSTD_window_t window, uint cycleLog, uint maxDist, uint loadedDictEnd, void* src) + { + uint cycleSize = 1U << (int)cycleLog; + uint curr = (uint)((byte*)src - window.@base); + uint minIndexToOverflowCorrect = cycleSize + (maxDist > cycleSize ? maxDist : cycleSize) + 2; + /* Adjust the min index to backoff the overflow correction frequency, + * so we don't waste too much CPU in overflow correction. If this + * computation overflows we don't really care, we just need to make + * sure it is at least minIndexToOverflowCorrect. + */ + uint adjustment = window.nbOverflowCorrections + 1; + uint adjustedIndex = minIndexToOverflowCorrect * adjustment > minIndexToOverflowCorrect ? minIndexToOverflowCorrect * adjustment : minIndexToOverflowCorrect; + uint indexLargeEnough = curr > adjustedIndex ? 1U : 0U; + /* Only overflow correct early if the dictionary is invalidated already, + * so we don't hurt compression ratio. + */ + uint dictionaryInvalidated = curr > maxDist + loadedDictEnd ? 1U : 0U; + return indexLargeEnough != 0 && dictionaryInvalidated != 0 ? 1U : 0U; + } + + /** + * ZSTD_window_needOverflowCorrection(): + * Returns non-zero if the indices are getting too large and need overflow + * protection. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_needOverflowCorrection(ZSTD_window_t window, uint cycleLog, uint maxDist, uint loadedDictEnd, void* src, void* srcEnd) + { + uint curr = (uint)((byte*)srcEnd - window.@base); + return curr > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) ? 1U : 0U; + } + + /** + * ZSTD_window_correctOverflow(): + * Reduces the indices to protect from index overflow. + * Returns the correction made to the indices, which must be applied to every + * stored index. + * + * The least significant cycleLog bits of the indices must remain the same, + * which may be 0. 
Every index up to maxDist in the past must be valid. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_correctOverflow(ZSTD_window_t* window, uint cycleLog, uint maxDist, void* src) + { + /* preemptive overflow correction: + * 1. correction is large enough: + * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) + * > 1<<29 + * + * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: + * After correction, current is less than (1<base < 1<<32. + * 3. (cctx->lowLimit + 1< 3<<29 + 1<@base); + uint currentCycle = curr & cycleMask; + /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ + uint currentCycleCorrection = currentCycle < 2 ? cycleSize > 2 ? cycleSize : 2 : 0; + uint newCurrent = currentCycle + currentCycleCorrection + (maxDist > cycleSize ? maxDist : cycleSize); + uint correction = curr - newCurrent; + assert((maxDist & maxDist - 1) == 0); + assert((curr & cycleMask) == (newCurrent & cycleMask)); + assert(curr > newCurrent); + { + assert(correction > 1 << 28); + } + + window->@base += correction; + window->dictBase += correction; + if (window->lowLimit < correction + 2) + { + window->lowLimit = 2; + } + else + { + window->lowLimit -= correction; + } + + if (window->dictLimit < correction + 2) + { + window->dictLimit = 2; + } + else + { + window->dictLimit -= correction; + } + + assert(newCurrent >= maxDist); + assert(newCurrent - maxDist >= 2); + assert(window->lowLimit <= newCurrent); + assert(window->dictLimit <= newCurrent); + ++window->nbOverflowCorrections; + return correction; + } + + /** + * ZSTD_window_enforceMaxDist(): + * Updates lowLimit so that: + * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd + * + * It ensures index is valid as long as index >= lowLimit. + * This must be called before a block compression call. + * + * loadedDictEnd is only defined if a dictionary is in use for current compression. 
 * As the name implies, loadedDictEnd represents the index at end of dictionary.
 * The value lies within context's referential, it can be directly compared to blockEndIdx.
 *
 * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
 * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
 * This is because dictionaries are allowed to be referenced fully
 * as long as the last byte of the dictionary is in the window.
 * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
 *
 * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
 * In dictMatchState mode, lowLimit and dictLimit are the same,
 * and the dictionary is below them.
 * forceWindow and dictMatchState are therefore incompatible.
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, void* blockEnd, uint maxDist, uint* loadedDictEndPtr, ZSTD_MatchState_t** dictMatchStatePtr)
{
    uint blockEndIdx = (uint)((byte*)blockEnd - window->@base);
    uint loadedDictEnd = loadedDictEndPtr != null ? *loadedDictEndPtr : 0;
    // Only act once the block end has progressed past maxDist + loadedDictEnd;
    // before that, every stored index is still reachable.
    if (blockEndIdx > maxDist + loadedDictEnd)
    {
        uint newLowLimit = blockEndIdx - maxDist;
        if (window->lowLimit < newLowLimit)
            window->lowLimit = newLowLimit;
        if (window->dictLimit < window->lowLimit)
        {
            window->dictLimit = window->lowLimit;
        }

        // Past this point the dictionary can no longer be referenced:
        // clear both the caller's loadedDictEnd and its dictMatchState.
        if (loadedDictEndPtr != null)
            *loadedDictEndPtr = 0;
        if (dictMatchStatePtr != null)
            *dictMatchStatePtr = null;
    }
}

/* Similar to ZSTD_window_enforceMaxDist(),
 * but only invalidates dictionary
 * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) + * loadedDictEnd uses same referential as window->base + * maxDist is the window size */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_checkDictValidity(ZSTD_window_t* window, void* blockEnd, uint maxDist, uint* loadedDictEndPtr, ZSTD_MatchState_t** dictMatchStatePtr) + { + assert(loadedDictEndPtr != null); + assert(dictMatchStatePtr != null); + { + uint blockEndIdx = (uint)((byte*)blockEnd - window->@base); + uint loadedDictEnd = *loadedDictEndPtr; + assert(blockEndIdx >= loadedDictEnd); + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) + { + *loadedDictEndPtr = 0; + *dictMatchStatePtr = null; + } + } + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] + { + 32, + 0 + }; + private static byte* stringToByte_20_00 => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_stringToByte_20_00)); +#else + + private static readonly byte* stringToByte_20_00 = GetArrayPointer(new byte[] { 32, 0 }); +#endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_window_init(ZSTD_window_t* window) + { + *window = new ZSTD_window_t + { + @base = stringToByte_20_00, + dictBase = stringToByte_20_00, + dictLimit = 2, + lowLimit = 2, + nextSrc = stringToByte_20_00 + 2, + nbOverflowCorrections = 0 + }; + } + + /** + * ZSTD_window_update(): + * Updates the window by appending [src, src + srcSize) to the window. + * If it is not contiguous, the current prefix becomes the extDict, and we + * forget about the extDict. Handles overlap of the prefix and extDict. + * Returns non-zero if the segment is contiguous. 
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_window_update(ZSTD_window_t* window, void* src, nuint srcSize, int forceNonContiguous)
{
    byte* ip = (byte*)src;
    uint contiguous = 1;
    if (srcSize == 0)
        return contiguous;
    assert(window->@base != null);
    assert(window->dictBase != null);
    if (src != window->nextSrc || forceNonContiguous != 0)
    {
        /* not contiguous */
        nuint distanceFromBase = (nuint)(window->nextSrc - window->@base);
        window->lowLimit = window->dictLimit;
        assert(distanceFromBase == (uint)distanceFromBase); /* should never overflow */
        window->dictLimit = (uint)distanceFromBase;
        window->dictBase = window->@base;
        // Rebase so that the new segment keeps monotonically growing indices.
        window->@base = ip - distanceFromBase;
        // If the extDict would be tiny (< 8 bytes), drop it entirely.
        if (window->dictLimit - window->lowLimit < 8)
            window->lowLimit = window->dictLimit;
        contiguous = 0;
    }

    window->nextSrc = ip + srcSize;
    // If the new input overlaps the extDict, shrink the extDict so that the
    // two ranges no longer alias.
    if (ip + srcSize > window->dictBase + window->lowLimit && ip < window->dictBase + window->dictLimit)
    {
        nuint highInputIdx = (nuint)(ip + srcSize - window->dictBase);
        uint lowLimitMax = highInputIdx > window->dictLimit ? window->dictLimit : (uint)highInputIdx;
        assert(highInputIdx < 0xffffffff);
        window->lowLimit = lowLimitMax;
    }

    return contiguous;
}

/**
 * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_getLowestMatchIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog)
{
    uint maxDistance = 1U << (int)windowLog;
    uint lowestValid = ms->window.lowLimit;
    // Unsigned-subtraction trick: if curr < lowestValid the difference wraps
    // huge and withinWindow falls back to curr - maxDistance.
    uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid;
    uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U;
    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
     */
    uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow;
    return matchLowest;
}

/**
 * Returns the lowest allowed match index in the prefix.
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static uint ZSTD_getLowestPrefixIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog)
{
    uint maxDistance = 1U << (int)windowLog;
    uint lowestValid = ms->window.dictLimit;
    uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid;
    uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U;
    /* When computing the lowest prefix index we need to take the dictionary into account to handle
     * the edge case where the dictionary and the source are contiguous in memory.
     */
    uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow;
    return matchLowest;
}

/* index_safety_check:
 * intentional underflow : ensure repIndex isn't overlapping dict + prefix
 * @return 1 if values are not overlapping,
 * 0 otherwise */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ZSTD_index_overlap_check(uint prefixLowestIndex, uint repIndex)
{
    return prefixLowestIndex - 1 - repIndex >= 3 ? 1 : 0;
}

/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
 * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ZSTD_writeTaggedIndex(uint* hashTable, nuint hashAndTag, uint index)
{
    // Low 8 bits of hashAndTag are the tag; the rest is the table slot.
    nuint hash = hashAndTag >> 8;
    uint tag = (uint)(hashAndTag & (1U << 8) - 1);
    assert(index >> 32 - 8 == 0); /* index must fit in 24 bits */
    hashTable[hash] = index << 8 | tag;
}

/* Helper function for short cache matchfinders.
 * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match.
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ZSTD_comparePackedTags(nuint packedTag1, nuint packedTag2)
{
    // Compare only the low 8 tag bits of each packed (index, tag) value.
    uint tag1 = (uint)(packedTag1 & (1U << 8) - 1);
    uint tag2 = (uint)(packedTag2 & (1U << 8) - 1);
    return tag1 == tag2 ? 1 : 0;
}

/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ZSTD_hasExtSeqProd(ZSTD_CCtx_params_s* @params)
{
    return @params->extSeqProdFunc != null ? 1 : 0;
}
}
}
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs
new file mode 100644
index 000000000..3d50a52c5
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs
@@ -0,0 +1,199 @@
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* **************************************************************
         * Literals compression - special cases
         ****************************************************************/
        /* Emits a Raw_Literals_Block: a 1-3 byte header (size-dependent)
         * followed by the literals copied verbatim. */
        private static nuint ZSTD_noCompressLiterals(void* dst, nuint dstCapacity, void* src, nuint srcSize)
        {
            byte* ostart = (byte*)dst;
            // Header is 1 byte up to 31 literals, 2 up to 4095, else 3.
            uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 1 : 0));
            if (srcSize + flSize > dstCapacity)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            }

            switch (flSize)
            {
                case 1:
                    ostart[0] = (byte)((uint)SymbolEncodingType_e.set_basic + (srcSize << 3));
                    break;
                case 2:
                    MEM_writeLE16(ostart, (ushort)((uint)SymbolEncodingType_e.set_basic + (1 << 2) + (srcSize << 4)));
                    break;
                case 3:
                    MEM_writeLE32(ostart, (uint)((uint)SymbolEncodingType_e.set_basic + (3 << 2) + (srcSize << 4)));
                    break;
                default:
                    assert(0 != 0); /* flSize is always 1, 2 or 3 */
                    break;
            }

            memcpy(ostart + flSize, src, (uint)srcSize);
            return srcSize + flSize;
        }

        /* Returns 1 iff every byte of src[0..srcSize) equals src[0]. */
        private static int allBytesIdentical(void* src, nuint srcSize)
        {
            assert(srcSize >= 1);
            assert(src != null);
            {
                byte b = ((byte*)src)[0];
                nuint p;
                for (p = 1; p < srcSize; p++)
                {
                    if (((byte*)src)[p] != b)
                        return 0;
                }

                return 1;
            }
        }

        /* ZSTD_compressRleLiteralsBlock() :
         * Conditions :
         * - All bytes in @src are identical
         * - dstCapacity >= 4 */
        private static nuint ZSTD_compressRleLiteralsBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize)
        {
            byte* ostart = (byte*)dst;
            uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 1 : 0));
            assert(dstCapacity >= 4);
            assert(allBytesIdentical(src, srcSize) != 0);
            switch (flSize)
            {
                case 1:
                    ostart[0] = (byte)((uint)SymbolEncodingType_e.set_rle + (srcSize << 3));
                    break;
                case 2:
                    MEM_writeLE16(ostart, (ushort)((uint)SymbolEncodingType_e.set_rle + (1 << 2) + (srcSize << 4)));
                    break;
                case 3:
                    MEM_writeLE32(ostart, (uint)((uint)SymbolEncodingType_e.set_rle + (3 << 2) + (srcSize << 4)));
                    break;
                default:
                    assert(0 != 0); /* flSize is always 1, 2 or 3 */
                    break;
            }

            // Header plus the single repeated byte.
            ostart[flSize] = *(byte*)src;
            return flSize + 1;
        }

        /* ZSTD_minLiteralsToCompress() :
         * returns minimal amount of literals
         * for literal compression to even be attempted.
         * Minimum is made tighter as compression strategy increases.
+ */ + private static nuint ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) + { + assert((int)strategy >= 0); + assert((int)strategy <= 9); + { + int shift = 9 - (int)strategy < 3 ? 9 - (int)strategy : 3; + nuint mintc = huf_repeat == HUF_repeat.HUF_repeat_valid ? 6 : (nuint)8 << shift; + return mintc; + } + } + + /* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ + private static nuint ZSTD_compressLiterals(void* dst, nuint dstCapacity, void* src, nuint srcSize, void* entropyWorkspace, nuint entropyWorkspaceSize, ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, int suspectUncompressible, int bmi2) + { + nuint lhSize = (nuint)(3 + (srcSize >= 1 * (1 << 10) ? 1 : 0) + (srcSize >= 16 * (1 << 10) ? 1 : 0)); + byte* ostart = (byte*)dst; + uint singleStream = srcSize < 256 ? 1U : 0U; + SymbolEncodingType_e hType = SymbolEncodingType_e.set_compressed; + nuint cLitSize; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + if (disableLiteralCompression != 0) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + if (dstCapacity < lhSize + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + { + HUF_repeat repeat = prevHuf->repeatMode; + int flags = 0 | (bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) | (strategy < ZSTD_strategy.ZSTD_lazy && srcSize <= 1024 ? (int)HUF_flags_e.HUF_flags_preferRepeat : 0) | (strategy >= ZSTD_strategy.ZSTD_btultra ? (int)HUF_flags_e.HUF_flags_optimalDepth : 0) | (suspectUncompressible != 0 ? 
(int)HUF_flags_e.HUF_flags_suspectUncompressible : 0); + void* huf_compress; + if (repeat == HUF_repeat.HUF_repeat_valid && lhSize == 3) + singleStream = 1; + huf_compress = singleStream != 0 ? (delegate* managed)(&HUF_compress1X_repeat) : (delegate* managed)(&HUF_compress4X_repeat); + cLitSize = ((delegate* managed)huf_compress)(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, entropyWorkspace, entropyWorkspaceSize, &nextHuf->CTable.e0, &repeat, flags); + if (repeat != HUF_repeat.HUF_repeat_none) + { + hType = SymbolEncodingType_e.set_repeat; + } + } + + { + nuint minGain = ZSTD_minGain(srcSize, strategy); + if (cLitSize == 0 || cLitSize >= srcSize - minGain || ERR_isError(cLitSize)) + { + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + } + + if (cLitSize == 1) + { + if (srcSize >= 8 || allBytesIdentical(src, srcSize) != 0) + { + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } + } + + if (hType == SymbolEncodingType_e.set_compressed) + { + nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; + } + + switch (lhSize) + { + case 3: +#if DEBUG + if (singleStream == 0) + assert(srcSize >= 6); +#endif + { + uint lhc = (uint)hType + ((singleStream == 0 ? 
1U : 0U) << 2) + ((uint)srcSize << 4) + ((uint)cLitSize << 14); + MEM_writeLE24(ostart, lhc); + break; + } + + case 4: + assert(srcSize >= 6); + { + uint lhc = (uint)(hType + (2 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 18); + MEM_writeLE32(ostart, lhc); + break; + } + + case 5: + assert(srcSize >= 6); + { + uint lhc = (uint)(hType + (3 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (byte)(cLitSize >> 10); + break; + } + + default: + assert(0 != 0); + break; + } + + return lhSize + cLitSize; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs new file mode 100644 index 000000000..17578d25f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs @@ -0,0 +1,662 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_kInverseProbabilityLog256 => new uint[256] + { + 0, + 2048, + 1792, + 1642, + 1536, + 1453, + 1386, + 1329, + 1280, + 1236, + 1197, + 1162, + 1130, + 1100, + 1073, + 1047, + 1024, + 1001, + 980, + 960, + 941, + 923, + 906, + 889, + 874, + 859, + 844, + 830, + 817, + 804, + 791, + 779, + 768, + 756, + 745, + 734, + 724, + 714, + 704, + 694, + 685, + 676, + 667, + 658, + 650, + 642, + 633, + 626, + 618, + 610, + 603, + 595, + 588, + 581, + 574, + 567, + 561, + 554, + 548, + 542, + 535, + 529, + 523, + 517, + 512, + 506, + 500, + 495, + 489, + 484, + 478, + 473, + 468, + 463, + 458, + 453, + 448, + 443, + 438, + 434, + 429, + 424, + 420, + 415, + 411, + 407, + 402, + 398, + 394, + 390, + 386, + 382, + 377, + 373, + 370, + 366, + 362, + 358, + 354, + 350, + 347, + 343, + 339, + 336, + 332, + 329, + 325, + 
322, + 318, + 315, + 311, + 308, + 305, + 302, + 298, + 295, + 292, + 289, + 286, + 282, + 279, + 276, + 273, + 270, + 267, + 264, + 261, + 258, + 256, + 253, + 250, + 247, + 244, + 241, + 239, + 236, + 233, + 230, + 228, + 225, + 222, + 220, + 217, + 215, + 212, + 209, + 207, + 204, + 202, + 199, + 197, + 194, + 192, + 190, + 187, + 185, + 182, + 180, + 178, + 175, + 173, + 171, + 168, + 166, + 164, + 162, + 159, + 157, + 155, + 153, + 151, + 149, + 146, + 144, + 142, + 140, + 138, + 136, + 134, + 132, + 130, + 128, + 126, + 123, + 121, + 119, + 117, + 115, + 114, + 112, + 110, + 108, + 106, + 104, + 102, + 100, + 98, + 96, + 94, + 93, + 91, + 89, + 87, + 85, + 83, + 82, + 80, + 78, + 76, + 74, + 73, + 71, + 69, + 67, + 66, + 64, + 62, + 61, + 59, + 57, + 55, + 54, + 52, + 50, + 49, + 47, + 46, + 44, + 42, + 41, + 39, + 37, + 36, + 34, + 33, + 31, + 30, + 28, + 26, + 25, + 23, + 22, + 20, + 19, + 17, + 16, + 14, + 13, + 11, + 10, + 8, + 7, + 5, + 4, + 2, + 1 + }; + private static uint* kInverseProbabilityLog256 => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256)); +#else + + private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer(new uint[256] { 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, 212, 209, 
207, 204, 202, 199, 197, 194, 192, 190, 187, 185, 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, 5, 4, 2, 1 }); +#endif + private static uint ZSTD_getFSEMaxSymbolValue(uint* ctable) + { + void* ptr = ctable; + ushort* u16ptr = (ushort*)ptr; + uint maxSymbolValue = MEM_read16(u16ptr + 1); + return maxSymbolValue; + } + + /** + * Returns true if we should use ncount=-1 else we should + * use ncount=1 for low probability symbols instead. + */ + private static uint ZSTD_useLowProbCount(nuint nbSeq) + { + return nbSeq >= 2048 ? 1U : 0U; + } + + /** + * Returns the cost in bytes of encoding the normalized count header. + * Returns an error if any of the helper functions return an error. + */ + private static nuint ZSTD_NCountCost(uint* count, uint max, nuint nbSeq, uint FSELog) + { + byte* wksp = stackalloc byte[512]; + short* norm = stackalloc short[53]; + uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + { + nuint err_code = FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return FSE_writeNCount(wksp, sizeof(byte) * 512, norm, max, tableLog); + } + + /** + * Returns the cost in bits of encoding the distribution described by count + * using the entropy bound. 
 */
private static nuint ZSTD_entropyCost(uint* count, uint max, nuint total)
{
    uint cost = 0;
    uint s;
    assert(total > 0);
    for (s = 0; s <= max; ++s)
    {
        // norm is the symbol's probability scaled to /256; clamp to 1 so
        // present-but-rare symbols still contribute a cost.
        uint norm = (uint)(256 * count[s] / total);
        if (count[s] != 0 && norm == 0)
            norm = 1;
        assert(count[s] < total);
        cost += count[s] * kInverseProbabilityLog256[norm];
    }

    // Table entries are in 1/256-bit units; shift back down to bits.
    return cost >> 8;
}

/**
 * Returns the cost in bits of encoding the distribution in count using ctable.
 * Returns an error if ctable cannot represent all the symbols in count.
 */
private static nuint ZSTD_fseBitCost(uint* ctable, uint* count, uint max)
{
    const uint kAccuracyLog = 8;
    nuint cost = 0;
    uint s;
    FSE_CState_t cstate;
    FSE_initCState(&cstate, ctable);
    if (ZSTD_getFSEMaxSymbolValue(ctable) < max)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
    }

    for (s = 0; s <= max; ++s)
    {
        uint tableLog = cstate.stateLog;
        // badCost marks a symbol the table cannot encode at all.
        uint badCost = tableLog + 1 << (int)kAccuracyLog;
        uint bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
        if (count[s] == 0)
            continue;
        if (bitCost >= badCost)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
        }

        cost += (nuint)count[s] * bitCost;
    }

    return cost >> (int)kAccuracyLog;
}

/**
 * Returns the cost in bits of encoding the distribution in count using the
 * table described by norm. The max symbol support by norm is assumed >= max.
 * norm must be valid for every symbol with non-zero probability in count.
 */
private static nuint ZSTD_crossEntropyCost(short* norm, uint accuracyLog, uint* count, uint max)
{
    uint shift = 8 - accuracyLog;
    nuint cost = 0;
    uint s;
    assert(accuracyLog <= 8);
    for (s = 0; s <= max; ++s)
    {
        // A norm of -1 denotes a low-probability symbol; treat it as 1.
        uint normAcc = norm[s] != -1 ? (uint)norm[s] : 1;
        uint norm256 = normAcc << (int)shift;
        assert(norm256 > 0);
        assert(norm256 < 256);
        cost += count[s] * kInverseProbabilityLog256[norm256];
    }

    return cost >> 8;
}

/* Chooses how a symbol stream should be encoded (RLE / raw / repeat table /
 * fresh FSE table) by comparing estimated costs, and updates *repeatMode. */
private static SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode, uint* count, uint max, nuint mostFrequent, nuint nbSeq, uint FSELog, uint* prevCTable, short* defaultNorm, uint defaultNormLog, ZSTD_DefaultPolicy_e isDefaultAllowed, ZSTD_strategy strategy)
{
    if (mostFrequent == nbSeq)
    {
        // Every sequence uses the same symbol: RLE (or raw for tiny blocks).
        *repeatMode = FSE_repeat.FSE_repeat_none;
        if (isDefaultAllowed != default && nbSeq <= 2)
        {
            return SymbolEncodingType_e.set_basic;
        }

        return SymbolEncodingType_e.set_rle;
    }

    if (strategy < ZSTD_strategy.ZSTD_lazy)
    {
        // Fast strategies: use cheap heuristics instead of cost estimation.
        if (isDefaultAllowed != default)
        {
            const nuint staticFse_nbSeq_max = 1000;
            // NOTE(review): "(nuint)(10 - strategy)" mixes int and enum; this
            // may be an extraction artifact of "(10 - (int)strategy)" — verify
            // against the original ZstdSharp sources.
            nuint mult = (nuint)(10 - strategy);
            const nuint baseLog = 3;
            /* 28-36 for offset, 56-72 for lengths */
            nuint dynamicFse_nbSeq_min = ((nuint)1 << (int)defaultNormLog) * mult >> (int)baseLog;
            assert(defaultNormLog >= 5 && defaultNormLog <= 6);
            assert(mult <= 9 && mult >= 7);
            if (*repeatMode == FSE_repeat.FSE_repeat_valid && nbSeq < staticFse_nbSeq_max)
            {
                return SymbolEncodingType_e.set_repeat;
            }

            if (nbSeq < dynamicFse_nbSeq_min || mostFrequent < nbSeq >> (int)(defaultNormLog - 1))
            {
                *repeatMode = FSE_repeat.FSE_repeat_none;
                return SymbolEncodingType_e.set_basic;
            }
        }
    }
    else
    {
        // Strong strategies: estimate the cost of each option and pick the
        // cheapest; error codes (huge nuint values) naturally lose the
        // comparisons below.
        nuint basicCost = isDefaultAllowed != default ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
        nuint repeatCost = *repeatMode != FSE_repeat.FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
        nuint NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
        nuint compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
#if DEBUG
        if (isDefaultAllowed != default)
        {
            assert(!ERR_isError(basicCost));
            assert(!(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost)));
        }
#endif

        assert(!ERR_isError(NCountCost));
        assert(compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)));
        if (basicCost <= repeatCost && basicCost <= compressedCost)
        {
            assert(isDefaultAllowed != default);
            *repeatMode = FSE_repeat.FSE_repeat_none;
            return SymbolEncodingType_e.set_basic;
        }

        if (repeatCost <= compressedCost)
        {
            assert(!ERR_isError(repeatCost));
            return SymbolEncodingType_e.set_repeat;
        }

        assert(compressedCost < basicCost && compressedCost < repeatCost);
    }

    *repeatMode = FSE_repeat.FSE_repeat_check;
    return SymbolEncodingType_e.set_compressed;
}

/* Builds nextCTable for the chosen encoding type and writes its header
 * (NCount for set_compressed) to dst; returns bytes written or an error. */
private static nuint ZSTD_buildCTable(void* dst, nuint dstCapacity, uint* nextCTable, uint FSELog, SymbolEncodingType_e type, uint* count, uint max, byte* codeTable, nuint nbSeq, short* defaultNorm, uint defaultNormLog, uint defaultMax, uint* prevCTable, nuint prevCTableSize, void* entropyWorkspace, nuint entropyWorkspaceSize)
{
    byte* op = (byte*)dst;
    byte* oend = op + dstCapacity;
    switch (type)
    {
        case SymbolEncodingType_e.set_rle:
            {
                nuint err_code = FSE_buildCTable_rle(nextCTable, (byte)max);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            if (dstCapacity == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            }

            // RLE: a single byte (the repeated symbol) is the whole payload.
            *op = codeTable[0];
            return 1;
        case SymbolEncodingType_e.set_repeat:
            memcpy(nextCTable, prevCTable, (uint)prevCTableSize);
            return 0;
        case SymbolEncodingType_e.set_basic:
            {
                /* note : could be pre-calculated */
                nuint err_code = FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            return 0;
        case SymbolEncodingType_e.set_compressed:
            {
                ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
                nuint nbSeq_1 = nbSeq;
                uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
                // The last symbol's state is implicit in FSE; drop one
                // occurrence of it from the stats when possible.
                if (count[codeTable[nbSeq - 1]] > 1)
                {
                    count[codeTable[nbSeq - 1]]--;
                    nbSeq_1--;
                }

                assert(nbSeq_1 > 1);
                assert(entropyWorkspaceSize >= (nuint)sizeof(ZSTD_BuildCTableWksp));
                {
                    nuint err_code = FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1));
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                assert(oend >= op);
                {
                    /* overflow protected */
                    nuint NCountSize = FSE_writeNCount(op, (nuint)(oend - op), wksp->norm, max, tableLog);
                    {
                        nuint err_code = NCountSize;
                        if (ERR_isError(err_code))
                        {
                            return err_code;
                        }
                    }

                    {
                        nuint err_code = FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(uint) * 285);
                        if (ERR_isError(err_code))
                        {
                            return err_code;
                        }
                    }

                    return NCountSize;
                }
            }

        default:
            assert(0 != 0);
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
    }
}

/* Interleaves the three FSE streams (lit-length, offset, match-length) into
 * a single backward bitstream, last sequence first. The bitstream state is
 * kept in locals (container/pos/ptr) for speed and flushed via helpers. */
private static nuint ZSTD_encodeSequences_body(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    System.Runtime.CompilerServices.Unsafe.SkipInit(out blockStream);
    FSE_CState_t stateMatchLength;
    System.Runtime.CompilerServices.Unsafe.SkipInit(out stateMatchLength);
    FSE_CState_t stateOffsetBits;
    System.Runtime.CompilerServices.Unsafe.SkipInit(out stateOffsetBits);
    FSE_CState_t stateLitLength;
    System.Runtime.CompilerServices.Unsafe.SkipInit(out stateLitLength);
    if (ERR_isError(BIT_initCStream(ref blockStream, dst, dstCapacity)))
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    }

    nuint blockStream_bitContainer = blockStream.bitContainer;
    uint blockStream_bitPos = blockStream.bitPos;
    sbyte* blockStream_ptr = blockStream.ptr;
    sbyte* blockStream_endPtr = blockStream.endPtr;
    // Seed each FSE state with the LAST sequence's symbol (FSE encodes
    // backwards), then emit its raw bits.
    FSE_initCState2(ref stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
    FSE_initCState2(ref stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
    FSE_initCState2(ref stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
    BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
    if (MEM_32bits)
        BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
    BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].mlBase, ML_bits[mlCodeTable[nbSeq - 1]]);
    if (MEM_32bits)
        BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
    if (longOffsets != 0)
    {
        // Long offsets may exceed the bit-container capacity; split into
        // extra bits plus the remainder (STREAM_ACCUMULATOR_MIN is 25/57).
        uint ofBits = ofCodeTable[nbSeq - 1];
        uint extraBits = ofBits - (ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 ? ofBits : (uint)(MEM_32bits ? 25 : 57) - 1);
        if (extraBits != 0)
        {
            BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase, extraBits);
            BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
        }

        BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase >> (int)extraBits, ofBits - extraBits);
    }
    else
    {
        BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase, ofCodeTable[nbSeq - 1]);
    }

    BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
    {
        nuint n;
        // n counts down from nbSeq-2; unsigned wrap-around past 0 makes
        // n >= nbSeq and terminates the loop.
        for (n = nbSeq - 2; n < nbSeq; n--)
        {
            byte llCode = llCodeTable[n];
            byte ofCode = ofCodeTable[n];
            byte mlCode = mlCodeTable[n];
            uint llBits = LL_bits[llCode];
            uint ofBits = ofCode;
            uint mlBits = ML_bits[mlCode];
            FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateOffsetBits, ofCode);
            FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateMatchLength, mlCode);
            if (MEM_32bits)
                BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
            FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateLitLength, llCode);
            if (MEM_32bits || ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8))
                BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
            BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].litLength, llBits);
            if (MEM_32bits && llBits + mlBits > 24)
                BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
            BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].mlBase, mlBits);
            if (MEM_32bits || ofBits + mlBits + llBits > 56)
                BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
            if (longOffsets != 0)
            {
                uint extraBits = ofBits - (ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 ? ofBits : (uint)(MEM_32bits ? 25 : 57) - 1);
                if (extraBits != 0)
                {
                    BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase, extraBits);
                    BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
                }

                BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase >> (int)extraBits, ofBits - extraBits);
            }
            else
            {
                BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase, ofBits);
            }

            BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr);
        }
    }

    // Flush the three final FSE states, then close the stream.
    FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateMatchLength);
    FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateOffsetBits);
    FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateLitLength);
    {
        nuint streamSize = BIT_closeCStream(ref blockStream_bitContainer, ref blockStream_bitPos, blockStream_ptr, blockStream_endPtr, blockStream.startPtr);
        if (streamSize == 0)
        {
            /* not enough space */
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
        }

        return streamSize;
    }
}

private static nuint ZSTD_encodeSequences_default(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets);
}

/* Public entry point; bmi2 is accepted for signature parity but this port
 * has a single (default) code path. */
private static nuint ZSTD_encodeSequences(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets, int bmi2)
{
    return ZSTD_encodeSequences_default(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets);
}
}
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs
new file mode 100644
index 000000000..3d5382c98
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs
@@ -0,0 +1,585 @@
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /** ZSTD_compressSubBlock_literal() :
         *  Compresses literals section for a sub-block.
         *  When we have to write the Huffman table we will sometimes choose a header
         *  size larger than necessary. This is because we have to pick the header size
         *  before we know the table size + compressed size, so we have a bound on the
         *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
         *
         *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
         *  in writing the header, otherwise it is set to 0.
         *
         *  hufMetadata->hType has literals block type info.
         *  If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
         *  If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
         *  If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
         *  If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
         *  and the following sub-blocks' literals sections will be Treeless_Literals_Block.
         * @return : compressed size of literals section of a sub-block
         *           Or 0 if unable to compress.
         *           Or error code */
        private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTablesMetadata_t* hufMetadata, byte* literals, nuint litSize, void* dst, nuint dstSize, int bmi2, int writeEntropy, int* entropyWritten)
        {
            /* Reserve room for the Huffman description when the table is written with this block.
             * 200 is an upper bound on the serialized table size; it only influences the
             * literals-header size estimate below. */
            nuint header = (nuint)(writeEntropy != 0 ? 200 : 0);
            /* Literals section header is 3, 4 or 5 bytes depending on how large litSize is. */
            nuint lhSize = (nuint)(3 + (litSize >= 1 * (1 << 10) - header ? 1 : 0) + (litSize >= 16 * (1 << 10) - header ? 1 : 0));
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstSize;
            byte* op = ostart + lhSize;
            /* A 3-byte header implies the single-stream Huffman layout. */
            uint singleStream = lhSize == 3 ? 1U : 0U;
            /* Blocks after the first reuse the previously written table (set_repeat). */
            SymbolEncodingType_e hType = writeEntropy != 0 ? hufMetadata->hType : SymbolEncodingType_e.set_repeat;
            nuint cLitSize = 0;
            *entropyWritten = 0;
            if (litSize == 0 || hufMetadata->hType == SymbolEncodingType_e.set_basic)
            {
                return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
            }
            else if (hufMetadata->hType == SymbolEncodingType_e.set_rle)
            {
                return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
            }

            assert(litSize > 0);
            assert(hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat);
            if (writeEntropy != 0 && hufMetadata->hType == SymbolEncodingType_e.set_compressed)
            {
                /* Emit the serialized Huffman table ahead of the compressed literals. */
                memcpy(op, hufMetadata->hufDesBuffer, (uint)hufMetadata->hufDesSize);
                op += hufMetadata->hufDesSize;
                cLitSize += hufMetadata->hufDesSize;
            }

            {
                int flags = bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0;
                nuint cSize = singleStream != 0 ? HUF_compress1X_usingCTable(op, (nuint)(oend - op), literals, litSize, hufTable, flags) : HUF_compress4X_usingCTable(op, (nuint)(oend - op), literals, litSize, hufTable, flags);
                op += cSize;
                cLitSize += cSize;
                /* Not compressible (cSize == 0) or an error: fall back to raw literals. */
                if (cSize == 0 || ERR_isError(cSize))
                {
                    return 0;
                }

                if (writeEntropy == 0 && cLitSize >= litSize)
                {
                    return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
                }

                /* The header size was guessed from litSize; if the compressed size needs a
                 * bigger header than reserved, fall back to uncompressed literals. */
                if (lhSize < (nuint)(3 + (cLitSize >= 1 * (1 << 10) ? 1 : 0) + (cLitSize >= 16 * (1 << 10) ? 1 : 0)))
                {
                    assert(cLitSize > litSize);
                    return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
                }
            }

            /* Pack block type, stream count, litSize and cLitSize into the literals header. */
            switch (lhSize)
            {
                case 3:
                {
                    uint lhc = (uint)hType + ((singleStream == 0 ? 1U : 0U) << 2) + ((uint)litSize << 4) + ((uint)cLitSize << 14);
                    MEM_writeLE24(ostart, lhc);
                    break;
                }

                case 4:
                {
                    uint lhc = (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18);
                    MEM_writeLE32(ostart, lhc);
                    break;
                }

                case 5:
                {
                    uint lhc = (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22);
                    MEM_writeLE32(ostart, lhc);
                    ostart[4] = (byte)(cLitSize >> 10);
                    break;
                }

                default:
                    /* impossible: lhSize is always 3, 4 or 5 */
                    assert(0 != 0);
                    break;
            }

            *entropyWritten = 1;
            return (nuint)(op - ostart);
        }

        /* Sum of decompressed sizes (literals + matches) covered by the given sequences.
         * For the last sub-block, trailing literals beyond the last sequence are included
         * via litSize, hence the <= assertion instead of ==. */
        private static nuint ZSTD_seqDecompressedSize(SeqStore_t* seqStore, SeqDef_s* sequences, nuint nbSeqs, nuint litSize, int lastSubBlock)
        {
            nuint matchLengthSum = 0;
            nuint litLengthSum = 0;
            nuint n;
            for (n = 0; n < nbSeqs; n++)
            {
                ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences + n);
                litLengthSum += seqLen.litLength;
                matchLengthSum += seqLen.matchLength;
            }

            if (lastSubBlock == 0)
                assert(litLengthSum == litSize);
            else
                assert(litLengthSum <= litSize);
            return matchLengthSum + litSize;
        }

        /** ZSTD_compressSubBlock_sequences() :
         * Compresses sequences section for a sub-block.
         * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
         * symbol compression modes for the super-block.
         * The first successfully compressed block will have these in its header.
         * We set entropyWritten=1 when we succeed in compressing the sequences.
         * The following sub-blocks will always have repeat mode.
         * @return : compressed size of sequences section of a sub-block
         *           Or 0 if it is unable to compress
         *           Or error code. */
        private static nuint ZSTD_compressSubBlock_sequences(ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, SeqDef_s* sequences, nuint nbSeq, byte* llCode, byte* mlCode, byte* ofCode, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, int bmi2, int writeEntropy, int* entropyWritten)
        {
            /* Long-offset mode is required when windowLog exceeds what a single bit-read
             * can fetch (25 bits on 32-bit hosts, 57 on 64-bit). */
            int longOffsets = cctxParams->cParams.windowLog > (uint)(MEM_32bits ? 25 : 57) ? 1 : 0;
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstCapacity;
            byte* op = ostart;
            byte* seqHead;
            *entropyWritten = 0;
            /* need at least the max seq-count header (3) plus the compression-modes byte */
            if (oend - op < 3 + 1)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
            }

            /* Sequence-count header: 1, 2 or 3 bytes depending on nbSeq. */
            if (nbSeq < 128)
                *op++ = (byte)nbSeq;
            else if (nbSeq < 0x7F00)
            {
                op[0] = (byte)((nbSeq >> 8) + 0x80);
                op[1] = (byte)nbSeq;
                op += 2;
            }
            else
            {
                op[0] = 0xFF;
                MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00));
                op += 3;
            }

            if (nbSeq == 0)
            {
                return (nuint)(op - ostart);
            }

            /* compression-modes byte, filled in below */
            seqHead = op++;
            if (writeEntropy != 0)
            {
                uint LLtype = (uint)fseMetadata->llType;
                uint Offtype = (uint)fseMetadata->ofType;
                uint MLtype = (uint)fseMetadata->mlType;
                *seqHead = (byte)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
                memcpy(op, fseMetadata->fseTablesBuffer, (uint)fseMetadata->fseTablesSize);
                op += fseMetadata->fseTablesSize;
            }
            else
            {
                /* all three tables reuse those written by an earlier sub-block */
                uint repeat = (uint)SymbolEncodingType_e.set_repeat;
                *seqHead = (byte)((repeat << 6) + (repeat << 4) + (repeat << 2));
            }

            {
                nuint bitstreamSize = ZSTD_encodeSequences(op, (nuint)(oend - op), fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, sequences, nbSeq, longOffsets, bmi2);
                {
                    nuint err_code = bitstreamSize;
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                op += bitstreamSize;
                /* NOTE(review): mirrors upstream zstd's guard against a decoder corner case
                 * when the last FSE count plus the bitstream land on exactly 3 bytes;
                 * give up on compressing this sub-block rather than emit it. */
                if (writeEntropy != 0 && fseMetadata->lastCountSize != 0 && fseMetadata->lastCountSize + bitstreamSize < 4)
                {
                    assert(fseMetadata->lastCountSize + bitstreamSize == 3);
                    return 0;
                }
            }

            if (op - seqHead < 4)
            {
                return 0;
            }

            *entropyWritten = 1;
            return (nuint)(op - ostart);
        }

        /** ZSTD_compressSubBlock() :
         * Compresses a single sub-block.
         * @return : compressed size of the sub-block
         *           Or 0 if it failed to compress. */
        private static nuint ZSTD_compressSubBlock(ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, SeqDef_s* sequences, nuint nbSeq, byte* literals, nuint litSize, byte* llCode, byte* mlCode, byte* ofCode, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, int bmi2, int writeLitEntropy, int writeSeqEntropy, int* litEntropyWritten, int* seqEntropyWritten, uint lastBlock)
        {
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstCapacity;
            /* leave room for the 3-byte block header, written last */
            byte* op = ostart + ZSTD_blockHeaderSize;
            {
                nuint cLitSize = ZSTD_compressSubBlock_literal(&entropy->huf.CTable.e0, &entropyMetadata->hufMetadata, literals, litSize, op, (nuint)(oend - op), bmi2, writeLitEntropy, litEntropyWritten);
                {
                    nuint err_code = cLitSize;
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                if (cLitSize == 0)
                    return 0;
                op += cLitSize;
            }

            {
                nuint cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, &entropyMetadata->fseMetadata, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, op, (nuint)(oend - op), bmi2, writeSeqEntropy, seqEntropyWritten);
                {
                    nuint err_code = cSeqSize;
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                if (cSeqSize == 0)
                    return 0;
                op += cSeqSize;
            }

            {
                /* block header: lastBlock flag, block type, then content size */
                nuint cSize = (nuint)(op - ostart) - ZSTD_blockHeaderSize;
                uint cBlockHeader24 = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3);
                MEM_writeLE24(ostart, cBlockHeader24);
            }

            return (nuint)(op - ostart);
        }

        /* Estimate of the compressed size of a sub-block's literals section,
         * based on a histogram of the literals and the chosen literals block type. */
        private static nuint ZSTD_estimateSubBlockSize_literal(byte* literals, nuint litSize, ZSTD_hufCTables_t* huf, ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, nuint wkspSize, int writeEntropy)
        {
            uint* countWksp = (uint*)workspace;
            uint maxSymbolValue = 255;
            /* Use hard coded size of 3 bytes */
            nuint literalSectionHeaderSize = 3;
            if (hufMetadata->hType == SymbolEncodingType_e.set_basic)
                return litSize;
            else if (hufMetadata->hType == SymbolEncodingType_e.set_rle)
                return 1;
            else if (hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat)
            {
                nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, literals, litSize, workspace, wkspSize);
                if (ERR_isError(largest))
                    return litSize; /* histogram failed: assume incompressible */
                {
                    nuint cLitSizeEstimate = HUF_estimateCompressedSize(&huf->CTable.e0, countWksp, maxSymbolValue);
                    if (writeEntropy != 0)
                        cLitSizeEstimate += hufMetadata->hufDesSize;
                    return cLitSizeEstimate + literalSectionHeaderSize;
                }
            }

            /* unreachable: all SymbolEncodingType_e values handled above */
            assert(0 != 0);
            return 0;
        }

        /* Estimate in bytes of the FSE-coded size of one symbol stream (LL, ML or OF). */
        private static nuint ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type, byte* codeTable, uint maxCode, nuint nbSeq, uint* fseCTable, byte* additionalBits, short* defaultNorm, uint defaultNormLog, uint defaultMax, void* workspace, nuint wkspSize)
        {
            uint* countWksp = (uint*)workspace;
            byte* ctp = codeTable;
            byte* ctStart = ctp;
            byte* ctEnd = ctStart + nbSeq;
            nuint cSymbolTypeSizeEstimateInBits = 0;
            uint max = maxCode;
            HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);
            if (type == SymbolEncodingType_e.set_basic)
            {
                assert(max <= defaultMax);
                cSymbolTypeSizeEstimateInBits = max <= defaultMax ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            }
            else if (type == SymbolEncodingType_e.set_rle)
            {
                cSymbolTypeSizeEstimateInBits = 0;
            }
            else if (type == SymbolEncodingType_e.set_compressed || type == SymbolEncodingType_e.set_repeat)
            {
                cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
            }

            if (ERR_isError(cSymbolTypeSizeEstimateInBits))
                return nbSeq * 10; /* pessimistic fallback: 10 bits per sequence */
            /* add the extra (raw) bits carried alongside each code */
            while (ctp < ctEnd)
            {
                if (additionalBits != null)
                    cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
                else
                    cSymbolTypeSizeEstimateInBits += *ctp; /* for offsets, the code is the bit count */
                ctp++;
            }

            return cSymbolTypeSizeEstimateInBits / 8;
        }

        /* Estimate of the compressed size of a sub-block's sequences section. */
        private static nuint ZSTD_estimateSubBlockSize_sequences(byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize, int writeEntropy)
        {
            /* Use hard coded size of 3 bytes */
            const nuint sequencesSectionHeaderSize = 3;
            nuint cSeqSizeEstimate = 0;
            if (nbSeq == 0)
                return sequencesSectionHeaderSize;
            cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, 31, nbSeq, fseTables->offcodeCTable, null, OF_defaultNorm, OF_defaultNormLog, 28, workspace, wkspSize);
            cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, 35, nbSeq, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, 35, workspace, wkspSize);
            cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, 52, nbSeq, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, 52, workspace, wkspSize);
            if (writeEntropy != 0)
                cSeqSizeEstimate += fseMetadata->fseTablesSize;
            return cSeqSizeEstimate + sequencesSectionHeaderSize;
        }

        /* Combined estimate (literals + sequences + block header) for a sub-block. */
        private static EstimatedBlockSize ZSTD_estimateSubBlockSize(byte* literals, nuint litSize, byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize, int writeLitEntropy, int writeSeqEntropy)
        {
            EstimatedBlockSize ebs;
            ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy);
            ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy);
            ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
            return ebs;
        }

        /* Returns 1 if any of the three sequence streams still needs its entropy
         * table written (i.e. is not basic/repeat), 0 otherwise. */
        private static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t* fseMetadata)
        {
            if (fseMetadata->llType == SymbolEncodingType_e.set_compressed || fseMetadata->llType == SymbolEncodingType_e.set_rle)
                return 1;
            if (fseMetadata->mlType == SymbolEncodingType_e.set_compressed || fseMetadata->mlType == SymbolEncodingType_e.set_rle)
                return 1;
            if (fseMetadata->ofType == SymbolEncodingType_e.set_compressed || fseMetadata->ofType == SymbolEncodingType_e.set_rle)
                return 1;
            return 0;
        }

        /* Total number of literal bytes covered by the first seqCount sequences at sp. */
        private static nuint countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seqCount)
        {
            nuint n, total = 0;
            assert(sp != null);
            for (n = 0; n < seqCount; n++)
            {
                total += ZSTD_getSequenceLength(seqStore, sp + n).litLength;
            }

            return total;
        }

        /* Greedily counts how many sequences fit in targetBudget, using the per-literal
         * and per-sequence cost estimates (both scaled by 256). Returns at least 1. */
        private static nuint sizeBlockSequences(SeqDef_s* sp, nuint nbSeqs, nuint targetBudget, nuint avgLitCost, nuint avgSeqCost, int firstSubBlock)
        {
            nuint n, budget = 0, inSize = 0;
            /* generous estimate */
            nuint headerSize = (nuint)firstSubBlock * 120 * 256;
            assert(firstSubBlock == 0 || firstSubBlock == 1);
            budget += headerSize;
            budget += sp[0].litLength * avgLitCost + avgSeqCost;
            if (budget > targetBudget)
                return 1;
            inSize = (nuint)(sp[0].litLength + (sp[0].mlBase + 3));
            for (n = 1; n < nbSeqs; n++)
            {
                nuint currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
                budget += currentCost;
                inSize += (nuint)(sp[n].litLength + (sp[n].mlBase + 3));
                /* stop once over budget, unless the block would still compress well */
                if (budget > targetBudget && budget < inSize * 256)
                    break;
            }

            return n;
        }

        /** ZSTD_compressSubBlock_multi() :
         * Breaks super-block into multiple sub-blocks and compresses them.
         * Entropy will be written into the first block.
         * The following blocks use repeat_mode to compress.
         * Sub-blocks are all compressed, except the last one when beneficial.
         * @return : compressed size of the super block (which features multiple ZSTD blocks)
         *           or 0 if it failed to compress. */
        private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, ZSTD_entropyCTablesMetadata_t* entropyMetadata, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, void* src, nuint srcSize, int bmi2, uint lastBlock, void* workspace, nuint wkspSize)
        {
            SeqDef_s* sstart = seqStorePtr->sequencesStart;
            SeqDef_s* send = seqStorePtr->sequences;
            /* tracks progresses within seqStorePtr->sequences */
            SeqDef_s* sp = sstart;
            nuint nbSeqs = (nuint)(send - sstart);
            byte* lstart = seqStorePtr->litStart;
            byte* lend = seqStorePtr->lit;
            byte* lp = lstart;
            nuint nbLiterals = (nuint)(lend - lstart);
            byte* ip = (byte*)src;
            byte* iend = ip + srcSize;
            byte* ostart = (byte*)dst;
            byte* oend = ostart + dstCapacity;
            byte* op = ostart;
            byte* llCodePtr = seqStorePtr->llCode;
            byte* mlCodePtr = seqStorePtr->mlCode;
            byte* ofCodePtr = seqStorePtr->ofCode;
            /* enforce minimum size, to reduce undesirable side effects */
            const nuint minTarget = 1340;
            nuint targetCBlockSize = minTarget > cctxParams->targetCBlockSize ? minTarget : cctxParams->targetCBlockSize;
            int writeLitEntropy = entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0;
            int writeSeqEntropy = 1;
            if (nbSeqs > 0)
            {
                EstimatedBlockSize ebs = ZSTD_estimateSubBlockSize(lp, nbLiterals, ofCodePtr, llCodePtr, mlCodePtr, nbSeqs, &nextCBlock->entropy, entropyMetadata, workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
                /* quick estimation */
                nuint avgLitCost = nbLiterals != 0 ? ebs.estLitSize * 256 / nbLiterals : 256;
                nuint avgSeqCost = (ebs.estBlockSize - ebs.estLitSize) * 256 / nbSeqs;
                nuint nbSubBlocks = (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize > 1 ? (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize : 1;
                nuint n, avgBlockBudget, blockBudgetSupp = 0;
                avgBlockBudget = ebs.estBlockSize * 256 / nbSubBlocks;
                /* estimated size larger than the source: splitting cannot help */
                if (ebs.estBlockSize > srcSize)
                    return 0;
                assert(nbSubBlocks > 0);
                for (n = 0; n < nbSubBlocks - 1; n++)
                {
                    /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
                    nuint seqCount = sizeBlockSequences(sp, (nuint)(send - sp), avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n == 0 ? 1 : 0);
                    assert(seqCount <= (nuint)(send - sp));
                    /* remaining sequences handled by the final sub-block below */
                    if (sp + seqCount == send)
                        break;
                    assert(seqCount > 0);
                    {
                        int litEntropyWritten = 0;
                        int seqEntropyWritten = 0;
                        nuint litSize = countLiterals(seqStorePtr, sp, seqCount);
                        nuint decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
                        nuint cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, (nuint)(oend - op), bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, 0);
                        {
                            nuint err_code = cSize;
                            if (ERR_isError(err_code))
                            {
                                return err_code;
                            }
                        }

                        /* only commit the sub-block if it actually saved space */
                        if (cSize > 0 && cSize < decompressedSize)
                        {
                            assert(ip + decompressedSize <= iend);
                            ip += decompressedSize;
                            lp += litSize;
                            op += cSize;
                            llCodePtr += seqCount;
                            mlCodePtr += seqCount;
                            ofCodePtr += seqCount;
                            if (litEntropyWritten != 0)
                            {
                                writeLitEntropy = 0;
                            }

                            if (seqEntropyWritten != 0)
                            {
                                writeSeqEntropy = 0;
                            }

                            sp += seqCount;
                            blockBudgetSupp = 0;
                        }
                    }
                }
            }

            {
                /* final sub-block: everything that remains, flagged with lastBlock */
                int litEntropyWritten = 0;
                int seqEntropyWritten = 0;
                nuint litSize = (nuint)(lend - lp);
                nuint seqCount = (nuint)(send - sp);
                nuint decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
                nuint cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, (nuint)(oend - op), bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, lastBlock);
                {
                    nuint err_code = cSize;
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                if (cSize > 0 && cSize < decompressedSize)
                {
                    assert(ip + decompressedSize <= iend);
                    ip += decompressedSize;
                    lp += litSize;
                    op += cSize;
                    llCodePtr += seqCount;
                    mlCodePtr += seqCount;
                    ofCodePtr += seqCount;
                    if (litEntropyWritten != 0)
                    {
                        writeLitEntropy = 0;
                    }

                    if (seqEntropyWritten != 0)
                    {
                        writeSeqEntropy = 0;
                    }

                    sp += seqCount;
                }
            }

            /* literal entropy was never emitted: keep the previous block's Huffman tables */
            if (writeLitEntropy != 0)
            {
                memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, (uint)sizeof(ZSTD_hufCTables_t));
            }

            if (writeSeqEntropy != 0 && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata) != 0)
            {
                return 0;
            }

            if (ip < iend)
            {
                /* some data left : last part of the block sent uncompressed */
                nuint rSize = (nuint)(iend - ip);
                nuint cSize = ZSTD_noCompressBlock(op, (nuint)(oend - op), ip, rSize, lastBlock);
                {
                    nuint err_code = cSize;
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }

                assert(cSize != 0);
                op += cSize;
                /* some sequences were dropped: replay them through the repcode history
                 * so the next block starts from consistent repcodes */
                if (sp < send)
                {
                    SeqDef_s* seq;
                    repcodes_s rep;
                    memcpy(&rep, prevCBlock->rep, (uint)sizeof(repcodes_s));
                    for (seq = sstart; seq < sp; ++seq)
                    {
                        ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0 ? 1U : 0U);
                    }

                    memcpy(nextCBlock->rep, &rep, (uint)sizeof(repcodes_s));
                }
            }

            return (nuint)(op - ostart);
        }

        /* ZSTD_compressSuperBlock() :
         * Used to compress a super block when targetCBlockSize is being used.
         * The given block will be compressed into multiple sub blocks that are around targetCBlockSize.
         */
        private static nuint ZSTD_compressSuperBlock(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock)
        {
            /* build entropy stats (Huffman + FSE tables and their metadata) once for the
             * whole super-block, then hand them to the sub-block splitter */
            ZSTD_entropyCTablesMetadata_t entropyMetadata;
            {
                nuint err_code = ZSTD_buildBlockEntropyStats(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, zc->blockState.nextCBlock, &entropyMetadata, &zc->appliedParams, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, zc->tmpWorkspace, zc->tmpWkspSize);
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs
new file mode 100644
index 000000000..8f6063a52
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs
@@ -0,0 +1,585 @@
using System.Runtime.CompilerServices;
using static ZstdSharp.UnsafeHelper;
using System.Diagnostics;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* Debug-only sanity check of the workspace segment ordering:
         * objects <= tables <= (aligned/buffers growing down from workspaceEnd).
         * Compiled out of release builds via [Conditional("DEBUG")]. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        [Conditional("DEBUG")]
        private static void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws)
        {
            assert(ws->workspace <= ws->objectEnd);
            assert(ws->objectEnd <= ws->tableEnd);
            assert(ws->objectEnd <= ws->tableValidEnd);
            assert(ws->tableEnd <= ws->allocStart);
            assert(ws->tableValidEnd <= ws->allocStart);
            assert(ws->allocStart <= ws->workspaceEnd);
            assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
            assert(ws->workspace <= ws->initOnceStart);
        }

        /**
         * Align must be a power of 2.
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_align(nuint size, nuint align) + { + nuint mask = align - 1; + assert(ZSTD_isPower2(align) != 0); + return size + mask & ~mask; + } + + /** + * Use this to determine how much space in the workspace we will consume to + * allocate this object. (Normally it should be exactly the size of the object, + * but under special conditions, like ASAN, where we pad each object, it might + * be larger.) + * + * Since tables aren't currently redzoned, you don't need to call through this + * to figure out how much space you need for the matchState tables. Everything + * else is though. + * + * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_alloc_size(nuint size) + { + if (size == 0) + return 0; + return size; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_aligned_alloc_size(nuint size, nuint alignment) + { + return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); + } + + /** + * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. + * Used to determine the number of bytes required for a given "aligned". + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_aligned64_alloc_size(nuint size) + { + return ZSTD_cwksp_aligned_alloc_size(size, 64); + } + + /** + * Returns the amount of additional space the cwksp must allocate + * for internal purposes (currently only alignment). 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_slack_space_required() + { + /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES + * bytes to align the beginning of tables section and end of buffers; + */ + const nuint slackSpace = 64 * 2; + return slackSpace; + } + + /** + * Return the number of additional bytes required to align a pointer to the given number of bytes. + * alignBytes must be a power of two. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_bytes_to_align_ptr(void* ptr, nuint alignBytes) + { + nuint alignBytesMask = alignBytes - 1; + nuint bytes = alignBytes - ((nuint)ptr & alignBytesMask) & alignBytesMask; + assert(ZSTD_isPower2(alignBytes) != 0); + assert(bytes < alignBytes); + return bytes; + } + + /** + * Returns the initial value for allocStart which is used to determine the position from + * which we can allocate from the end of the workspace. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) + { + sbyte* endPtr = (sbyte*)ws->workspaceEnd; + assert(ZSTD_isPower2(64) != 0); + endPtr = endPtr - (nuint)endPtr % 64; + return endPtr; + } + + /** + * Internal function. Do not use directly. + * Reserves the given number of bytes within the aligned/buffer segment of the wksp, + * which counts from the end of the wksp (as opposed to the object/table segment). + * + * Returns a pointer to the beginning of that space. 
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, nuint bytes)
        {
            /* buffers grow downward from allocStart toward tableEnd */
            void* alloc = (byte*)ws->allocStart - bytes;
            void* bottom = ws->tableEnd;
            ZSTD_cwksp_assert_internal_consistency(ws);
            assert(alloc >= bottom);
            if (alloc < bottom)
            {
                /* out of workspace: record the failure instead of throwing */
                ws->allocFailed = 1;
                return null;
            }

            /* the new allocation may overlap previously-valid table space */
            if (alloc < ws->tableValidEnd)
            {
                ws->tableValidEnd = alloc;
            }

            ws->allocStart = alloc;
            return alloc;
        }

        /**
         * Moves the cwksp to the next phase, and does any necessary allocations.
         * cwksp initialization must necessarily go through each phase in order.
         * Returns a 0 on success, or zstd error
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
        {
            assert(phase >= ws->phase);
            if (phase > ws->phase)
            {
                /* crossing into the aligned/table phase: align the start of the tables
                 * section to 64 bytes and reset the init-once watermark */
                if (ws->phase < ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once && phase >= ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once)
                {
                    ws->tableValidEnd = ws->objectEnd;
                    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
                    {
                        void* alloc = ws->objectEnd;
                        nuint bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, 64);
                        void* objectEnd = (byte*)alloc + bytesToAlign;
                        if (objectEnd > ws->workspaceEnd)
                        {
                            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                        }

                        ws->objectEnd = objectEnd;
                        ws->tableEnd = objectEnd;
                        if (ws->tableValidEnd < ws->tableEnd)
                        {
                            ws->tableValidEnd = ws->tableEnd;
                        }
                    }
                }

                ws->phase = phase;
                ZSTD_cwksp_assert_internal_consistency(ws);
            }

            return 0;
        }

        /**
         * Returns whether this object/buffer/etc was allocated in this workspace.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr)
        {
            return ptr != null && ws->workspace <= ptr && ptr < ws->workspaceEnd ? 1 : 0;
        }

        /**
         * Internal function. Do not use directly.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, nuint bytes, ZSTD_cwksp_alloc_phase_e phase)
        {
            void* alloc;
            /* phase advance may itself fail with a zstd error code */
            if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0)
            {
                return null;
            }

            alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
            return alloc;
        }

        /**
         * Reserves and returns unaligned memory.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static byte* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, nuint bytes)
        {
            return (byte*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_buffers);
        }

        /**
         * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
         * This memory has been initialized at least once in the past.
         * This doesn't mean it has been initialized this time, and it might contain data from previous
         * operations.
         * The main usage is for algorithms that might need read access into uninitialized memory.
         * The algorithm must maintain safety under these conditions and must make sure it doesn't
         * leak any of the past data (directly or in side channels).
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, nuint bytes)
        {
            nuint alignedBytes = ZSTD_cwksp_align(bytes, 64);
            void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once);
            assert(((nuint)ptr & 64 - 1) == 0);
            /* zero only the portion that has never been handed out before,
             * then lower the init-once watermark to cover it */
            if (ptr != null && ptr < ws->initOnceStart)
            {
                memset(ptr, 0, (uint)((nuint)((byte*)ws->initOnceStart - (byte*)ptr) < alignedBytes ? (nuint)((byte*)ws->initOnceStart - (byte*)ptr) : alignedBytes));
                ws->initOnceStart = ptr;
            }

            return ptr;
        }

        /**
         * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, nuint bytes)
        {
            /* Size is rounded up to a multiple of 64 so subsequent reservations stay aligned. */
            void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, 64), ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned);
            assert(((nuint)ptr & 64 - 1) == 0);
            return ptr;
        }

        /**
         * Aligned on 64 bytes. These buffers have the special property that
         * their values remain constrained, allowing us to reuse them without
         * memset()-ing them.
         * Tables grow upward from tableEnd toward allocStart (the opposite
         * direction of buffer reservations); failure latches allocFailed.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, nuint bytes)
        {
            ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once;
            void* alloc;
            void* end;
            void* top;
            if (ws->phase < phase)
            {
                if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)))
                {
                    return null;
                }
            }

            alloc = ws->tableEnd;
            end = (byte*)alloc + bytes;
            top = ws->allocStart;
            assert((bytes & sizeof(uint) - 1) == 0);
            ZSTD_cwksp_assert_internal_consistency(ws);
            assert(end <= top);
            if (end > top)
            {
                ws->allocFailed = 1;
                return null;
            }

            ws->tableEnd = end;
            /* Callers must request 64-byte-multiple sizes; start is 64-byte aligned by phase entry. */
            assert((bytes & 64 - 1) == 0);
            assert(((nuint)alloc & 64 - 1) == 0);
            return alloc;
        }

        /**
         * Aligned on sizeof(void*).
         * Note : should happen only once, at workspace first initialization
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, nuint bytes)
        {
            /* Objects are only valid during the very first (object) phase; size is
             * rounded up to pointer alignment. */
            nuint roundedBytes = ZSTD_cwksp_align(bytes, (nuint)sizeof(void*));
            void* alloc = ws->objectEnd;
            void* end = (byte*)alloc + roundedBytes;
            assert((nuint)alloc % (nuint)sizeof(void*) == 0);
            assert(bytes % (nuint)sizeof(void*) == 0);
            ZSTD_cwksp_assert_internal_consistency(ws);
            if (ws->phase != ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd)
            {
                ws->allocFailed = 1;
                return null;
            }

            /* Table region starts immediately after the object region. */
            ws->objectEnd = end;
            ws->tableEnd = end;
            ws->tableValidEnd = end;
            return alloc;
        }

        /**
         * with alignment control
         * Note : should happen only once, at workspace first initialization
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, nuint byteSize, nuint alignment)
        {
            nuint mask = alignment - 1;
            /* Over-reserve by the worst-case misalignment (reserve_object already
             * guarantees sizeof(void*) alignment, so only the surplus is needed). */
            nuint surplus = alignment > (nuint)sizeof(void*) ? alignment - (nuint)sizeof(void*) : 0;
            void* start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
            if (start == null)
                return null;
            if (surplus == 0)
                return start;
            assert(ZSTD_isPower2(alignment) != 0);
            /* (start + surplus) & ~mask : round the bumped pointer down to `alignment`. */
            return (void*)((nuint)start + surplus & ~mask);
        }

        /* Marks the whole table region as needing re-initialization before reuse. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
        {
            assert(ws->tableValidEnd >= ws->objectEnd);
            assert(ws->tableValidEnd <= ws->allocStart);
            ws->tableValidEnd = ws->objectEnd;
            ZSTD_cwksp_assert_internal_consistency(ws);
        }

        /* Declares the current table region fully initialized (valid). */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws)
        {
            assert(ws->tableValidEnd >= ws->objectEnd);
            assert(ws->tableValidEnd <= ws->allocStart);
            if (ws->tableValidEnd < ws->tableEnd)
            {
                ws->tableValidEnd = ws->tableEnd;
            }

            ZSTD_cwksp_assert_internal_consistency(ws);
        }

        /**
         * Zero the part of the allocated tables not already marked clean.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws)
        {
            assert(ws->tableValidEnd >= ws->objectEnd);
            assert(ws->tableValidEnd <= ws->allocStart);
            if (ws->tableValidEnd < ws->tableEnd)
            {
                memset(ws->tableValidEnd, 0, (uint)(nuint)((byte*)ws->tableEnd - (byte*)ws->tableValidEnd));
            }

            ZSTD_cwksp_mark_tables_clean(ws);
        }

        /**
         * Invalidates table allocations.
         * All other allocations remain valid.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
        {
            ws->tableEnd = ws->objectEnd;
            ZSTD_cwksp_assert_internal_consistency(ws);
        }

        /**
         * Invalidates all buffer, aligned, and table allocations.
         * Object allocations remain valid.
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_clear(ZSTD_cwksp* ws) + { + ws->tableEnd = ws->objectEnd; + ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); + ws->allocFailed = 0; + if (ws->phase > ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once) + { + ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; + } + + ZSTD_cwksp_assert_internal_consistency(ws); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_sizeof(ZSTD_cwksp* ws) + { + return (nuint)((byte*)ws->workspaceEnd - (byte*)ws->workspace); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) + { + return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); + } + + /** + * The provided workspace takes ownership of the buffer [start, start+size). + * Any existing values in the workspace are ignored (the previously managed + * buffer, if present, must be separately freed). 
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, nuint size, ZSTD_cwksp_static_alloc_e isStatic)
        {
            /* The workspace base must be pointer-aligned. */
            assert(((nuint)start & (nuint)(sizeof(void*) - 1)) == 0);
            ws->workspace = start;
            ws->workspaceEnd = (byte*)start + size;
            ws->objectEnd = ws->workspace;
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
            ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects;
            ws->isStatic = isStatic;
            /* clear() establishes tableEnd/allocStart/allocFailed. */
            ZSTD_cwksp_clear(ws);
            ws->workspaceOversizedDuration = 0;
            ZSTD_cwksp_assert_internal_consistency(ws);
        }

        /* Allocates a buffer of `size` bytes via customMem and initializes `ws` over it.
         * Returns 0, or ZSTD_error_memory_allocation on allocation failure. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint ZSTD_cwksp_create(ZSTD_cwksp* ws, nuint size, ZSTD_customMem customMem)
        {
            void* workspace = ZSTD_customMalloc(size, customMem);
            if (workspace == null)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
            }

            ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc);
            return 0;
        }

        /* Frees the managed buffer and zeroes the cwksp struct.
         * The struct is cleared before freeing so a stale pointer is never left behind. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem)
        {
            void* ptr = ws->workspace;
            *ws = new ZSTD_cwksp();
            ZSTD_customFree(ptr, customMem);
        }

        /**
         * Moves the management of a workspace from one cwksp to another. The src cwksp
         * is left in an invalid state (src must be re-init()'ed before it's used again).
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src)
        {
            *dst = *src;
            *src = new ZSTD_cwksp();
        }

        /* Returns nonzero if any reservation has failed since the last clear/init. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_reserve_failed(ZSTD_cwksp* ws)
        {
            return ws->allocFailed;
        }

        /* ZSTD_alignmentSpaceWithinBounds() :
         * Returns if the estimated space needed for a wksp is within an acceptable limit of the
         * actual amount of space used.
         */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_estimated_space_within_bounds(ZSTD_cwksp* ws, nuint estimatedSpace)
        {
            /* Actual usage must sit in [estimate - slack, estimate]. */
            return estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) && ZSTD_cwksp_used(ws) <= estimatedSpace ? 1 : 0;
        }

        /*-*************************************
         * Functions
         ***************************************/
        /* Bytes still free between the table frontier and the buffer frontier. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static nuint ZSTD_cwksp_available_space(ZSTD_cwksp* ws)
        {
            return (nuint)((byte*)ws->allocStart - (byte*)ws->tableEnd);
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, nuint additionalNeededSpace)
        {
            return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace ? 1 : 0;
        }

        /* "Too large" = at least 3x the needed space is still free. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, nuint additionalNeededSpace)
        {
            return ZSTD_cwksp_check_available(ws, additionalNeededSpace * 3);
        }

        /* Wasteful = oversized for more than 128 consecutive uses. */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, nuint additionalNeededSpace)
        {
            return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 && ws->workspaceOversizedDuration > 128 ? 1 : 0;
        }

        /* Tracks how long the workspace has stayed oversized (resets once it fits snugly). */
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws, nuint additionalNeededSpace)
        {
            if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0)
            {
                ws->workspaceOversizedDuration++;
            }
            else
            {
                ws->workspaceOversizedDuration = 0;
            }
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs
new file mode 100644
index 000000000..d7edbcd39
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs
@@ -0,0 +1,237 @@
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* note: several prototypes are already published in `zstd.h` :
         * ZSTD_createDDict()
         * ZSTD_createDDict_byReference()
         * ZSTD_createDDict_advanced()
         * ZSTD_freeDDict()
         * ZSTD_initStaticDDict()
         * ZSTD_sizeof_DDict()
         * ZSTD_estimateDDictSize()
         * ZSTD_getDictID_fromDict()
         */
        /* Accessor: raw dictionary bytes held (or referenced) by the DDict. */
        private static void* ZSTD_DDict_dictContent(ZSTD_DDict_s* ddict)
        {
            assert(ddict != null);
            return ddict->dictContent;
        }

        /* Accessor: size in bytes of the dictionary content. */
        private static nuint ZSTD_DDict_dictSize(ZSTD_DDict_s* ddict)
        {
            assert(ddict != null);
            return ddict->dictSize;
        }

        /* Installs the ddict's content window and (if present) its pre-digested
         * entropy tables into the decompression context. */
        private static void ZSTD_copyDDictParameters(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict)
        {
            assert(dctx != null);
            assert(ddict != null);
            dctx->dictID = ddict->dictID;
            dctx->prefixStart = ddict->dictContent;
            dctx->virtualStart = ddict->dictContent;
            dctx->dictEnd = (byte*)ddict->dictContent + ddict->dictSize;
            dctx->previousDstEnd = dctx->dictEnd;
            if (ddict->entropyPresent != 0)
            {
                /* Point the dctx at the ddict-owned FSE/HUF tables and copy repcodes. */
                dctx->litEntropy = 1;
                dctx->fseEntropy = 1;
                dctx->LLTptr = &ddict->entropy.LLTable.e0;
                dctx->MLTptr = &ddict->entropy.MLTable.e0;
                dctx->OFTptr = &ddict->entropy.OFTable.e0;
                dctx->HUFptr = ddict->entropy.hufTable;
                dctx->entropy.rep[0] = ddict->entropy.rep[0];
                dctx->entropy.rep[1] = ddict->entropy.rep[1];
                dctx->entropy.rep[2] = ddict->entropy.rep[2];
            }
            else
            {
                dctx->litEntropy = 0;
                dctx->fseEntropy = 0;
            }
        }

        /* Parses the dictionary header (magic 0xEC30A437) and digests its entropy
         * tables into the ddict. Raw-content dicts and short buffers are accepted
         * as content-only; a corrupted full dict is an error. */
        private static nuint ZSTD_loadEntropy_intoDDict(ZSTD_DDict_s* ddict, ZSTD_dictContentType_e dictContentType)
        {
            ddict->dictID = 0;
            ddict->entropyPresent = 0;
            if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent)
                return 0;
            if (ddict->dictSize < 8)
            {
                if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted));
                return 0;
            }

            {
                /* 0xEC30A437 is the Zstandard dictionary magic number. */
                uint magic = MEM_readLE32(ddict->dictContent);
                if (magic != 0xEC30A437)
                {
                    if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict)
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted));
                    return 0;
                }
            }

            /* dictID is stored little-endian right after the magic. */
            ddict->dictID = MEM_readLE32((sbyte*)ddict->dictContent + 4);
            if (ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize)))
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted));
            }

            ddict->entropyPresent = 1;
            return 0;
        }

        /* Copies or references the dictionary bytes into the ddict, then digests
         * its entropy tables. Returns 0 or a zstd error code. */
        private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
        {
            if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef || dict == null || dictSize == 0)
            {
                ddict->dictBuffer = null;
                ddict->dictContent = dict;
                if (dict == null)
                    dictSize = 0;
            }
            else
            {
                void* internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
                ddict->dictBuffer = internalBuffer;
                ddict->dictContent = internalBuffer;
                if (internalBuffer == null)
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
                memcpy(internalBuffer, dict, (uint)dictSize);
            }

            ddict->dictSize = dictSize;
            /* Pre-seed the HUF table header; 12 * 0x1000001 presumably encodes the
             * default table-log in every header byte - TODO confirm against HUF layout. */
            ddict->entropy.hufTable[0] = 12 * 0x1000001;
            {
                /* parse dictionary content */
                nuint err_code = ZSTD_loadEntropy_intoDDict(ddict, dictContentType);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            return 0;
        }

        /* Full-control DDict constructor. A custom allocator must supply both
         * customAlloc and customFree, or neither (the XOR check below). */
        public static ZSTD_DDict_s* ZSTD_createDDict_advanced(void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem)
        {
            if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0)
                return null;
            {
                ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DDict_s), customMem);
                if (ddict == null)
                    return null;
                ddict->cMem = customMem;
                {
                    nuint initResult = ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType);
                    if (ERR_isError(initResult))
                    {
                        ZSTD_freeDDict(ddict);
                        return null;
                    }
                }

                return ddict;
            }
        }

        /*! ZSTD_createDDict() :
         * Create a digested dictionary, to start decompression without startup delay.
         * `dict` content is copied inside DDict.
         * Consequently, `dict` can be released after `ZSTD_DDict` creation */
        public static ZSTD_DDict_s* ZSTD_createDDict(void* dict, nuint dictSize)
        {
            ZSTD_customMem allocator = new ZSTD_customMem
            {
                customAlloc = null,
                customFree = null,
                opaque = null
            };
            return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto, allocator);
        }

        /*! ZSTD_createDDict_byReference() :
         * Create a digested dictionary, to start decompression without startup delay.
         * Dictionary content is simply referenced, it will be accessed during decompression.
         * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
        public static ZSTD_DDict_s* ZSTD_createDDict_byReference(void* dictBuffer, nuint dictSize)
        {
            ZSTD_customMem allocator = new ZSTD_customMem
            {
                customAlloc = null,
                customFree = null,
                opaque = null
            };
            return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto, allocator);
        }

        /* Builds a DDict inside a caller-provided, 8-byte-aligned buffer.
         * With byCopy, the dictionary bytes are placed immediately after the
         * DDict struct inside that same buffer. Returns null if the buffer is
         * misaligned or too small. */
        public static ZSTD_DDict_s* ZSTD_initStaticDDict(void* sBuffer, nuint sBufferSize, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
        {
            nuint neededSpace = (nuint)sizeof(ZSTD_DDict_s) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize);
            ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)sBuffer;
            assert(sBuffer != null);
            assert(dict != null);
            if (((nuint)sBuffer & 7) != 0)
                return null;
            if (sBufferSize < neededSpace)
                return null;
            if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy)
            {
                memcpy(ddict + 1, dict, (uint)dictSize);
                dict = ddict + 1;
            }

            /* Always byRef internally: the bytes (original or in-buffer copy) are not re-copied. */
            if (ERR_isError(ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType)))
                return null;
            return ddict;
        }

        /*! ZSTD_freeDDict() :
         * Function frees memory allocated with ZSTD_createDDict()
         * If a NULL pointer is passed, no operation is performed. */
        public static nuint ZSTD_freeDDict(ZSTD_DDict_s* ddict)
        {
            if (ddict == null)
                return 0;
            {
                /* dictBuffer is null for by-reference dicts; freeing null is a no-op. */
                ZSTD_customMem cMem = ddict->cMem;
                ZSTD_customFree(ddict->dictBuffer, cMem);
                ZSTD_customFree(ddict, cMem);
                return 0;
            }
        }

        /*! ZSTD_estimateDDictSize() :
         * Estimate amount of memory that will be needed to create a dictionary for decompression.
         * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
        public static nuint ZSTD_estimateDDictSize(nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
        {
            return (nuint)sizeof(ZSTD_DDict_s) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize);
        }

        /* Actual memory footprint of an existing DDict (content counted only when owned). */
        public static nuint ZSTD_sizeof_DDict(ZSTD_DDict_s* ddict)
        {
            if (ddict == null)
                return 0;
            return (nuint)sizeof(ZSTD_DDict_s) + (ddict->dictBuffer != null ? ddict->dictSize : 0);
        }

        /*! ZSTD_getDictID_fromDDict() :
         * Provides the dictID of the dictionary loaded into `ddict`.
         * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
         * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
        public static uint ZSTD_getDictID_fromDDict(ZSTD_DDict_s* ddict)
        {
            if (ddict == null)
                return 0;
            return ddict->dictID;
        }
    }
}
\ No newline at end of file
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs
new file mode 100644
index 000000000..65454a56c
--- /dev/null
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs
@@ -0,0 +1,2878 @@
using static ZstdSharp.UnsafeHelper;

namespace ZstdSharp.Unsafe
{
    public static unsafe partial class Methods
    {
        /* Hash function to determine starting position of dict insertion within the table
         * Returns an index in [0, hashSet->ddictPtrTableSize) — the table size is a
         * power of two, so masking with (size - 1) keeps the XXH64 hash in range.
         */
        private static nuint ZSTD_DDictHashSet_getIndex(ZSTD_DDictHashSet* hashSet, uint dictID)
        {
            ulong hash = ZSTD_XXH64(&dictID, sizeof(uint), 0);
            return (nuint)(hash & hashSet->ddictPtrTableSize - 1);
        }

        /* Adds DDict to a hashset without resizing it.
         * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
         * Returns 0 if successful, or a zstd error code if something went wrong.
         */
        private static nuint ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, ZSTD_DDict_s* ddict)
        {
            uint dictID = ZSTD_getDictID_fromDDict(ddict);
            nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
            nuint idxRangeMask = hashSet->ddictPtrTableSize - 1;
            /* A full table cannot accept a new entry (the caller expands first). */
            if (hashSet->ddictPtrCount == hashSet->ddictPtrTableSize)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
            }

            /* Linear probing; an existing entry with the same dictID is replaced in place. */
            while (hashSet->ddictPtrTable[idx] != null)
            {
                if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID)
                {
                    hashSet->ddictPtrTable[idx] = ddict;
                    return 0;
                }

                /* NOTE(review): mask-then-increment lets idx momentarily equal
                 * ddictPtrTableSize before the next table read — verify this matches
                 * upstream zstd's probe order (idx++ then mask would wrap first). */
                idx &= idxRangeMask;
                idx++;
            }

            hashSet->ddictPtrTable[idx] = ddict;
            hashSet->ddictPtrCount++;
            return 0;
        }

        /* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
         * rehashes all values, allocates new table, frees old table.
         * Returns 0 on success, otherwise a zstd error code.
         */
        private static nuint ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem)
        {
            nuint newTableSize = hashSet->ddictPtrTableSize * 2;
            ZSTD_DDict_s** newTable = (ZSTD_DDict_s**)ZSTD_customCalloc((nuint)sizeof(ZSTD_DDict_s*) * newTableSize, customMem);
            ZSTD_DDict_s** oldTable = hashSet->ddictPtrTable;
            nuint oldTableSize = hashSet->ddictPtrTableSize;
            nuint i;
            if (newTable == null)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
            }

            /* Install the (zeroed) new table, then re-emplace every old entry. */
            hashSet->ddictPtrTable = newTable;
            hashSet->ddictPtrTableSize = newTableSize;
            hashSet->ddictPtrCount = 0;
            for (i = 0; i < oldTableSize; ++i)
            {
                if (oldTable[i] != null)
                {
                    nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]);
                    if (ERR_isError(err_code))
                    {
                        return err_code;
                    }
                }
            }

            ZSTD_customFree(oldTable, customMem);
            return 0;
        }

        /* Fetches a DDict with the given dictID
         * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
         */
        private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, uint dictID)
        {
            nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
            nuint idxRangeMask = hashSet->ddictPtrTableSize - 1;
            /* Linear probe until the dictID matches or an empty slot (dictID 0) is hit.
             * NOTE(review): same mask-then-increment probe order as emplaceDDict —
             * verify against upstream zstd. */
            for (; ; )
            {
                nuint currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
                if (currDictID == dictID || currDictID == 0)
                {
                    break;
                }
                else
                {
                    idx &= idxRangeMask;
                    idx++;
                }
            }

            return hashSet->ddictPtrTable[idx];
        }

        /* Allocates space for and returns a ddict hash set
         * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
         * Initial capacity is 64 slots (calloc guarantees the NULLs).
         * Returns NULL if allocation failed.
         */
        private static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem)
        {
            ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DDictHashSet), customMem);
            if (ret == null)
                return null;
            ret->ddictPtrTable = (ZSTD_DDict_s**)ZSTD_customCalloc((nuint)(64 * sizeof(ZSTD_DDict_s*)), customMem);
            if (ret->ddictPtrTable == null)
            {
                ZSTD_customFree(ret, customMem);
                return null;
            }

            ret->ddictPtrTableSize = 64;
            ret->ddictPtrCount = 0;
            return ret;
        }

        /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
         * Note: The ZSTD_DDict* within the table are NOT freed.
         */
        private static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem)
        {
            if (hashSet != null && hashSet->ddictPtrTable != null)
            {
                ZSTD_customFree(hashSet->ddictPtrTable, customMem);
            }

            if (hashSet != null)
            {
                ZSTD_customFree(hashSet, customMem);
            }
        }

        /* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
         * Returns 0 on success, or a ZSTD error.
         */
        private static nuint ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, ZSTD_DDict_s* ddict, ZSTD_customMem customMem)
        {
            /* Load-factor check ported verbatim from upstream zstd
             * (count * MAX_LOAD_FACTOR_COUNT_MULT / size * MAX_LOAD_FACTOR_SIZE_MULT):
             * expands the table once the population crosses the threshold. */
            if (hashSet->ddictPtrCount * 4 / hashSet->ddictPtrTableSize * 3 != 0)
            {
                nuint err_code = ZSTD_DDictHashSet_expand(hashSet, customMem);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            {
                nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict);
                if (ERR_isError(err_code))
                {
                    return err_code;
                }
            }

            return 0;
        }

        /*-*************************************************************
         * Context management
         ***************************************************************/
        /* Memory footprint of a DCtx: the struct, its local ddict, and both streaming buffers. */
        public static nuint ZSTD_sizeof_DCtx(ZSTD_DCtx_s* dctx)
        {
            if (dctx == null)
                return 0;
            return (nuint)sizeof(ZSTD_DCtx_s) + ZSTD_sizeof_DDict(dctx->ddictLocal) + dctx->inBuffSize + dctx->outBuffSize;
        }

        public static nuint ZSTD_estimateDCtxSize()
        {
            return (nuint)sizeof(ZSTD_DCtx_s);
        }

        /* Minimum bytes needed to start parsing a frame header:
         * 5 with the 4-byte magic number (zstd1), 1 in magicless mode. */
        private static nuint ZSTD_startingInputLength(ZSTD_format_e format)
        {
            nuint startingInputLength = (nuint)(format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1);
            assert(format == ZSTD_format_e.ZSTD_f_zstd1 || format == ZSTD_format_e.ZSTD_f_zstd1_magicless);
            return startingInputLength;
        }

        /* Restores all advanced decompression parameters to their defaults.
         * Only legal while the stream is in its init stage. */
        private static void ZSTD_DCtx_resetParameters(ZSTD_DCtx_s* dctx)
        {
            assert(dctx->streamStage == ZSTD_dStreamStage.zdss_init);
            dctx->format = ZSTD_format_e.ZSTD_f_zstd1;
            /* Default window limit: 2^27 + 1 bytes. */
            dctx->maxWindowSize = ((uint)1 << 27) + 1;
            dctx->outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_buffered;
            dctx->forceIgnoreChecksum = ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum;
            dctx->refMultipleDDicts = ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict;
            dctx->disableHufAsm = 0;
            dctx->maxBlockSizeParam = 0;
        }

        /* Puts a freshly allocated (or static) DCtx into its pristine state. */
        private static void ZSTD_initDCtx_internal(ZSTD_DCtx_s* dctx)
        {
            dctx->staticSize = 0;
            dctx->ddict = null;
            dctx->ddictLocal = null;
            dctx->dictEnd = null;
            dctx->ddictIsCold = 0;
            dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use;
            dctx->inBuff = null;
            dctx->inBuffSize = 0;
            dctx->outBuffSize = 0;
            dctx->streamStage = ZSTD_dStreamStage.zdss_init;
            dctx->noForwardProgress = 0;
            dctx->oversizedDuration = 0;
            dctx->isFrameDecompression = 1;
            dctx->ddictSet = null;
            ZSTD_DCtx_resetParameters(dctx);
        }

        /* Builds a DCtx inside a caller-provided, 8-byte-aligned workspace;
         * the remainder of the workspace (past the struct) serves as inBuff. */
        public static ZSTD_DCtx_s* ZSTD_initStaticDCtx(void* workspace, nuint workspaceSize)
        {
            ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)workspace;
            if (((nuint)workspace & 7) != 0)
                return null;
            if (workspaceSize < (nuint)sizeof(ZSTD_DCtx_s))
                return null;
            ZSTD_initDCtx_internal(dctx);
            dctx->staticSize = workspaceSize;
            dctx->inBuff = (sbyte*)(dctx + 1);
            return dctx;
        }

        /* Heap-allocating DCtx constructor; a custom allocator must supply both
         * customAlloc and customFree, or neither (the XOR check below). */
        private static ZSTD_DCtx_s* ZSTD_createDCtx_internal(ZSTD_customMem customMem)
        {
            if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0)
                return null;
            {
                ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DCtx_s), customMem);
                if (dctx == null)
                    return null;
                dctx->customMem = customMem;
                ZSTD_initDCtx_internal(dctx);
                return dctx;
            }
        }

        public static ZSTD_DCtx_s* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
        {
            return ZSTD_createDCtx_internal(customMem);
        }

        public static ZSTD_DCtx_s* ZSTD_createDCtx()
        {
            return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
        }

        /* Drops any referenced/owned dictionary from the context. */
        private static void ZSTD_clearDict(ZSTD_DCtx_s* dctx)
        {
            ZSTD_freeDDict(dctx->ddictLocal);
            dctx->ddictLocal = null;
            dctx->ddict = null;
            dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use;
        }

        /* Frees a heap-allocated DCtx. Statically-initialized contexts cannot be
         * freed and return ZSTD_error_memory_allocation instead. */
        public static nuint ZSTD_freeDCtx(ZSTD_DCtx_s* dctx)
        {
            if (dctx == null)
                return 0;
            if (dctx->staticSize != 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation));
            }

            {
                ZSTD_customMem cMem = dctx->customMem;
                ZSTD_clearDict(dctx);
                ZSTD_customFree(dctx->inBuff, cMem);
                dctx->inBuff = null;
                if (dctx->ddictSet != null)
                {
                    ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
                    dctx->ddictSet = null;
                }

                ZSTD_customFree(dctx, cMem);
                return 0;
            }
        }

        /* no longer useful */
        public static void ZSTD_copyDCtx(ZSTD_DCtx_s* dstDCtx, ZSTD_DCtx_s* srcDCtx)
        {
            /* Copies every field laid out before inBuff — relies on the struct's
             * field ordering, so buffer pointers/sizes are deliberately excluded. */
            nuint toCopy = (nuint)((sbyte*)&dstDCtx->inBuff - (sbyte*)dstDCtx);
            memcpy(dstDCtx, srcDCtx, (uint)toCopy);
        }

        /* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
         * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
         * accordingly sets the ddict to be used to decompress the frame.
         *
         * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
         *
         * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
         */
        private static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx_s* dctx)
        {
            assert(dctx->refMultipleDDicts != default && dctx->ddictSet != null);
            if (dctx->ddict != null)
            {
                /* Look up the dictID announced by the current frame header. */
                ZSTD_DDict_s* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
                if (frameDDict != null)
                {
                    ZSTD_clearDict(dctx);
                    dctx->dictID = dctx->fParams.dictID;
                    dctx->ddict = frameDDict;
                    dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely;
                }
            }
        }

        /*! ZSTD_isFrame() :
         * Tells if the content of `buffer` starts with a valid Frame Identifier.
         * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
         * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
         * Note 3 : Skippable Frame Identifiers are considered valid. */
        public static uint ZSTD_isFrame(void* buffer, nuint size)
        {
            if (size < 4)
                return 0;
            {
                /* 0xFD2FB528 = zstd frame magic; 0x184D2A5? = skippable frame magic range. */
                uint magic = MEM_readLE32(buffer);
                if (magic == 0xFD2FB528)
                    return 1;
                if ((magic & 0xFFFFFFF0) == 0x184D2A50)
                    return 1;
            }

            return 0;
        }

        /*! ZSTD_isSkippableFrame() :
         * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
         * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
         */
        public static uint ZSTD_isSkippableFrame(void* buffer, nuint size)
        {
            if (size < 4)
                return 0;
            {
                uint magic = MEM_readLE32(buffer);
                if ((magic & 0xFFFFFFF0) == 0x184D2A50)
                    return 1;
            }

            return 0;
        }

        /** ZSTD_frameHeaderSize_internal() :
         * srcSize must be large enough to reach header size fields.
         * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
         * @return : size of the Frame Header
         * or an error code, which can be tested with ZSTD_isError() */
        private static nuint ZSTD_frameHeaderSize_internal(void* src, nuint srcSize, ZSTD_format_e format)
        {
            nuint minInputSize = ZSTD_startingInputLength(format);
            if (srcSize < minInputSize)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
            }

            {
                /* Frame_Header_Descriptor byte: bits 0-1 dictID size code,
                 * bit 5 single-segment flag, bits 6-7 frame-content-size code. */
                byte fhd = ((byte*)src)[minInputSize - 1];
                uint dictID = (uint)(fhd & 3);
                uint singleSegment = (uint)(fhd >> 5 & 1);
                uint fcsId = (uint)(fhd >> 6);
                /* +1 window descriptor byte when not single-segment;
                 * +1 content-size byte when single-segment with fcsId 0. */
                return minInputSize + (nuint)(singleSegment == 0 ? 1 : 0) + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0);
            }
        }

        /** ZSTD_frameHeaderSize() :
         * srcSize must be >= ZSTD_frameHeaderSize_prefix.
         * @return : size of the Frame Header,
         * or an error code (if srcSize is too small) */
        public static nuint ZSTD_frameHeaderSize(void* src, nuint srcSize)
        {
            return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1);
        }

        /** ZSTD_getFrameHeader_advanced() :
         * decode Frame Header, or require larger `srcSize`.
         * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
         * @return : 0, `zfhPtr` is correctly filled,
         *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
         **          or an error code, which can be tested using ZSTD_isError() */
        public static nuint ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* src, nuint srcSize, ZSTD_format_e format)
        {
            byte* ip = (byte*)src;
            nuint minInputSize = ZSTD_startingInputLength(format);
            if (srcSize > 0)
            {
                if (src == null)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
                }
            }

            if (srcSize < minInputSize)
            {
                if (srcSize > 0 && format != ZSTD_format_e.ZSTD_f_zstd1_magicless)
                {
                    /* when receiving less than @minInputSize bytes,
                     * control these bytes at least correspond to a supported magic number
                     * in order to error out early if they don't.
                     **/
                    nuint toCopy = 4 < srcSize ? 4 : srcSize;
                    byte* hbuf = stackalloc byte[4];
                    /* Pre-fill with the expected magic so a partial prefix can still match. */
                    MEM_writeLE32(hbuf, 0xFD2FB528);
                    assert(src != null);
                    memcpy(hbuf, src, (uint)toCopy);
                    if (MEM_readLE32(hbuf) != 0xFD2FB528)
                    {
                        /* Not a zstd frame prefix: retry against the skippable-frame magic range. */
                        MEM_writeLE32(hbuf, 0x184D2A50);
                        memcpy(hbuf, src, (uint)toCopy);
                        if ((MEM_readLE32(hbuf) & 0xFFFFFFF0) != 0x184D2A50)
                        {
                            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown));
                        }
                    }
                }

                /* Plausible prefix, but more input is required. */
                return minInputSize;
            }

            *zfhPtr = new ZSTD_frameHeader();
            if (format != ZSTD_format_e.ZSTD_f_zstd1_magicless && MEM_readLE32(src) != 0xFD2FB528)
            {
                if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50)
                {
                    /* Skippable frame: 4-byte magic + 4-byte content size. */
                    if (srcSize < 8)
                        return 8;
                    *zfhPtr = new ZSTD_frameHeader
                    {
                        frameType = ZSTD_frameType_e.ZSTD_skippableFrame,
                        dictID = MEM_readLE32(src) - 0x184D2A50,
                        headerSize = 8,
                        frameContentSize = MEM_readLE32((sbyte*)src + 4)
                    };
                    return 0;
                }

                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown));
            }

            {
                nuint fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
                if (srcSize < fhsize)
                    return fhsize;
                zfhPtr->headerSize = (uint)fhsize;
            }

            {
                /* Decode the Frame_Header_Descriptor byte (see RFC 8878). */
                byte fhdByte = ip[minInputSize - 1];
                nuint pos = minInputSize;
                uint dictIDSizeCode = (uint)(fhdByte & 3);
                uint checksumFlag = (uint)(fhdByte >> 2 & 1);
                uint singleSegment = (uint)(fhdByte >> 5 & 1);
                uint fcsID = (uint)(fhdByte >> 6);
                ulong windowSize = 0;
                uint dictID = 0;
                ulong frameContentSize = unchecked(0UL - 1);
                /* Bit 3 is reserved and must be zero. */
                if ((fhdByte & 0x08) != 0)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported));
                }

                if (singleSegment == 0)
                {
                    /* Window_Descriptor byte: exponent in high 5 bits, mantissa (eighths) in low 3. */
                    byte wlByte = ip[pos++];
                    uint windowLog = (uint)((wlByte >> 3) + 10);
                    if (windowLog > (uint)(sizeof(nuint) == 4 ? 30 : 31))
                    {
                        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge));
                    }

                    windowSize = 1UL << (int)windowLog;
                    windowSize += (windowSize >> 3) * (ulong)(wlByte & 7);
                }

                /* Dictionary_ID field: 0, 1, 2 or 4 bytes depending on the size code. */
                switch (dictIDSizeCode)
                {
                    default:
                        assert(0 != 0);
                        goto case 0;
                    case 0:
                        break;
                    case 1:
                        dictID = ip[pos];
                        pos++;
                        break;
                    case 2:
                        dictID = MEM_readLE16(ip + pos);
                        pos += 2;
                        break;
                    case 3:
                        dictID = MEM_readLE32(ip + pos);
                        pos += 4;
                        break;
                }

                /* Frame_Content_Size field: 0/1, 2 (+256 bias), 4 or 8 bytes. */
                switch (fcsID)
                {
                    default:
                        assert(0 != 0);
                        goto case 0;
                    case 0:
                        if (singleSegment != 0)
                            frameContentSize = ip[pos];
                        break;
                    case 1:
                        frameContentSize = (ulong)(MEM_readLE16(ip + pos) + 256);
                        break;
                    case 2:
                        frameContentSize = MEM_readLE32(ip + pos);
                        break;
                    case 3:
                        frameContentSize = MEM_readLE64(ip + pos);
                        break;
                }

                /* Single-segment frames have no window descriptor: window = content size. */
                if (singleSegment != 0)
                    windowSize = frameContentSize;
                zfhPtr->frameType = ZSTD_frameType_e.ZSTD_frame;
                zfhPtr->frameContentSize = frameContentSize;
                zfhPtr->windowSize = windowSize;
                zfhPtr->blockSizeMax = (uint)(windowSize < 1 << 17 ? windowSize : 1 << 17);
                zfhPtr->dictID = dictID;
                zfhPtr->checksumFlag = checksumFlag;
            }

            return 0;
        }

        /** ZSTD_getFrameHeader() :
         * decode Frame Header, or require larger `srcSize`.
         * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ + public static nuint ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, void* src, nuint srcSize) + { + return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + } + + /** ZSTD_getFrameContentSize() : + * compatible with legacy mode + * @return : decompressed size of the single frame pointed to by `src` if known, otherwise + * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ + public static ulong ZSTD_getFrameContentSize(void* src, nuint srcSize) + { + ZSTD_frameHeader zfh; + if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) + return unchecked(0UL - 2); + if (zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame) + { + return 0; + } + else + { + return zfh.frameContentSize; + } + } + + private static nuint readSkippableFrameSize(void* src, nuint srcSize) + { + const nuint skippableHeaderSize = 8; + uint sizeU32; + if (srcSize < 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + sizeU32 = MEM_readLE32((byte*)src + 4); + if (sizeU32 + 8 < sizeU32) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + } + + { + nuint skippableSize = skippableHeaderSize + sizeU32; + if (skippableSize > srcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + return skippableSize; + } + } + + /*! ZSTD_readSkippableFrame() : + * Retrieves content of a skippable frame, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. 
+ * + * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. + * + * @return : number of bytes written or a ZSTD error. + */ + public static nuint ZSTD_readSkippableFrame(void* dst, nuint dstCapacity, uint* magicVariant, void* src, nuint srcSize) + { + if (srcSize < 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + { + uint magicNumber = MEM_readLE32(src); + nuint skippableFrameSize = readSkippableFrameSize(src, srcSize); + nuint skippableContentSize = skippableFrameSize - 8; + if (ZSTD_isSkippableFrame(src, srcSize) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + } + + if (skippableFrameSize < 8 || skippableFrameSize > srcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (skippableContentSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (skippableContentSize > 0 && dst != null) + memcpy(dst, (byte*)src + 8, (uint)skippableContentSize); + if (magicVariant != null) + *magicVariant = magicNumber - 0x184D2A50; + return skippableContentSize; + } + } + + /** ZSTD_findDecompressedSize() : + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * note: compatible with legacy mode + * @return : decompressed size of the frames contained */ + public static ulong ZSTD_findDecompressedSize(void* src, nuint srcSize) + { + ulong totalDstSize = 0; + while (srcSize >= ZSTD_startingInputLength(ZSTD_format_e.ZSTD_f_zstd1)) + { + uint magicNumber = MEM_readLE32(src); + if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) + { + nuint skippableSize = readSkippableFrameSize(src, srcSize); + if (ERR_isError(skippableSize)) + return unchecked(0UL - 2); + assert(skippableSize <= srcSize); + src = (byte*)src + skippableSize; + srcSize -= skippableSize; + continue; + } + + { + ulong fcs = 
ZSTD_getFrameContentSize(src, srcSize); + if (fcs >= unchecked(0UL - 2)) + return fcs; + if (totalDstSize + fcs < totalDstSize) + return unchecked(0UL - 2); + totalDstSize += fcs; + } + + { + nuint frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ERR_isError(frameSrcSize)) + return unchecked(0UL - 2); + assert(frameSrcSize <= srcSize); + src = (byte*)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } + + if (srcSize != 0) + return unchecked(0UL - 2); + return totalDstSize; + } + + /** ZSTD_getDecompressedSize() : + * compatible with legacy mode + * @return : decompressed size if known, 0 otherwise + note : 0 can mean any of the following : + - frame content is empty + - decompressed size field is not present in frame header + - frame header unknown / not supported + - frame header not complete (`srcSize` too small) */ + public static ulong ZSTD_getDecompressedSize(void* src, nuint srcSize) + { + ulong ret = ZSTD_getFrameContentSize(src, srcSize); + return ret >= unchecked(0UL - 2) ? 0 : ret; + } + + /** ZSTD_decodeFrameHeader() : + * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). + * If multiple DDict references are enabled, also will choose the correct DDict to use. 
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ + private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint headerSize) + { + nuint result = ZSTD_getFrameHeader_advanced(&dctx->fParams, src, headerSize, dctx->format); + if (ERR_isError(result)) + return result; + if (result > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts && dctx->ddictSet != null) + { + ZSTD_DCtx_selectFrameDDict(dctx); + } + + if (dctx->fParams.dictID != 0 && dctx->dictID != dctx->fParams.dictID) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + } + + dctx->validateChecksum = (uint)(dctx->fParams.checksumFlag != 0 && dctx->forceIgnoreChecksum == default ? 1 : 0); + if (dctx->validateChecksum != 0) + ZSTD_XXH64_reset(&dctx->xxhState, 0); + dctx->processedCSize += headerSize; + return 0; + } + + private static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(nuint ret) + { + ZSTD_frameSizeInfo frameSizeInfo; + System.Runtime.CompilerServices.Unsafe.SkipInit(out frameSizeInfo); + frameSizeInfo.compressedSize = ret; + frameSizeInfo.decompressedBound = unchecked(0UL - 2); + return frameSizeInfo; + } + + private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(void* src, nuint srcSize, ZSTD_format_e format) + { + ZSTD_frameSizeInfo frameSizeInfo; + frameSizeInfo = new ZSTD_frameSizeInfo(); + if (format == ZSTD_format_e.ZSTD_f_zstd1 && srcSize >= 8 && (MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) + { + frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); + assert(ERR_isError(frameSizeInfo.compressedSize) || frameSizeInfo.compressedSize <= srcSize); + return frameSizeInfo; + } + else + { + byte* ip = (byte*)src; + byte* ipstart = ip; + nuint remainingSize = srcSize; + nuint nbBlocks = 0; + ZSTD_frameHeader zfh; + { + nuint ret = ZSTD_getFrameHeader_advanced(&zfh, src, 
srcSize, format); + if (ERR_isError(ret)) + return ZSTD_errorFrameSizeInfo(ret); + if (ret > 0) + return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + } + + ip += zfh.headerSize; + remainingSize -= zfh.headerSize; + while (true) + { + blockProperties_t blockProperties; + nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ERR_isError(cBlockSize)) + return ZSTD_errorFrameSizeInfo(cBlockSize); + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) + return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; + nbBlocks++; + if (blockProperties.lastBlock != 0) + break; + } + + if (zfh.checksumFlag != 0) + { + if (remainingSize < 4) + return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + ip += 4; + } + + frameSizeInfo.nbBlocks = nbBlocks; + frameSizeInfo.compressedSize = (nuint)(ip - ipstart); + frameSizeInfo.decompressedBound = zfh.frameContentSize != unchecked(0UL - 1) ? 
zfh.frameContentSize : (ulong)nbBlocks * zfh.blockSizeMax; + return frameSizeInfo; + } + } + + private static nuint ZSTD_findFrameCompressedSize_advanced(void* src, nuint srcSize, ZSTD_format_e format) + { + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); + return frameSizeInfo.compressedSize; + } + + /** ZSTD_findFrameCompressedSize() : + * See docs in zstd.h + * Note: compatible with legacy mode */ + public static nuint ZSTD_findFrameCompressedSize(void* src, nuint srcSize) + { + return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + } + + /** ZSTD_decompressBound() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame or a skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the maximum decompressed size of the compressed source + */ + public static ulong ZSTD_decompressBound(void* src, nuint srcSize) + { + ulong bound = 0; + while (srcSize > 0) + { + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + nuint compressedSize = frameSizeInfo.compressedSize; + ulong decompressedBound = frameSizeInfo.decompressedBound; + if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) + return unchecked(0UL - 2); + assert(srcSize >= compressedSize); + src = (byte*)src + compressedSize; + srcSize -= compressedSize; + bound += decompressedBound; + } + + return bound; + } + + /*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. 
+ * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). + */ + public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) + { + nuint margin = 0; + uint maxBlockSize = 0; + while (srcSize > 0) + { + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + nuint compressedSize = frameSizeInfo.compressedSize; + ulong decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_frameHeader zfh; + { + nuint err_code = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (zfh.frameType == ZSTD_frameType_e.ZSTD_frame) + { + margin += zfh.headerSize; + margin += (nuint)(zfh.checksumFlag != 0 ? 4 : 0); + margin += 3 * frameSizeInfo.nbBlocks; + maxBlockSize = maxBlockSize > zfh.blockSizeMax ? maxBlockSize : zfh.blockSizeMax; + } + else + { + assert(zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame); + margin += compressedSize; + } + + assert(srcSize >= compressedSize); + src = (byte*)src + compressedSize; + srcSize -= compressedSize; + } + + margin += maxBlockSize; + return margin; + } + + /** ZSTD_insertBlock() : + * insert `src` block into `dctx` history. 
Useful to track uncompressed blocks. */ + public static nuint ZSTD_insertBlock(ZSTD_DCtx_s* dctx, void* blockStart, nuint blockSize) + { + ZSTD_checkContinuity(dctx, blockStart, blockSize); + dctx->previousDstEnd = (sbyte*)blockStart + blockSize; + return blockSize; + } + + private static nuint ZSTD_copyRawBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + if (srcSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (dst == null) + { + if (srcSize == 0) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); + } + + memmove(dst, src, srcSize); + return srcSize; + } + + private static nuint ZSTD_setRleBlock(void* dst, nuint dstCapacity, byte b, nuint regenSize) + { + if (regenSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (dst == null) + { + if (regenSize == 0) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); + } + + memset(dst, b, (uint)regenSize); + return regenSize; + } + + private static void ZSTD_DCtx_trace_end(ZSTD_DCtx_s* dctx, ulong uncompressedSize, ulong compressedSize, int streaming) + { + } + + /*! ZSTD_decompressFrame() : + * @dctx must be properly initialized + * will update *srcPtr and *srcSizePtr, + * to make *srcPtr progress by one frame. */ + private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void** srcPtr, nuint* srcSizePtr) + { + byte* istart = (byte*)*srcPtr; + byte* ip = istart; + byte* ostart = (byte*)dst; + byte* oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; + byte* op = ostart; + nuint remainingSrcSize = *srcSizePtr; + if (remainingSrcSize < (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 
6 : 2) + ZSTD_blockHeaderSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + { + nuint frameHeaderSize = ZSTD_frameHeaderSize_internal(ip, (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1), dctx->format); + if (ERR_isError(frameHeaderSize)) + return frameHeaderSize; + if (remainingSrcSize < frameHeaderSize + ZSTD_blockHeaderSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + { + nuint err_code = ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ip += frameHeaderSize; + remainingSrcSize -= frameHeaderSize; + } + + if (dctx->maxBlockSizeParam != 0) + dctx->fParams.blockSizeMax = dctx->fParams.blockSizeMax < (uint)dctx->maxBlockSizeParam ? dctx->fParams.blockSizeMax : (uint)dctx->maxBlockSizeParam; + while (true) + { + byte* oBlockEnd = oend; + nuint decodedSize; + blockProperties_t blockProperties; + nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); + if (ERR_isError(cBlockSize)) + return cBlockSize; + ip += ZSTD_blockHeaderSize; + remainingSrcSize -= ZSTD_blockHeaderSize; + if (cBlockSize > remainingSrcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (ip >= op && ip < oBlockEnd) + { + oBlockEnd = op + (ip - op); + } + + switch (blockProperties.blockType) + { + case blockType_e.bt_compressed: + assert(dctx->isFrameDecompression == 1); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (nuint)(oBlockEnd - op), ip, cBlockSize, streaming_operation.not_streaming); + break; + case blockType_e.bt_raw: + decodedSize = ZSTD_copyRawBlock(op, (nuint)(oend - op), ip, cBlockSize); + break; + case blockType_e.bt_rle: + decodedSize = ZSTD_setRleBlock(op, (nuint)(oBlockEnd - op), *ip, blockProperties.origSize); + break; + case blockType_e.bt_reserved: + default: + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + 
} + + { + nuint err_code = decodedSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (dctx->validateChecksum != 0) + { + ZSTD_XXH64_update(&dctx->xxhState, op, decodedSize); + } + + if (decodedSize != 0) + { + op += decodedSize; + } + + assert(ip != null); + ip += cBlockSize; + remainingSrcSize -= cBlockSize; + if (blockProperties.lastBlock != 0) + break; + } + + if (dctx->fParams.frameContentSize != unchecked(0UL - 1)) + { + if ((ulong)(op - ostart) != dctx->fParams.frameContentSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + } + + if (dctx->fParams.checksumFlag != 0) + { + if (remainingSrcSize < 4) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + } + + if (dctx->forceIgnoreChecksum == default) + { + uint checkCalc = (uint)ZSTD_XXH64_digest(&dctx->xxhState); + uint checkRead; + checkRead = MEM_readLE32(ip); + if (checkRead != checkCalc) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + } + } + + ip += 4; + remainingSrcSize -= 4; + } + + ZSTD_DCtx_trace_end(dctx, (ulong)(op - ostart), (ulong)(ip - istart), 0); + *srcPtr = ip; + *srcSizePtr = remainingSrcSize; + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressMultiFrame(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_DDict_s* ddict) + { + void* dststart = dst; + int moreThan1Frame = 0; + assert(dict == null || ddict == null); + if (ddict != null) + { + dict = ZSTD_DDict_dictContent(ddict); + dictSize = ZSTD_DDict_dictSize(ddict); + } + + while (srcSize >= ZSTD_startingInputLength(dctx->format)) + { + if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1 && srcSize >= 4) + { + uint magicNumber = MEM_readLE32(src); + if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) + { + /* skippable frame detected : skip it */ + nuint skippableSize = readSkippableFrameSize(src, srcSize); + { + nuint err_code = skippableSize; 
+ if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(skippableSize <= srcSize); + src = (byte*)src + skippableSize; + srcSize -= skippableSize; + continue; + } + } + + if (ddict != null) + { + /* we were called from ZSTD_decompress_usingDDict */ + nuint err_code = ZSTD_decompressBegin_usingDDict(dctx, ddict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + else + { + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + nuint err_code = ZSTD_decompressBegin_usingDict(dctx, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + ZSTD_checkContinuity(dctx, dst, dstCapacity); + { + nuint res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); + if (ZSTD_getErrorCode(res) == ZSTD_ErrorCode.ZSTD_error_prefix_unknown && moreThan1Frame == 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (ERR_isError(res)) + return res; + assert(res <= dstCapacity); + if (res != 0) + dst = (byte*)dst + res; + dstCapacity -= res; + } + + moreThan1Frame = 1; + } + + if (srcSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + return (nuint)((byte*)dst - (byte*)dststart); + } + + /*! ZSTD_decompress_usingDict() : + * Decompression using a known Dictionary. + * Dictionary must be identical to the one used during compression. + * Note : This function loads the dictionary, resulting in significant startup delay. + * It's intended for a dictionary used only once. + * Note : When `dict == NULL || dictSize < 8` no dictionary is used. 
*/ + public static nuint ZSTD_decompress_usingDict(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize) + { + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, null); + } + + private static ZSTD_DDict_s* ZSTD_getDDict(ZSTD_DCtx_s* dctx) + { + switch (dctx->dictUses) + { + default: + assert(0 != 0); + goto case ZSTD_dictUses_e.ZSTD_dont_use; + case ZSTD_dictUses_e.ZSTD_dont_use: + ZSTD_clearDict(dctx); + return null; + case ZSTD_dictUses_e.ZSTD_use_indefinitely: + return dctx->ddict; + case ZSTD_dictUses_e.ZSTD_use_once: + dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + return dctx->ddict; + } + } + + /*! ZSTD_decompressDCtx() : + * Same as ZSTD_decompress(), + * requires an allocated ZSTD_DCtx. + * Compatible with sticky parameters (see below). + */ + public static nuint ZSTD_decompressDCtx(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); + } + + /*! ZSTD_decompress() : + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * Multiple compressed frames can be decompressed at once with this method. + * The result will be the concatenation of all decompressed frames, back to back. + * `dstCapacity` is an upper bound of originalSize to regenerate. + * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). + * If maximum upper bound isn't known, prefer using streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). 
*/ + public static nuint ZSTD_decompress(void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint regenSize; + ZSTD_DCtx_s* dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); + if (dctx == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); + ZSTD_freeDCtx(dctx); + return regenSize; + } + + /*-************************************** + * Advanced Streaming Decompression API + * Bufferless and synchronous + ****************************************/ + public static nuint ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx_s* dctx) + { + return dctx->expected; + } + + /** + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we + * allow taking a partial block as the input. Currently only raw uncompressed blocks can + * be streamed. + * + * For blocks that can be streamed, this allows us to reduce the latency until we produce + * output, and avoid copying the input. + * + * @param inputSize - The total amount of input that the caller currently has. + */ + private static nuint ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx_s* dctx, nuint inputSize) + { + if (!(dctx->stage == ZSTD_dStage.ZSTDds_decompressBlock || dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock)) + return dctx->expected; + if (dctx->bType != blockType_e.bt_raw) + return dctx->expected; + return inputSize <= 1 ? 1 : inputSize <= dctx->expected ? 
inputSize : dctx->expected; + } + + public static ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx_s* dctx) + { + switch (dctx->stage) + { + default: + assert(0 != 0); + goto case ZSTD_dStage.ZSTDds_getFrameHeaderSize; + case ZSTD_dStage.ZSTDds_getFrameHeaderSize: + case ZSTD_dStage.ZSTDds_decodeFrameHeader: + return ZSTD_nextInputType_e.ZSTDnit_frameHeader; + case ZSTD_dStage.ZSTDds_decodeBlockHeader: + return ZSTD_nextInputType_e.ZSTDnit_blockHeader; + case ZSTD_dStage.ZSTDds_decompressBlock: + return ZSTD_nextInputType_e.ZSTDnit_block; + case ZSTD_dStage.ZSTDds_decompressLastBlock: + return ZSTD_nextInputType_e.ZSTDnit_lastBlock; + case ZSTD_dStage.ZSTDds_checkChecksum: + return ZSTD_nextInputType_e.ZSTDnit_checksum; + case ZSTD_dStage.ZSTDds_decodeSkippableHeader: + case ZSTD_dStage.ZSTDds_skipFrame: + return ZSTD_nextInputType_e.ZSTDnit_skippableFrame; + } + } + + private static int ZSTD_isSkipFrame(ZSTD_DCtx_s* dctx) + { + return dctx->stage == ZSTD_dStage.ZSTDds_skipFrame ? 1 : 0; + } + + /** ZSTD_decompressContinue() : + * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) + * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) + * or an error code, which can be tested using ZSTD_isError() */ + public static nuint ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + if (srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + ZSTD_checkContinuity(dctx, dst, dstCapacity); + dctx->processedCSize += srcSize; + switch (dctx->stage) + { + case ZSTD_dStage.ZSTDds_getFrameHeaderSize: + assert(src != null); + if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1) + { + assert(srcSize >= 4); + if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) + { + memcpy(dctx->headerBuffer, src, (uint)srcSize); + dctx->expected = 8 - srcSize; + dctx->stage = 
ZSTD_dStage.ZSTDds_decodeSkippableHeader; + return 0; + } + } + + dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); + if (ERR_isError(dctx->headerSize)) + return dctx->headerSize; + memcpy(dctx->headerBuffer, src, (uint)srcSize); + dctx->expected = dctx->headerSize - srcSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeFrameHeader; + return 0; + case ZSTD_dStage.ZSTDds_decodeFrameHeader: + assert(src != null); + memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, (uint)srcSize); + { + nuint err_code = ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + return 0; + case ZSTD_dStage.ZSTDds_decodeBlockHeader: + { + blockProperties_t bp; + nuint cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ERR_isError(cBlockSize)) + return cBlockSize; + if (cBlockSize > dctx->fParams.blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize != 0) + { + dctx->stage = bp.lastBlock != 0 ? 
ZSTD_dStage.ZSTDds_decompressLastBlock : ZSTD_dStage.ZSTDds_decompressBlock; + return 0; + } + + if (bp.lastBlock != 0) + { + if (dctx->fParams.checksumFlag != 0) + { + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; + } + else + { + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + } + } + else + { + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + } + + return 0; + } + + case ZSTD_dStage.ZSTDds_decompressLastBlock: + case ZSTD_dStage.ZSTDds_decompressBlock: + { + nuint rSize; + switch (dctx->bType) + { + case blockType_e.bt_compressed: + assert(dctx->isFrameDecompression == 1); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, streaming_operation.is_streaming); + dctx->expected = 0; + break; + case blockType_e.bt_raw: + assert(srcSize <= dctx->expected); + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + { + nuint err_code = rSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(rSize == srcSize); + dctx->expected -= rSize; + break; + case blockType_e.bt_rle: + rSize = ZSTD_setRleBlock(dst, dstCapacity, *(byte*)src, dctx->rleSize); + dctx->expected = 0; + break; + case blockType_e.bt_reserved: + default: + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + nuint err_code = rSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (rSize > dctx->fParams.blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + dctx->decodedSize += rSize; + if (dctx->validateChecksum != 0) + ZSTD_XXH64_update(&dctx->xxhState, dst, rSize); + dctx->previousDstEnd = (sbyte*)dst + rSize; + if (dctx->expected > 0) + { + return rSize; + } + + if (dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock) + { + if (dctx->fParams.frameContentSize != unchecked(0UL - 1) && dctx->decodedSize != dctx->fParams.frameContentSize) + { + 
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (dctx->fParams.checksumFlag != 0) + { + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; + } + else + { + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + } + } + else + { + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; + } + + return rSize; + } + + case ZSTD_dStage.ZSTDds_checkChecksum: + assert(srcSize == 4); + { + if (dctx->validateChecksum != 0) + { + uint h32 = (uint)ZSTD_XXH64_digest(&dctx->xxhState); + uint check32 = MEM_readLE32(src); + if (check32 != h32) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + } + } + + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + return 0; + } + + case ZSTD_dStage.ZSTDds_decodeSkippableHeader: + assert(src != null); + assert(srcSize <= 8); + assert(dctx->format != ZSTD_format_e.ZSTD_f_zstd1_magicless); + memcpy(dctx->headerBuffer + (8 - srcSize), src, (uint)srcSize); + dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); + dctx->stage = ZSTD_dStage.ZSTDds_skipFrame; + return 0; + case ZSTD_dStage.ZSTDds_skipFrame: + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + return 0; + default: + assert(0 != 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + } + + private static nuint ZSTD_refDictContent(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = (sbyte*)dict - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->prefixStart = dict; + dctx->previousDstEnd = (sbyte*)dict + dictSize; + return 0; + } + + /*! ZSTD_loadDEntropy() : + * dict : must point at beginning of a valid zstd dictionary. 
+ * @return : size of entropy tables read */ + private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dict, nuint dictSize) + { + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + if (dictSize <= 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + assert(MEM_readLE32(dict) == 0xEC30A437); + dictPtr += 8; + { + /* use fse tables as temporary workspace; implies fse tables are grouped together */ + void* workspace = &entropy->LLTable; + nuint workspaceSize = (nuint)(sizeof(ZSTD_seqSymbol) * 513 + sizeof(ZSTD_seqSymbol) * 257 + sizeof(ZSTD_seqSymbol) * 513); + nuint hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (nuint)(dictEnd - dictPtr), workspace, workspaceSize, 0); + if (ERR_isError(hSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dictPtr += hSize; + } + + { + short* offcodeNCount = stackalloc short[32]; + uint offcodeMaxValue = 31, offcodeLog; + nuint offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(offcodeHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (offcodeMaxValue > 31) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (offcodeLog > 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + ZSTD_buildFSETable(&entropy->OFTable.e0, offcodeNCount, offcodeMaxValue, OF_base, OF_bits, offcodeLog, entropy->workspace, sizeof(uint) * 157, 0); + dictPtr += offcodeHeaderSize; + } + + { + short* matchlengthNCount = stackalloc short[53]; + uint matchlengthMaxValue = 52, matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(matchlengthHeaderSize)) + { + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (matchlengthMaxValue > 52) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (matchlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + ZSTD_buildFSETable(&entropy->MLTable.e0, matchlengthNCount, matchlengthMaxValue, ML_base, ML_bits, matchlengthLog, entropy->workspace, sizeof(uint) * 157, 0); + dictPtr += matchlengthHeaderSize; + } + + { + short* litlengthNCount = stackalloc short[36]; + uint litlengthMaxValue = 35, litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + if (ERR_isError(litlengthHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (litlengthMaxValue > 35) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (litlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + ZSTD_buildFSETable(&entropy->LLTable.e0, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, litlengthLog, entropy->workspace, sizeof(uint) * 157, 0); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr + 12 > dictEnd) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + { + int i; + nuint dictContentSize = (nuint)(dictEnd - (dictPtr + 12)); + for (i = 0; i < 3; i++) + { + uint rep = MEM_readLE32(dictPtr); + dictPtr += 4; + if (rep == 0 || rep > dictContentSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + entropy->rep[i] = rep; + } + } + + return (nuint)(dictPtr - (byte*)dict); + } + + private static nuint ZSTD_decompress_insertDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + if (dictSize < 8) + return ZSTD_refDictContent(dctx, dict, 
dictSize); + { + uint magic = MEM_readLE32(dict); + if (magic != 0xEC30A437) + { + return ZSTD_refDictContent(dctx, dict, dictSize); + } + } + + dctx->dictID = MEM_readLE32((sbyte*)dict + 4); + { + nuint eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); + if (ERR_isError(eSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dict = (sbyte*)dict + eSize; + dictSize -= eSize; + } + + dctx->litEntropy = dctx->fseEntropy = 1; + return ZSTD_refDictContent(dctx, dict, dictSize); + } + + public static nuint ZSTD_decompressBegin(ZSTD_DCtx_s* dctx) + { + assert(dctx != null); + dctx->expected = ZSTD_startingInputLength(dctx->format); + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + dctx->processedCSize = 0; + dctx->decodedSize = 0; + dctx->previousDstEnd = null; + dctx->prefixStart = null; + dctx->virtualStart = null; + dctx->dictEnd = null; + dctx->entropy.hufTable[0] = 12 * 0x1000001; + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + dctx->bType = blockType_e.bt_reserved; + dctx->isFrameDecompression = 1; + memcpy(dctx->entropy.rep, repStartValue, sizeof(uint) * 3); + dctx->LLTptr = &dctx->entropy.LLTable.e0; + dctx->MLTptr = &dctx->entropy.MLTable.e0; + dctx->OFTptr = &dctx->entropy.OFTable.e0; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; + } + + public static nuint ZSTD_decompressBegin_usingDict(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + { + nuint err_code = ZSTD_decompressBegin(dctx); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (dict != null && dictSize != 0) + if (ERR_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + return 0; + } + + /* ====== ZSTD_DDict ====== */ + public static nuint ZSTD_decompressBegin_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { + assert(dctx != null); + if (ddict != null) + { + sbyte* dictStart = 
(sbyte*)ZSTD_DDict_dictContent(ddict); + nuint dictSize = ZSTD_DDict_dictSize(ddict); + void* dictEnd = dictStart + dictSize; + dctx->ddictIsCold = dctx->dictEnd != dictEnd ? 1 : 0; + } + + { + nuint err_code = ZSTD_decompressBegin(dctx); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (ddict != null) + { + ZSTD_copyDDictParameters(dctx, ddict); + } + + return 0; + } + + /*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ + public static uint ZSTD_getDictID_fromDict(void* dict, nuint dictSize) + { + if (dictSize < 8) + return 0; + if (MEM_readLE32(dict) != 0xEC30A437) + return 0; + return MEM_readLE32((sbyte*)dict + 4); + } + + /*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompress frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary (most common case). + * - The frame was built with dictID intentionally removed. + * Needed dictionary is a hidden piece of information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, frame header could not be decoded. + * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to use + * ZSTD_getFrameHeader(), which will provide a more precise error code. 
*/ + public static uint ZSTD_getDictID_fromFrame(void* src, nuint srcSize) + { + ZSTD_frameHeader zfp = new ZSTD_frameHeader + { + frameContentSize = 0, + windowSize = 0, + blockSizeMax = 0, + frameType = ZSTD_frameType_e.ZSTD_frame, + headerSize = 0, + dictID = 0, + checksumFlag = 0, + _reserved1 = 0, + _reserved2 = 0 + }; + nuint hError = ZSTD_getFrameHeader(&zfp, src, srcSize); + if (ERR_isError(hError)) + return 0; + return zfp.dictID; + } + + /*! ZSTD_decompress_usingDDict() : + * Decompression using a pre-digested Dictionary + * Use dictionary without significant overhead. */ + public static nuint ZSTD_decompress_usingDDict(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_DDict_s* ddict) + { + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, null, 0, ddict); + } + + /*===================================== + * Streaming decompression + *====================================*/ + public static ZSTD_DCtx_s* ZSTD_createDStream() + { + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); + } + + public static ZSTD_DCtx_s* ZSTD_initStaticDStream(void* workspace, nuint workspaceSize) + { + return ZSTD_initStaticDCtx(workspace, workspaceSize); + } + + public static ZSTD_DCtx_s* ZSTD_createDStream_advanced(ZSTD_customMem customMem) + { + return ZSTD_createDCtx_internal(customMem); + } + + public static nuint ZSTD_freeDStream(ZSTD_DCtx_s* zds) + { + return ZSTD_freeDCtx(zds); + } + + /* *** Initialization *** */ + public static nuint ZSTD_DStreamInSize() + { + return (nuint)(1 << 17) + ZSTD_blockHeaderSize; + } + + public static nuint ZSTD_DStreamOutSize() + { + return 1 << 17; + } + + /*! ZSTD_DCtx_loadDictionary_advanced() : + * Same as ZSTD_DCtx_loadDictionary(), + * but gives direct control over + * how to load the dictionary (by copy ? by reference ?) + * and how to interpret it (automatic ? force raw mode ? full mode only ?). 
*/ + public static nuint ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearDict(dctx); + if (dict != null && dictSize != 0) + { + dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); + if (dctx->ddictLocal == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + dctx->ddict = dctx->ddictLocal; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; + } + + return 0; + } + + /*! ZSTD_DCtx_loadDictionary_byReference() : + * Same as ZSTD_DCtx_loadDictionary(), + * but references `dict` content instead of copying it into `dctx`. + * This saves memory if `dict` remains around. + * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ + public static nuint ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto); + } + + /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ + * Create an internal DDict from dict buffer, to be used to decompress all future frames. + * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, + * meaning "return to no-dictionary mode". + * Note 1 : Loading a dictionary involves building tables, + * which has a non-negligible impact on CPU usage and latency. 
+ * It's recommended to "load once, use many times", to amortize the cost + * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. + * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. + * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of + * how dictionary content is loaded and interpreted. + */ + public static nuint ZSTD_DCtx_loadDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto); + } + + /*! ZSTD_DCtx_refPrefix_advanced() : + * Same as ZSTD_DCtx_refPrefix(), but gives finer control over + * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ + public static nuint ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize, ZSTD_dictContentType_e dictContentType) + { + { + nuint err_code = ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_once; + return 0; + } + + /*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ + * Reference a prefix (single-usage dictionary) to decompress next frame. + * This is the reverse operation of ZSTD_CCtx_refPrefix(), + * and must use the same prefix as the one used during compression. + * Prefix is **only used once**. Reference is discarded at end of frame. + * End of frame is reached when ZSTD_decompressStream() returns 0. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary + * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. 
+ * Prefix buffer must remain unmodified up to the end of frame, + * reached when ZSTD_decompressStream() returns 0. + * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). + * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) + * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. + * A full dictionary is more costly, as it requires building tables. + */ + public static nuint ZSTD_DCtx_refPrefix(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize) + { + return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dictContentType_e.ZSTD_dct_rawContent); + } + + /* ZSTD_initDStream_usingDict() : + * return : expected size, aka ZSTD_startingInputLength(). + * this function cannot fail */ + public static nuint ZSTD_initDStream_usingDict(ZSTD_DCtx_s* zds, void* dict, nuint dictSize) + { + { + nuint err_code = ZSTD_DCtx_reset(zds, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_DCtx_loadDictionary(zds, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_startingInputLength(zds->format); + } + + /* note : this variant can't fail */ + public static nuint ZSTD_initDStream(ZSTD_DCtx_s* zds) + { + { + nuint err_code = ZSTD_DCtx_reset(zds, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint err_code = ZSTD_DCtx_refDDict(zds, null); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_startingInputLength(zds->format); + } + + /* ZSTD_initDStream_usingDDict() : + * ddict will just be referenced, and must outlive decompression session + * this function cannot fail */ + public static nuint ZSTD_initDStream_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { + { + nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { 
+ return err_code; + } + } + + { + nuint err_code = ZSTD_DCtx_refDDict(dctx, ddict); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_startingInputLength(dctx->format); + } + + /* ZSTD_resetDStream() : + * return : expected size, aka ZSTD_startingInputLength(). + * this function cannot fail */ + public static nuint ZSTD_resetDStream(ZSTD_DCtx_s* dctx) + { + { + nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ZSTD_startingInputLength(dctx->format); + } + + /*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ + * Reference a prepared dictionary, to be used to decompress next frames. + * The dictionary remains active for decompression of future frames using same DCtx. + * + * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function + * will store the DDict references in a table, and the DDict used for decompression + * will be determined at decompression time, as per the dict ID in the frame. + * The memory for the table is allocated on the first call to refDDict, and can be + * freed with ZSTD_freeDCtx(). + * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: referencing a NULL DDict means "return to no-dictionary mode". + * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. 
+ */ + public static nuint ZSTD_DCtx_refDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearDict(dctx); + if (ddict != null) + { + dctx->ddict = ddict; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; + if (dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts) + { + if (dctx->ddictSet == null) + { + dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); + if (dctx->ddictSet == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + } + + assert(dctx->staticSize == 0); + { + nuint err_code = ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem); + if (ERR_isError(err_code)) + { + return err_code; + } + } + } + } + + return 0; + } + + /* ZSTD_DCtx_setMaxWindowSize() : + * note : no direct equivalence in ZSTD_DCtx_setParameter, + * since this version sets windowSize, and the other sets windowLog */ + public static nuint ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx_s* dctx, nuint maxWindowSize) + { + ZSTD_bounds bounds = ZSTD_dParam_getBounds(ZSTD_dParameter.ZSTD_d_windowLogMax); + nuint min = (nuint)1 << bounds.lowerBound; + nuint max = (nuint)1 << bounds.upperBound; + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + if (maxWindowSize < min) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (maxWindowSize > max) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + dctx->maxWindowSize = maxWindowSize; + return 0; + } + + /*! ZSTD_DCtx_setFormat() : + * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). + * Instruct the decoder context about what kind of data to decode next. 
+ * This instruction is mandatory to decode data without a fully-formed header, + * such as ZSTD_f_zstd1_magicless for example. + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ + public static nuint ZSTD_DCtx_setFormat(ZSTD_DCtx_s* dctx, ZSTD_format_e format) + { + return ZSTD_DCtx_setParameter(dctx, ZSTD_dParameter.ZSTD_d_experimentalParam1, (int)format); + } + + /*! ZSTD_dParam_getBounds() : + * All parameters must belong to an interval with lower and upper bounds, + * otherwise they will either trigger an error or be automatically clamped. + * @return : a structure, ZSTD_bounds, which contains + * - an error status field, which must be tested using ZSTD_isError() + * - both lower and upper bounds, inclusive + */ + public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) + { + ZSTD_bounds bounds = new ZSTD_bounds + { + error = 0, + lowerBound = 0, + upperBound = 0 + }; + switch (dParam) + { + case ZSTD_dParameter.ZSTD_d_windowLogMax: + bounds.lowerBound = 10; + bounds.upperBound = sizeof(nuint) == 4 ? 
30 : 31; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; + bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam3: + bounds.lowerBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum; + bounds.upperBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_ignoreChecksum; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + bounds.lowerBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict; + bounds.upperBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + bounds.lowerBound = 1 << 10; + bounds.upperBound = 1 << 17; + return bounds; + default: + break; + } + + bounds.error = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return bounds; + } + + /* ZSTD_dParam_withinBounds: + * @return 1 if value is within dParam bounds, + * 0 otherwise */ + private static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) + { + ZSTD_bounds bounds = ZSTD_dParam_getBounds(dParam); + if (ERR_isError(bounds.error)) + return 0; + if (value < bounds.lowerBound) + return 0; + if (value > bounds.upperBound) + return 0; + return 1; + } + + /*! ZSTD_DCtx_getParameter() : + * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, + * and store it into int* value. + * @return : 0, or an error code (which can be tested with ZSTD_isError()). 
+ */ + public static nuint ZSTD_DCtx_getParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter param, int* value) + { + switch (param) + { + case ZSTD_dParameter.ZSTD_d_windowLogMax: + *value = (int)ZSTD_highbit32((uint)dctx->maxWindowSize); + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + *value = (int)dctx->format; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + *value = (int)dctx->outBufferMode; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam3: + *value = (int)dctx->forceIgnoreChecksum; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + *value = (int)dctx->refMultipleDDicts; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + *value = dctx->disableHufAsm; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + *value = dctx->maxBlockSizeParam; + return 0; + default: + break; + } + + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + + /*! ZSTD_DCtx_setParameter() : + * Set one decompression parameter, selected by enum ZSTD_dParameter. + * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). + * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). + * Setting a parameter is only possible during frame initialization (before starting decompression). + * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
+ */ + public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dParam, int value) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + switch (dParam) + { + case ZSTD_dParameter.ZSTD_d_windowLogMax: + if (value == 0) + value = 27; + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->maxWindowSize = (nuint)1 << value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam1, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->format = (ZSTD_format_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam2, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->outBufferMode = (ZSTD_bufferMode_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam3: + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam3, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam4, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + if (dctx->staticSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + + dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + { + if 
(ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam5, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->disableHufAsm = value != 0 ? 1 : 0; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + if (value != 0) + { + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam6, value) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + } + + dctx->maxBlockSizeParam = value; + return 0; + default: + break; + } + + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + + /*! ZSTD_DCtx_reset() : + * Return a DCtx to clean state. + * Session and parameters can be reset jointly or separately. + * Parameters can only be reset when no active frame is being decompressed. + * @return : 0, or an error code, which can be tested with ZSTD_isError() + */ + public static nuint ZSTD_DCtx_reset(ZSTD_DCtx_s* dctx, ZSTD_ResetDirective reset) + { + if (reset == ZSTD_ResetDirective.ZSTD_reset_session_only || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + { + dctx->streamStage = ZSTD_dStreamStage.zdss_init; + dctx->noForwardProgress = 0; + dctx->isFrameDecompression = 1; + } + + if (reset == ZSTD_ResetDirective.ZSTD_reset_parameters || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + ZSTD_clearDict(dctx); + ZSTD_DCtx_resetParameters(dctx); + } + + return 0; + } + + public static nuint ZSTD_sizeof_DStream(ZSTD_DCtx_s* dctx) + { + return ZSTD_sizeof_DCtx(dctx); + } + + private static nuint ZSTD_decodingBufferSize_internal(ulong windowSize, ulong frameContentSize, nuint blockSizeMax) + { + nuint blockSize = (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) < blockSizeMax ? (nuint)(windowSize < 1 << 17 ? 
windowSize : 1 << 17) : blockSizeMax; + /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block + * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing + * the block at the beginning of the output buffer, and maintain a full window. + * + * We need another blockSize worth of buffer so that we can store split + * literals at the end of the block without overwriting the extDict window. + */ + ulong neededRBSize = windowSize + blockSize * 2 + 32 * 2; + ulong neededSize = frameContentSize < neededRBSize ? frameContentSize : neededRBSize; + nuint minRBSize = (nuint)neededSize; + if (minRBSize != neededSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + } + + return minRBSize; + } + + /*===== Buffer-less streaming decompression functions =====*/ + public static nuint ZSTD_decodingBufferSize_min(ulong windowSize, ulong frameContentSize) + { + return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, 1 << 17); + } + + public static nuint ZSTD_estimateDStreamSize(nuint windowSize) + { + nuint blockSize = windowSize < 1 << 17 ? windowSize : 1 << 17; + /* no block can be larger */ + nuint inBuffSize = blockSize; + nuint outBuffSize = ZSTD_decodingBufferSize_min(windowSize, unchecked(0UL - 1)); + return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; + } + + public static nuint ZSTD_estimateDStreamSize_fromFrame(void* src, nuint srcSize) + { + /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ + uint windowSizeMax = 1U << (sizeof(nuint) == 4 ? 
30 : 31); + ZSTD_frameHeader zfh; + nuint err = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ERR_isError(err)) + return err; + if (err > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (zfh.windowSize > windowSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + } + + return ZSTD_estimateDStreamSize((nuint)zfh.windowSize); + } + + /* ***** Decompression ***** */ + private static int ZSTD_DCtx_isOverflow(ZSTD_DCtx_s* zds, nuint neededInBuffSize, nuint neededOutBuffSize) + { + return zds->inBuffSize + zds->outBuffSize >= (neededInBuffSize + neededOutBuffSize) * 3 ? 1 : 0; + } + + private static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DCtx_s* zds, nuint neededInBuffSize, nuint neededOutBuffSize) + { + if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize) != 0) + zds->oversizedDuration++; + else + zds->oversizedDuration = 0; + } + + private static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DCtx_s* zds) + { + return zds->oversizedDuration >= 128 ? 1 : 0; + } + + /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ + private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* output) + { + ZSTD_outBuffer_s expect = zds->expectedOutBuffer; + if (zds->outBufferMode != ZSTD_bufferMode_e.ZSTD_bm_stable) + return 0; + if (zds->streamStage == ZSTD_dStreamStage.zdss_init) + return 0; + if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong)); + } + + /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() + * and updates the stage and the output buffer state. This call is extracted so it can be + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. + * NOTE: You must break after calling this function since the streamStage is modified. 
+ */ + private static nuint ZSTD_decompressContinueStream(ZSTD_DCtx_s* zds, sbyte** op, sbyte* oend, void* src, nuint srcSize) + { + int isSkipFrame = ZSTD_isSkipFrame(zds); + if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + nuint dstSize = isSkipFrame != 0 ? 0 : zds->outBuffSize - zds->outStart; + nuint decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, dstSize, src, srcSize); + { + nuint err_code = decodedSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (decodedSize == 0 && isSkipFrame == 0) + { + zds->streamStage = ZSTD_dStreamStage.zdss_read; + } + else + { + zds->outEnd = zds->outStart + decodedSize; + zds->streamStage = ZSTD_dStreamStage.zdss_flush; + } + } + else + { + /* Write directly into the output buffer */ + nuint dstSize = isSkipFrame != 0 ? 0 : (nuint)(oend - *op); + nuint decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); + { + nuint err_code = decodedSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + *op += decodedSize; + zds->streamStage = ZSTD_dStreamStage.zdss_read; + assert(*op <= oend); + assert(zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + } + + return 0; + } + + /*! ZSTD_decompressStream() : + * Streaming decompression function. + * Call repetitively to consume full input updating it as necessary. + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. + * - `output.pos < output.size`, decoder flushed internal output buffer. + * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, + * check ZSTD_decompressStream() @return value, + * if > 0, invoke it again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. 
+ * + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + * + * Note: when an operation returns with an error code, the @zds state may be left in undefined state. + * It's UB to invoke `ZSTD_decompressStream()` on such a state. + * In order to re-use such a state, it must be first reset, + * which can be done explicitly (`ZSTD_DCtx_reset()`), + * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) + */ + public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) + { + sbyte* src = (sbyte*)input->src; + sbyte* istart = input->pos != 0 ? src + input->pos : src; + sbyte* iend = input->size != 0 ? src + input->size : src; + sbyte* ip = istart; + sbyte* dst = (sbyte*)output->dst; + sbyte* ostart = output->pos != 0 ? dst + output->pos : dst; + sbyte* oend = output->size != 0 ? 
dst + output->size : dst; + sbyte* op = ostart; + uint someMoreWork = 1; + assert(zds != null); + if (input->pos > input->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (output->pos > output->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + { + nuint err_code = ZSTD_checkOutBuffer(zds, output); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + while (someMoreWork != 0) + { + switch (zds->streamStage) + { + case ZSTD_dStreamStage.zdss_init: + zds->streamStage = ZSTD_dStreamStage.zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + zds->hostageByte = 0; + zds->expectedOutBuffer = *output; + goto case ZSTD_dStreamStage.zdss_loadHeader; + case ZSTD_dStreamStage.zdss_loadHeader: + { + nuint hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + if (zds->refMultipleDDicts != default && zds->ddictSet != null) + { + ZSTD_DCtx_selectFrameDDict(zds); + } + + if (ERR_isError(hSize)) + { + return hSize; + } + + if (hSize != 0) + { + /* if hSize!=0, hSize > zds->lhSize */ + nuint toLoad = hSize - zds->lhSize; + nuint remainingInput = (nuint)(iend - ip); + assert(iend >= ip); + if (toLoad > remainingInput) + { + if (remainingInput > 0) + { + memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)remainingInput); + zds->lhSize += remainingInput; + } + + input->pos = input->size; + { + /* check first few bytes */ + nuint err_code = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + return ((nuint)(zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) > hSize ? (nuint)(zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 
6 : 2) : hSize) - zds->lhSize + ZSTD_blockHeaderSize; + } + + assert(ip != null); + memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)toLoad); + zds->lhSize = hSize; + ip += toLoad; + break; + } + } + + if (zds->fParams.frameContentSize != unchecked(0UL - 1) && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame && (nuint)(oend - op) >= zds->fParams.frameContentSize) + { + nuint cSize = ZSTD_findFrameCompressedSize_advanced(istart, (nuint)(iend - istart), zds->format); + if (cSize <= (nuint)(iend - istart)) + { + /* shortcut : using single-pass mode */ + nuint decompressedSize = ZSTD_decompress_usingDDict(zds, op, (nuint)(oend - op), istart, cSize, ZSTD_getDDict(zds)); + if (ERR_isError(decompressedSize)) + return decompressedSize; + assert(istart != null); + ip = istart + cSize; + op = op != null ? op + decompressedSize : op; + zds->expected = 0; + zds->streamStage = ZSTD_dStreamStage.zdss_init; + someMoreWork = 0; + break; + } + } + + if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame && zds->fParams.frameContentSize != unchecked(0UL - 1) && (nuint)(oend - op) < zds->fParams.frameContentSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + { + nuint err_code = ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (zds->format == ZSTD_format_e.ZSTD_f_zstd1 && (MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0) == 0x184D2A50) + { + zds->expected = MEM_readLE32(zds->headerBuffer + 4); + zds->stage = ZSTD_dStage.ZSTDds_skipFrame; + } + else + { + { + nuint err_code = ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zds->expected = ZSTD_blockHeaderSize; + zds->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + } + + zds->fParams.windowSize = zds->fParams.windowSize > 1U << 10 ? 
zds->fParams.windowSize : 1U << 10; + if (zds->fParams.windowSize > zds->maxWindowSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + } + + if (zds->maxBlockSizeParam != 0) + zds->fParams.blockSizeMax = zds->fParams.blockSizeMax < (uint)zds->maxBlockSizeParam ? zds->fParams.blockSizeMax : (uint)zds->maxBlockSizeParam; + { + /* frame checksum */ + nuint neededInBuffSize = zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; + nuint neededOutBuffSize = zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) : 0; + ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); + { + int tooSmall = zds->inBuffSize < neededInBuffSize || zds->outBuffSize < neededOutBuffSize ? 1 : 0; + int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); + if (tooSmall != 0 || tooLarge != 0) + { + nuint bufferSize = neededInBuffSize + neededOutBuffSize; + if (zds->staticSize != 0) + { + assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); + if (bufferSize > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + } + else + { + ZSTD_customFree(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (sbyte*)ZSTD_customMalloc(bufferSize, zds->customMem); + if (zds->inBuff == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + } + + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; + } + } + } + + zds->streamStage = ZSTD_dStreamStage.zdss_read; + goto case ZSTD_dStreamStage.zdss_read; + case ZSTD_dStreamStage.zdss_read: + { + nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (nuint)(iend - ip)); + if (neededInSize == 0) + { + zds->streamStage = 
ZSTD_dStreamStage.zdss_init; + someMoreWork = 0; + break; + } + + if ((nuint)(iend - ip) >= neededInSize) + { + { + nuint err_code = ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(ip != null); + ip += neededInSize; + break; + } + } + + if (ip == iend) + { + someMoreWork = 0; + break; + } + + zds->streamStage = ZSTD_dStreamStage.zdss_load; + goto case ZSTD_dStreamStage.zdss_load; + case ZSTD_dStreamStage.zdss_load: + { + nuint neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + nuint toLoad = neededInSize - zds->inPos; + int isSkipFrame = ZSTD_isSkipFrame(zds); + nuint loadedSize; + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (nuint)(iend - ip))); + if (isSkipFrame != 0) + { + loadedSize = toLoad < (nuint)(iend - ip) ? toLoad : (nuint)(iend - ip); + } + else + { + if (toLoad > zds->inBuffSize - zds->inPos) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (nuint)(iend - ip)); + } + + if (loadedSize != 0) + { + ip += loadedSize; + zds->inPos += loadedSize; + } + + if (loadedSize < toLoad) + { + someMoreWork = 0; + break; + } + + zds->inPos = 0; + { + nuint err_code = ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + break; + } + + case ZSTD_dStreamStage.zdss_flush: + { + nuint toFlushSize = zds->outEnd - zds->outStart; + nuint flushedSize = ZSTD_limitCopy(op, (nuint)(oend - op), zds->outBuff + zds->outStart, toFlushSize); + op = op != null ? 
op + flushedSize : op; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) + { + zds->streamStage = ZSTD_dStreamStage.zdss_read; + if (zds->outBuffSize < zds->fParams.frameContentSize && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) + { + zds->outStart = zds->outEnd = 0; + } + + break; + } + } + + someMoreWork = 0; + break; + default: + assert(0 != 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + } + + input->pos = (nuint)(ip - (sbyte*)input->src); + output->pos = (nuint)(op - (sbyte*)output->dst); + zds->expectedOutBuffer = *output; + if (ip == istart && op == ostart) + { + zds->noForwardProgress++; + if (zds->noForwardProgress >= 16) + { + if (op == oend) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull)); + } + + if (ip == iend) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty)); + } + + assert(0 != 0); + } + } + else + { + zds->noForwardProgress = 0; + } + + { + nuint nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); + if (nextSrcSizeHint == 0) + { + if (zds->outEnd == zds->outStart) + { + if (zds->hostageByte != 0) + { + if (input->pos >= input->size) + { + zds->streamStage = ZSTD_dStreamStage.zdss_read; + return 1; + } + + input->pos++; + } + + return 0; + } + + if (zds->hostageByte == 0) + { + input->pos--; + zds->hostageByte = 1; + } + + return 1; + } + + nextSrcSizeHint += ZSTD_blockHeaderSize * (nuint)(ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0); + assert(zds->inPos <= nextSrcSizeHint); + nextSrcSizeHint -= zds->inPos; + return nextSrcSizeHint; + } + } + + /*! ZSTD_decompressStream_simpleArgs() : + * Same as ZSTD_decompressStream(), + * but using only integral types as arguments. + * This can be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. 
+ */ + public static nuint ZSTD_decompressStream_simpleArgs(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, nuint* dstPos, void* src, nuint srcSize, nuint* srcPos) + { + ZSTD_outBuffer_s output; + ZSTD_inBuffer_s input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { + nuint cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs new file mode 100644 index 000000000..a3ef9e251 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs @@ -0,0 +1,2138 @@ +using static ZstdSharp.UnsafeHelper; +using System.Runtime.CompilerServices; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*_******************************************************* + * Memory operations + **********************************************************/ + private static void ZSTD_copy4(void* dst, void* src) + { + memcpy(dst, src, 4); + } + + /*-************************************************************* + * Block decoding + ***************************************************************/ + private static nuint ZSTD_blockSizeMax(ZSTD_DCtx_s* dctx) + { + nuint blockSizeMax = dctx->isFrameDecompression != 0 ? dctx->fParams.blockSizeMax : 1 << 17; + assert(blockSizeMax <= 1 << 17); + return blockSizeMax; + } + + /*! 
ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ + private static nuint ZSTD_getcBlockSize(void* src, nuint srcSize, blockProperties_t* bpPtr) + { + if (srcSize < ZSTD_blockHeaderSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + { + uint cBlockHeader = MEM_readLE24(src); + uint cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)(cBlockHeader >> 1 & 3); + bpPtr->origSize = cSize; + if (bpPtr->blockType == blockType_e.bt_rle) + return 1; + if (bpPtr->blockType == blockType_e.bt_reserved) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + return cSize; + } + } + + /* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ + private static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, nuint litSize, streaming_operation streaming, nuint expectedWriteSize, uint splitImmediately) + { + nuint blockSizeMax = ZSTD_blockSizeMax(dctx); + assert(litSize <= blockSizeMax); + assert(dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming); + assert(expectedWriteSize <= blockSizeMax); + if (streaming == streaming_operation.not_streaming && dstCapacity > blockSizeMax + 32 + litSize + 32) + { + dctx->litBuffer = (byte*)dst + blockSizeMax + 32; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_in_dst; + } + else if (litSize <= 1 << 16) + { + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + } + else + { + assert(blockSizeMax > 1 << 16); + if (splitImmediately != 0) + { + dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize + (1 << 16) - 32; + dctx->litBufferEnd = dctx->litBuffer + litSize - (1 << 
16); + } + else + { + dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize; + dctx->litBufferEnd = (byte*)dst + expectedWriteSize; + } + + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_split; + assert(dctx->litBufferEnd <= (byte*)dst + expectedWriteSize); + } + } + + /*! ZSTD_decodeLiteralsBlock() : + * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored + * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current + * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being + * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. + * + * @return : nb of bytes read from src (< srcSize ) + * note : symbol not declared but exposed for fullbench */ + private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuint srcSize, void* dst, nuint dstCapacity, streaming_operation streaming) + { + if (srcSize < 1 + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + byte* istart = (byte*)src; + SymbolEncodingType_e litEncType = (SymbolEncodingType_e)(istart[0] & 3); + nuint blockSizeMax = ZSTD_blockSizeMax(dctx); + switch (litEncType) + { + case SymbolEncodingType_e.set_repeat: + if (dctx->litEntropy == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + goto case SymbolEncodingType_e.set_compressed; + case SymbolEncodingType_e.set_compressed: + if (srcSize < 5) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + nuint lhSize, litSize, litCSize; + uint singleStream = 0; + uint lhlCode = (uint)(istart[0] >> 2 & 3); + uint lhc = MEM_readLE32(istart); + nuint hufSuccess; + nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; + int flags = 0 | (ZSTD_DCtx_get_bmi2(dctx) != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) | (dctx->disableHufAsm != 0 ? (int)HUF_flags_e.HUF_flags_disableAsm : 0); + switch (lhlCode) + { + case 0: + case 1: + default: + singleStream = lhlCode == 0 ? 1U : 0U; + lhSize = 3; + litSize = lhc >> 4 & 0x3FF; + litCSize = lhc >> 14 & 0x3FF; + break; + case 2: + lhSize = 4; + litSize = lhc >> 4 & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + lhSize = 5; + litSize = lhc >> 4 & 0x3FFFF; + litCSize = (lhc >> 22) + ((nuint)istart[4] << 10); + break; + } + + if (litSize > 0 && dst == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (litSize > blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (singleStream == 0) + if (litSize < 6) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong)); + } + + if (litCSize + lhSize > srcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (expectedWriteSize < litSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); + if (dctx->ddictIsCold != 0 && litSize > 768) + { + sbyte* _ptr = (sbyte*)dctx->HUFptr; + const nuint _size = sizeof(uint) * 4097; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } +#endif + } + } + + if (litEncType == SymbolEncodingType_e.set_repeat) + { + if (singleStream != 0) + { + hufSuccess = HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr, flags); + } + else + { + assert(litSize >= 6); + hufSuccess = HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + 
lhSize, litCSize, dctx->HUFptr, flags); + } + } + else + { + if (singleStream != 0) + { + hufSuccess = HUF_decompress1X1_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->workspace, sizeof(uint) * 640, flags); + } + else + { + hufSuccess = HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->workspace, sizeof(uint) * 640, flags); + } + } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + assert(litSize > 1 << 16); + memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - (1 << 16), 1 << 16); + memmove(dctx->litBuffer + (1 << 16) - 32, dctx->litBuffer, litSize - (1 << 16)); + dctx->litBuffer += (1 << 16) - 32; + dctx->litBufferEnd -= 32; + assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); + } + + if (ERR_isError(hufSuccess)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType == SymbolEncodingType_e.set_compressed) + dctx->HUFptr = dctx->entropy.hufTable; + return litCSize + lhSize; + } + + case SymbolEncodingType_e.set_basic: + { + nuint litSize, lhSize; + uint lhlCode = (uint)(istart[0] >> 2 & 3); + nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; + switch (lhlCode) + { + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 3) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (litSize > 0 && dst == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (litSize > blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (expectedWriteSize < litSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); + if (lhSize + litSize + 32 > srcSize) + { + if (litSize + lhSize > srcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + memcpy(dctx->litBuffer, istart + lhSize, (uint)(litSize - (1 << 16))); + memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - (1 << 16), 1 << 16); + } + else + { + memcpy(dctx->litBuffer, istart + lhSize, (uint)litSize); + } + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize + litSize; + } + + dctx->litPtr = istart + lhSize; + dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + return lhSize + litSize; + } + + case SymbolEncodingType_e.set_rle: + { + uint lhlCode = (uint)(istart[0] >> 2 & 3); + nuint litSize, lhSize; + nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; + switch (lhlCode) + { + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + if (srcSize < 3) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 4) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (litSize > 0 && dst == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (litSize > blockSizeMax) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (expectedWriteSize < litSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + memset(dctx->litBuffer, istart[lhSize], (uint)(litSize - (1 << 16))); + memset(dctx->litExtraBuffer, istart[lhSize], 1 << 16); + } + else + { + memset(dctx->litBuffer, istart[lhSize], (uint)litSize); + } + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize + 1; + } + + default: + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + } + } + + /* Hidden declaration for fullbench */ + private static nuint ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx_s* dctx, void* src, nuint srcSize, void* dst, nuint dstCapacity) + { + dctx->isFrameDecompression = 0; + return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming_operation.not_streaming); + } + + private static readonly ZSTD_seqSymbol* LL_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[65] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 6, nbBits: 5, baseValue: 64), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 256), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1024), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4096), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), new 
ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 512), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2048), new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 32, 
nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65536), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32768), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16384), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8192) }); + private static readonly ZSTD_seqSymbol* OF_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[33] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 0), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 21, nbBits: 5, baseValue: 2097149), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 18, nbBits: 5, baseValue: 262141), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 23, nbBits: 5, baseValue: 8388605), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 5, baseValue: 29), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 20, nbBits: 5, baseValue: 1048573), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 2045), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 17, nbBits: 5, baseValue: 131069), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 22, nbBits: 5, baseValue: 4194301), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 13), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 19, nbBits: 5, baseValue: 524285), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 28, nbBits: 5, baseValue: 268435453), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 27, nbBits: 5, baseValue: 134217725), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 26, nbBits: 5, baseValue: 67108861), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 25, nbBits: 5, baseValue: 33554429), 
new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 24, nbBits: 5, baseValue: 16777213) }); + private static readonly ZSTD_seqSymbol* ML_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[65] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 3), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 19), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 25), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 28), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 31), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 34), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 37), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 41), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 47), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 59), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 83), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 6, baseValue: 131), new ZSTD_seqSymbol(nextState: 
0, nbAdditionalBits: 9, nbBits: 6, baseValue: 515), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 18), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 21), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 24), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 27), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 30), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 33), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 35), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 39), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 43), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 51), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 67), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 6, baseValue: 99), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 259), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), new 
ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 17), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 20), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 23), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 26), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 29), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65539), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32771), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16387), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8195), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4099), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2051), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1027) }); + private static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, uint baseValue, byte nbAddBits) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + ZSTD_seqSymbol* cell = dt + 1; + DTableH->tableLog = 0; + DTableH->fastMode = 0; + cell->nbBits = 0; + cell->nextState = 0; + assert(nbAddBits < 255); + cell->nbAdditionalBits = nbAddBits; + cell->baseValue = baseValue; + } + + /* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) + * cannot 
fail if input is valid =>
 * all inputs are presumed validated at this stage */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize)
{
    /* dt[0] holds the table header (ZSTD_seqSymbol_header); decoding cells start at dt[1]. */
    ZSTD_seqSymbol* tableDecode = dt + 1;
    uint maxSV1 = maxSymbolValue + 1;
    uint tableSize = (uint)(1 << (int)tableLog);
    /* Workspace layout: (52+1) ushort per-symbol state counters, then the byte spread buffer.
     * 52 is the largest maxSymbolValue any caller passes (see assert below). */
    ushort* symbolNext = (ushort*)wksp;
    byte* spread = (byte*)(symbolNext + 52 + 1);
    uint highThreshold = tableSize - 1;
    assert(maxSymbolValue <= 52);
    assert(tableLog <= 9);
    assert(wkspSize >= sizeof(short) * (52 + 1) + (1U << 9) + sizeof(ulong));
    /* Pass 1: write the header and initialize per-symbol occurrence counters. */
    {
        ZSTD_seqSymbol_header DTableH;
        DTableH.tableLog = tableLog;
        DTableH.fastMode = 1;
        {
            /* Any symbol with normalized count >= 2^(tableLog-1) disables fastMode. */
            short largeLimit = (short)(1 << (int)(tableLog - 1));
            uint s;
            for (s = 0; s < maxSV1; s++)
            {
                if (normalizedCounter[s] == -1)
                {
                    /* "Less than one" probability symbol: give it one cell, taken from the
                     * top of the table, working downward. */
                    tableDecode[highThreshold--].baseValue = s;
                    symbolNext[s] = 1;
                }
                else
                {
                    if (normalizedCounter[s] >= largeLimit)
                        DTableH.fastMode = 0;
                    assert(normalizedCounter[s] >= 0);
                    symbolNext[s] = (ushort)normalizedCounter[s];
                }
            }
        }

        memcpy(dt, &DTableH, (uint)sizeof(ZSTD_seqSymbol_header));
    }

    assert(tableSize <= 512);
    if (highThreshold == tableSize - 1)
    {
        /* Fast path: no low-probability symbols claimed cells, so the whole table can be
         * filled with a two-stage spread (fill a linear buffer, then scatter it). */
        nuint tableMask = tableSize - 1;
        nuint step = (tableSize >> 1) + (tableSize >> 3) + 3;
        {
            /* Write symbol value s into spread[] n times, 8 bytes per store.
             * 'add' bumps every lane of the 8-byte pattern by 1 per symbol. */
            const ulong add = 0x0101010101010101UL;
            nuint pos = 0;
            ulong sv = 0;
            uint s;
            for (s = 0; s < maxSV1; ++s, sv += add)
            {
                int i;
                int n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8)
                {
                    MEM_write64(spread + pos + i, sv);
                }

                assert(n >= 0);
                pos += (nuint)n;
            }
        }

        /* Scatter spread[] across the table following the FSE step sequence,
         * two cells per outer iteration. */
        {
            nuint position = 0;
            nuint s;
            const nuint unroll = 2;
            assert(tableSize % unroll == 0);
            for (s = 0; s < tableSize; s += unroll)
            {
                nuint u;
                for (u = 0; u < unroll; ++u)
                {
                    nuint uPosition = position + u * step & tableMask;
                    tableDecode[uPosition].baseValue = spread[s + u];
                }

                position = position + unroll * step & tableMask;
            }

            /* The step sequence is a permutation: it must land back at 0. */
            assert(position == 0);
        }
    }
    else
    {
        /* Standard FSE spread: place each symbol n times, skipping the cells above
         * highThreshold already claimed by low-probability symbols. */
        uint tableMask = tableSize - 1;
        uint step = (tableSize >> 1) + (tableSize >> 3) + 3;
        uint s, position = 0;
        for (s = 0; s < maxSV1; s++)
        {
            int i;
            int n = normalizedCounter[s];
            for (i = 0; i < n; i++)
            {
                tableDecode[position].baseValue = s;
                position = position + step & tableMask;
                while (position > highThreshold)
                    position = position + step & tableMask;
            }
        }

        assert(position == 0);
    }

    /* Pass 2: convert each cell's symbol into its decode record: bits to read,
     * next-state delta, extra bits and base value for the symbol. */
    {
        uint u;
        for (u = 0; u < tableSize; u++)
        {
            uint symbol = tableDecode[u].baseValue;
            uint nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState));
            tableDecode[u].nextState = (ushort)((nextState << tableDecode[u].nbBits) - tableSize);
            assert(nbAdditionalBits[symbol] < 255);
            tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
            tableDecode[u].baseValue = baseValue[symbol];
        }
    }
}

/* Avoids the FORCE_INLINE of the _body() function. */
private static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize)
{
    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}

/* ZSTD_buildFSETable() :
 * generate FSE decoding table for one symbol (ll, ml or off)
 * this function must be called with valid parameters only
 * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
 * in which case it cannot fail.
 * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
 * defined in zstd_decompress_internal.h.
 * Internal use only.
 */
private static void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize, int bmi2)
{
    /* bmi2 is accepted for signature parity with upstream zstd but unused here:
     * this port has a single code path. */
    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}

/*! ZSTD_buildSeqTable() :
 * @return : nb bytes read from src,
 * or an error code if it fails */
private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSymbol** DTablePtr, SymbolEncodingType_e type, uint max, uint maxLog, void* src, nuint srcSize, uint* baseValue, byte* nbAdditionalBits, ZSTD_seqSymbol* defaultTable, uint flagRepeatTable, int ddictIsCold, int nbSeq, uint* wksp, nuint wkspSize, int bmi2)
{
    switch (type)
    {
        case SymbolEncodingType_e.set_rle:
            /* RLE mode: a single symbol byte describes every sequence. */
            if (srcSize == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
            }

            if (*(byte*)src > max)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            {
                uint symbol = *(byte*)src;
                uint baseline = baseValue[symbol];
                byte nbBits = nbAdditionalBits[symbol];
                ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
            }

            *DTablePtr = DTableSpace;
            /* One byte (the symbol) consumed. */
            return 1;
        case SymbolEncodingType_e.set_basic:
            /* Predefined mode: use the format-defined default distribution; nothing consumed. */
            *DTablePtr = defaultTable;
            return 0;
        case SymbolEncodingType_e.set_repeat:
            /* Repeat mode: reuse the previously-built table (invalid if none exists). */
            if (flagRepeatTable == 0)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            if (ddictIsCold != 0 && nbSeq > 24)
            {
                /* The dictionary's table is likely out of cache: prefetch it,
                 * one cache line (64 bytes) at a time. */
                void* pStart = *DTablePtr;
                nuint pSize = (nuint)(sizeof(ZSTD_seqSymbol) * (1 + (1 << (int)maxLog)));
                {
                    sbyte* _ptr = (sbyte*)pStart;
                    nuint _size = pSize;
                    nuint _pos;
                    for (_pos = 0; _pos < _size; _pos += 64)
                    {
#if NETCOREAPP3_0_OR_GREATER
                        if (System.Runtime.Intrinsics.X86.Sse.IsSupported)
                        {
                            System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos);
                        }
#endif
                    }
                }
            }

            return 0;
        case SymbolEncodingType_e.set_compressed:
            /* FSE-compressed mode: read the normalized count header, then build the table. */
            {
                uint tableLog;
                short* norm = stackalloc short[53];
                nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
                if (ERR_isError(headerSize))
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
                }

                if (tableLog > maxLog)
                {
                    return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
                }

                ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
                *DTablePtr = DTableSpace;
                return headerSize;
            }

        default:
            /* Unreachable for a valid 2-bit encoding type. */
            assert(0 != 0);
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC));
    }
}

/*! ZSTD_decodeSeqHeaders() :
 * decode sequence header from src */
/* Used by: zstd_decompress_block, fullbench */
private static nuint ZSTD_decodeSeqHeaders(ZSTD_DCtx_s* dctx, int* nbSeqPtr, void* src, nuint srcSize)
{
    byte* istart = (byte*)src;
    byte* iend = istart + srcSize;
    byte* ip = istart;
    int nbSeq;
    if (srcSize < 1)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* Sequence count: 1 byte (<0x80), 2 bytes (0x80..0xFE), or 3 bytes (0xFF marker). */
    nbSeq = *ip++;
    if (nbSeq > 0x7F)
    {
        if (nbSeq == 0xFF)
        {
            if (ip + 2 > iend)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
            }

            nbSeq = MEM_readLE16(ip) + 0x7F00;
            ip += 2;
        }
        else
        {
            if (ip >= iend)
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
            }

            nbSeq = (nbSeq - 0x80 << 8) + *ip++;
        }
    }

    *nbSeqPtr = nbSeq;
    if (nbSeq == 0)
    {
        /* No sequences: the section must end exactly here. */
        if (ip != iend)
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        }

        return (nuint)(ip - istart);
    }

    if (ip + 1 > iend)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong));
    }

    /* The two low bits of the compression-modes byte are reserved and must be zero. */
    if ((*ip & 3) != 0)
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    }

    {
        /* Compression-modes byte: bits 6-7 = literal lengths, 4-5 = offsets, 2-3 = match lengths. */
        SymbolEncodingType_e LLtype = (SymbolEncodingType_e)(*ip >> 6);
        SymbolEncodingType_e OFtype = (SymbolEncodingType_e)(*ip >> 4 & 3);
        SymbolEncodingType_e MLtype = (SymbolEncodingType_e)(*ip >> 2 & 3);
        ip++;
        /* Build the three decoding tables (LL max symbol 35/log 9, OF 31/8, ML 52/9). */
        {
            nuint llhSize = ZSTD_buildSeqTable(&dctx->entropy.LLTable.e0, &dctx->LLTptr, LLtype, 35, 9, ip, (nuint)(iend - ip), LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx));
            if (ERR_isError(llhSize))
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            ip += llhSize;
        }

        {
            nuint ofhSize = ZSTD_buildSeqTable(&dctx->entropy.OFTable.e0, &dctx->OFTptr, OFtype, 31, 8, ip, (nuint)(iend - ip), OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx));
            if (ERR_isError(ofhSize))
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            ip += ofhSize;
        }

        {
            nuint mlhSize = ZSTD_buildSeqTable(&dctx->entropy.MLTable.e0, &dctx->MLTptr, MLtype, 52, 9, ip, (nuint)(iend - ip), ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx));
            if (ERR_isError(mlhSize))
            {
                return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
            }

            ip += mlhSize;
        }
    }

    return (nuint)(ip - istart);
}

/* Lookup tables used by ZSTD_overlapCopy8 to widen overlapping matches with offset < 8.
 * NOTE(review): both ReadOnlySpan declarations appear to have lost their generic type
 * arguments (<uint> / <int>) in this paste — confirm against the original port. */
#if NET7_0_OR_GREATER
private static ReadOnlySpan Span_dec32table => new uint[8]
{
    0,
    1,
    2,
    1,
    4,
    4,
    4,
    4
};
private static uint* dec32table => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dec32table));
#else

private static readonly uint* dec32table = GetArrayPointer(new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 });
#endif
#if NET7_0_OR_GREATER
private static ReadOnlySpan Span_dec64table => new int[8]
{
    8,
    8,
    8,
    7,
    8,
    9,
    10,
    11
};
private static int* dec64table => (int*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dec64table));
#else

private static readonly int* dec64table = GetArrayPointer(new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 });
#endif
/*! ZSTD_overlapCopy8() :
 * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
 * If the offset is < 8 then the offset is spread to at least 8 bytes.
 *
 * Precondition: *ip <= *op
 * Postcondition: *op - *ip >= 8
 */
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static void ZSTD_overlapCopy8(byte** op, byte** ip, nuint offset)
{
    assert(*ip <= *op);
    if (offset < 8)
    {
        /* Close overlap: copy 4+4 bytes with table-driven pointer adjustments so that
         * after this call the distance between *op and *ip is at least 8. */
        int sub2 = dec64table[offset];
        (*op)[0] = (*ip)[0];
        (*op)[1] = (*ip)[1];
        (*op)[2] = (*ip)[2];
        (*op)[3] = (*ip)[3];
        *ip += dec32table[offset];
        ZSTD_copy4(*op + 4, *ip);
        *ip -= sub2;
    }
    else
    {
        ZSTD_copy8(*op, *ip);
    }

    *ip += 8;
    *op += 8;
    assert(*op - *ip >= 8);
}

/*! ZSTD_safecopy() :
 * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
 * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
 * This function is only called in the uncommon case where the sequence is near the end of the block. It
 * should be fast for a single long sequence, but can be slow for several short sequences.
 *
 * @param ovtype controls the overlap detection
 * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
 * The src buffer must be before the dst buffer.
 */
private static void ZSTD_safecopy(byte* op, byte* oend_w, byte* ip, nint length, ZSTD_overlap_e ovtype)
{
    nint diff = (nint)(op - ip);
    byte* oend = op + length;
    assert(ovtype == ZSTD_overlap_e.ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w) || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0);
    if (length < 8)
    {
        /* Too short for any 8-byte trick: plain byte-by-byte copy. */
        while (op < oend)
            *op++ = *ip++;
        return;
    }

    if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst)
    {
        /* Spread the overlap so src and dst are at least 8 bytes apart,
         * which makes the wildcopy below safe. */
        assert(length >= 8);
        ZSTD_overlapCopy8(&op, &ip, (nuint)diff);
        length -= 8;
        assert(op - ip >= 8);
        assert(op <= oend);
    }

    if (oend <= oend_w)
    {
        /* Destination fully inside the wildcopy-safe region: one fast copy. */
        ZSTD_wildcopy(op, ip, length, ovtype);
        return;
    }

    if (op <= oend_w)
    {
        /* Wildcopy up to the safe boundary, finish byte-by-byte below. */
        assert(oend > oend_w);
        ZSTD_wildcopy(op, ip, (nint)(oend_w - op), ovtype);
        ip += oend_w - op;
        op += oend_w - op;
    }

    while (op < oend)
        *op++ = *ip++;
}

/* ZSTD_safecopyDstBeforeSrc():
 * This version allows overlap with dst before src, or handles the non-overlap case with dst after src
 * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
private static void ZSTD_safecopyDstBeforeSrc(byte* op, byte* ip, nint length)
{
    nint diff = (nint)(op - ip);
    byte* oend = op + length;
    if (length < 8 || diff > -8)
    {
        /* Short copy, or src/dst closer than 8 bytes: byte-by-byte is the only safe option. */
        while (op < oend)
            *op++ = *ip++;
        return;
    }

    if (op <= oend - 32 && diff < -16)
    {
        /* Enough headroom (32 bytes of dst, >16 bytes of separation) for a wildcopy
         * of everything but the tail. */
        ZSTD_wildcopy(op, ip, (nint)(oend - 32 - op), ZSTD_overlap_e.ZSTD_no_overlap);
        ip += oend - 32 - op;
        op += oend - 32 - op;
    }

    while (op < oend)
        *op++ = *ip++;
}

/* ZSTD_execSequenceEnd():
 * This version handles cases that are near the end of the output buffer. It requires
 * more careful checks to make sure there is no overflow. By separating out these hard
 * and unlikely cases, we can speed up the common cases.
 *
 * NOTE: This function needs to be fast for a single long sequence, but doesn't need
 * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
 */
private static nuint ZSTD_execSequenceEnd(byte* op, byte* oend, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd)
{
    byte* oLitEnd = op + sequence.litLength;
    nuint sequenceLength = sequence.litLength + sequence.matchLength;
    byte* iLitEnd = *litPtr + sequence.litLength;
    byte* match = oLitEnd - sequence.offset;
    /* Last 32 bytes of output are not wildcopy-safe. */
    byte* oend_w = oend - 32;
    /* Bounds checks: the whole sequence must fit in dst, and the literals in the literal buffer. */
    if (sequenceLength > (nuint)(oend - op))
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall));
    }

    if (sequence.litLength > (nuint)(litLimit - *litPtr))
    {
        return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
    }

    assert(op < op + sequenceLength);
    assert(oLitEnd < op + sequenceLength);
    /* Step 1: copy the literals. */
    ZSTD_safecopy(op, oend_w, *litPtr, (nint)sequence.litLength, ZSTD_overlap_e.ZSTD_no_overlap);
    op = oLitEnd;
    *litPtr = iLitEnd;
    /* Step 2: the match. If the offset reaches behind the decoded prefix, part (or all)
     * of the match comes from the dictionary / extDict segment. */
    if (sequence.offset > (nuint)(oLitEnd - prefixStart))
    {
        if (sequence.offset > (nuint)(oLitEnd - virtualStart))
        {
            return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected));
        }

        match = dictEnd - (prefixStart - match);
        if (match + sequence.matchLength <= dictEnd)
        {
            /* Match lies entirely within the dictionary. */
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }

        {
            /* Match spans dictionary and prefix: copy the dictionary part,
             * then continue from the start of the prefix. */
            nuint length1 = (nuint)(dictEnd - match);
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = prefixStart;
        }
    }

    ZSTD_safecopy(op, oend_w, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_overlap_src_before_dst);
    return sequenceLength;
}

/* ZSTD_execSequenceEndSplitLitBuffer():
 * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case.
+ */ + private static nuint ZSTD_execSequenceEndSplitLitBuffer(byte* op, byte* oend, byte* oend_w, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + { + byte* oLitEnd = op + sequence.litLength; + nuint sequenceLength = sequence.litLength + sequence.matchLength; + byte* iLitEnd = *litPtr + sequence.litLength; + byte* match = oLitEnd - sequence.offset; + if (sequenceLength > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (sequence.litLength > (nuint)(litLimit - *litPtr)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + if (op > *litPtr && op < *litPtr + sequence.litLength) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_safecopyDstBeforeSrc(op, *litPtr, (nint)sequence.litLength); + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence.offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence.offset > (nuint)(oLitEnd - virtualStart)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) + { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + + { + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + + ZSTD_safecopy(op, oend_w, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + return sequenceLength; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + { + var sequence_litLength = sequence.litLength; + var 
sequence_matchLength = sequence.matchLength; + var sequence_offset = sequence.offset; + byte* oLitEnd = op + sequence_litLength; + nuint sequenceLength = sequence_litLength + sequence_matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + sequenceLength; + /* risk : address space underflow on oend=NULL */ + byte* oend_w = oend - 32; + byte* iLitEnd = *litPtr + sequence_litLength; + byte* match = oLitEnd - sequence_offset; + assert(op != null); + assert(oend_w < oend); + if (iLitEnd > litLimit || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32) + return ZSTD_execSequenceEnd(op, oend, new seq_t { litLength = sequence_litLength, matchLength = sequence_matchLength, offset = sequence_offset }, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litLimit); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, *litPtr); + if (sequence_litLength > 16) + { + ZSTD_wildcopy(op + 16, *litPtr + 16, (nint)(sequence_litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + } + + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence_offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence_offset > (nuint)(oLitEnd - virtualStart)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + match = dictEnd + (match - prefixStart); + if (match + sequence_matchLength <= dictEnd) + { + memmove(oLitEnd, match, sequence_matchLength); + return sequenceLength; + } + + { + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence_matchLength -= length1; + match = prefixStart; + } + } + + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence_matchLength >= 1); + if (sequence_offset >= 16) + { + ZSTD_wildcopy(op, match, (nint)sequence_matchLength, 
ZSTD_overlap_e.ZSTD_no_overlap); + return sequenceLength; + } + + assert(sequence_offset < 16); + ZSTD_overlapCopy8(ref op, ref match, sequence_offset); + if (sequence_matchLength > 8) + { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, (nint)sequence_matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + } + + return sequenceLength; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* oend_w, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + { + byte* oLitEnd = op + sequence.litLength; + nuint sequenceLength = sequence.litLength + sequence.matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + sequenceLength; + byte* iLitEnd = *litPtr + sequence.litLength; + byte* match = oLitEnd - sequence.offset; + assert(op != null); + assert(oend_w < oend); + if (iLitEnd > litLimit || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32) + return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litLimit); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, *litPtr); + if (sequence.litLength > 16) + { + ZSTD_wildcopy(op + 16, *litPtr + 16, (nint)(sequence.litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + } + + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence.offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence.offset > (nuint)(oLitEnd - virtualStart)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + match = dictEnd + (match - prefixStart); + if (match + sequence.matchLength <= dictEnd) + { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + + { + nuint length1 = 
(nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + if (sequence.offset >= 16) + { + ZSTD_wildcopy(op, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_no_overlap); + return sequenceLength; + } + + assert(sequence.offset < 16); + ZSTD_overlapCopy8(&op, &match, sequence.offset); + if (sequence.matchLength > 8) + { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, (nint)sequence.matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + } + + return sequenceLength; + } + + private static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol* dt) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ushort nextState, uint nbBits) + { + nuint lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = nextState + lowBits; + } + + /** + * ZSTD_decodeSequence(): + * @p longOffsets : tells the decoder to reload more bit while decoding large offsets + * only used in 32-bit mode + * @return : Sequence (litL + matchL + offset) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e longOffsets, int isLastSeq) + { + seq_t seq; + ZSTD_seqSymbol* llDInfo = seqState->stateLL.table + seqState->stateLL.state; + ZSTD_seqSymbol* mlDInfo = seqState->stateML.table + seqState->stateML.state; + ZSTD_seqSymbol* ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = 
llDInfo->baseValue; + { + uint ofBase = ofDInfo->baseValue; + byte llBits = llDInfo->nbAdditionalBits; + byte mlBits = mlDInfo->nbAdditionalBits; + byte ofBits = ofDInfo->nbAdditionalBits; + byte totalBits = (byte)(llBits + mlBits + ofBits); + ushort llNext = llDInfo->nextState; + ushort mlNext = mlDInfo->nextState; + ushort ofNext = ofDInfo->nextState; + uint llnbBits = llDInfo->nbBits; + uint mlnbBits = mlDInfo->nbBits; + uint ofnbBits = ofDInfo->nbBits; + assert(llBits <= 16); + assert(mlBits <= 16); + assert(ofBits <= 31); + { + nuint offset; + if (ofBits > 1) + { + if (MEM_32bits && longOffsets != default && ofBits >= 25) + { + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + const uint extraBits = 30 - 25; + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << (int)extraBits); + BIT_reloadDStream(&seqState->DStream); + offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } + else + { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); + if (MEM_32bits) + BIT_reloadDStream(&seqState->DStream); + } + + seqState->prevOffset.e2 = seqState->prevOffset.e1; + seqState->prevOffset.e1 = seqState->prevOffset.e0; + seqState->prevOffset.e0 = offset; + } + else + { + uint ll0 = llDInfo->baseValue == 0 ? 1U : 0U; + if (ofBits == 0) + { + offset = (&seqState->prevOffset.e0)[ll0]; + seqState->prevOffset.e1 = (&seqState->prevOffset.e0)[ll0 == 0 ? 1 : 0]; + seqState->prevOffset.e0 = offset; + } + else + { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { + nuint temp = offset == 3 ? seqState->prevOffset.e0 - 1 : (&seqState->prevOffset.e0)[offset]; + temp -= temp == 0 ? 
1U : 0U; + if (offset != 1) + seqState->prevOffset.e2 = seqState->prevOffset.e1; + seqState->prevOffset.e1 = seqState->prevOffset.e0; + seqState->prevOffset.e0 = offset = temp; + } + } + } + + seq.offset = offset; + } + + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits); + if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) + BIT_reloadDStream(&seqState->DStream); + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits); + if (MEM_32bits) + BIT_reloadDStream(&seqState->DStream); + if (isLastSeq == 0) + { + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); + if (MEM_32bits) + BIT_reloadDStream(&seqState->DStream); + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); + BIT_reloadDStream(&seqState->DStream); + } + } + + return seq; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litBufferEnd = dctx->litBufferEnd; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* vBase = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) + { + seqState_t seqState; + dctx->fseEntropy = 1; + { + uint i; + for (i = 0; i < 3; i++) + (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; + } + + if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) + { + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != null); + { + /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ + seq_t sequence = new seq_t + { + litLength = 0, + matchLength = 0, + offset = 0 + }; + for (; nbSeq != 0; nbSeq--) + { + sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq == 1 ? 1 : 0); + if (litPtr + sequence.litLength > dctx->litBufferEnd) + break; + { + nuint oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - 32, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + } + + if (nbSeq > 0) + { + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) + { + if (leftoverLit > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequence.litLength -= leftoverLit; + op += leftoverLit; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + { + nuint oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + + nbSeq--; + } + } + + if (nbSeq > 0) + { + for (; nbSeq != 0; nbSeq--) + { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq == 1 ? 
1 : 0); + nuint oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + } + + if (nbSeq != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + if (BIT_endOfDStream(&seqState.DStream) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; + } + } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + } + + { + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memcpy(op, litPtr, (uint)lastLLSize); + op += lastLLSize; + } + } + + return (nuint)(op - ostart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + // HACK, force nbSeq to stack (better register usage) + System.Threading.Thread.VolatileRead(ref nbSeq); + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_not_in_dst ? 
ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize) : dctx->litBuffer; + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litEnd = litPtr + dctx->litSize; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* vBase = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) + { + seqState_t seqState; + System.Runtime.CompilerServices.Unsafe.SkipInit(out seqState); + dctx->fseEntropy = 1; + { + uint i; + for (i = 0; i < 3; i++) + System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)i) = dctx->entropy.rep[i]; + } + + if (ERR_isError(BIT_initDStream(ref seqState.DStream, ip, (nuint)(iend - ip)))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + ZSTD_initFseState(ref seqState.stateLL, ref seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(ref seqState.stateOffb, ref seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(ref seqState.stateML, ref seqState.DStream, dctx->MLTptr); + assert(dst != null); + nuint seqState_DStream_bitContainer = seqState.DStream.bitContainer; + uint seqState_DStream_bitsConsumed = seqState.DStream.bitsConsumed; + sbyte* seqState_DStream_ptr = seqState.DStream.ptr; + sbyte* seqState_DStream_start = seqState.DStream.start; + sbyte* seqState_DStream_limitPtr = seqState.DStream.limitPtr; + for (; nbSeq != 0; nbSeq--) + { + nuint sequence_litLength; + nuint sequence_matchLength; + nuint sequence_offset; + ZSTD_seqSymbol* llDInfo = seqState.stateLL.table + seqState.stateLL.state; + ZSTD_seqSymbol* mlDInfo = seqState.stateML.table + seqState.stateML.state; + ZSTD_seqSymbol* ofDInfo = seqState.stateOffb.table + seqState.stateOffb.state; + sequence_matchLength = mlDInfo->baseValue; + sequence_litLength = llDInfo->baseValue; + { + uint ofBase = ofDInfo->baseValue; + byte llBits = llDInfo->nbAdditionalBits; + byte mlBits = mlDInfo->nbAdditionalBits; + byte ofBits = ofDInfo->nbAdditionalBits; + byte totalBits = (byte)(llBits + mlBits + ofBits); + ushort 
llNext = llDInfo->nextState; + ushort mlNext = mlDInfo->nextState; + ushort ofNext = ofDInfo->nextState; + uint llnbBits = llDInfo->nbBits; + uint mlnbBits = mlDInfo->nbBits; + uint ofnbBits = ofDInfo->nbBits; + assert(llBits <= 16); + assert(mlBits <= 16); + assert(ofBits <= 31); + { + nuint offset; + if (ofBits > 1) + { + if (MEM_32bits && isLongOffset != default && ofBits >= 25) + { + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + const uint extraBits = 30 - 25; + offset = ofBase + (BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofBits - extraBits) << (int)extraBits); + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + offset += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, extraBits); + } + else + { + offset = ofBase + BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofBits); + if (MEM_32bits) + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + } + + seqState.prevOffset.e2 = seqState.prevOffset.e1; + seqState.prevOffset.e1 = seqState.prevOffset.e0; + seqState.prevOffset.e0 = offset; + } + else + { + uint ll0 = llDInfo->baseValue == 0 ? 1U : 0U; + if (ofBits == 0) + { + offset = System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)ll0); + seqState.prevOffset.e1 = System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, ll0 == 0 ? 1 : 0); + seqState.prevOffset.e0 = offset; + } + else + { + offset = ofBase + ll0 + BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, 1); + { + nuint temp = offset == 3 ? 
seqState.prevOffset.e0 - 1 : System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)offset); + temp -= temp == 0 ? 1U : 0U; + if (offset != 1) + seqState.prevOffset.e2 = seqState.prevOffset.e1; + seqState.prevOffset.e1 = seqState.prevOffset.e0; + seqState.prevOffset.e0 = offset = temp; + } + } + } + + sequence_offset = offset; + } + + if (mlBits > 0) + sequence_matchLength += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, mlBits); + if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + if (llBits > 0) + sequence_litLength += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, llBits); + if (MEM_32bits) + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + if ((nbSeq == 1 ? 
1 : 0) == 0) + { + ZSTD_updateFseStateWithDInfo(ref seqState.stateLL, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, llNext, llnbBits); + ZSTD_updateFseStateWithDInfo(ref seqState.stateML, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, mlNext, mlnbBits); + if (MEM_32bits) + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + ZSTD_updateFseStateWithDInfo(ref seqState.stateOffb, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofNext, ofnbBits); + BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + } + } + + nuint oneSeqSize; + { + byte* oLitEnd = op + sequence_litLength; + oneSeqSize = sequence_litLength + sequence_matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + oneSeqSize; + /* risk : address space underflow on oend=NULL */ + byte* oend_w = oend - 32; + byte* iLitEnd = litPtr + sequence_litLength; + byte* match = oLitEnd - sequence_offset; + assert(op != null); + assert(oend_w < oend); + if (iLitEnd > litEnd || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < oneSeqSize + 32) + { + oneSeqSize = ZSTD_execSequenceEnd(op, oend, new seq_t { litLength = sequence_litLength, matchLength = sequence_matchLength, offset = sequence_offset }, &litPtr, litEnd, prefixStart, vBase, dictEnd); + goto returnOneSeqSize; + } + + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litEnd); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, litPtr); + if (sequence_litLength > 16) + { + ZSTD_wildcopy(op + 16, litPtr + 16, (nint)(sequence_litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + } + + byte* opInner = oLitEnd; + litPtr = iLitEnd; + if (sequence_offset > 
(nuint)(oLitEnd - prefixStart)) + { + if (sequence_offset > (nuint)(oLitEnd - vBase)) + { + oneSeqSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + goto returnOneSeqSize; + } + + match = dictEnd + (match - prefixStart); + if (match + sequence_matchLength <= dictEnd) + { + memmove(oLitEnd, match, sequence_matchLength); + goto returnOneSeqSize; + } + + { + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + opInner = oLitEnd + length1; + sequence_matchLength -= length1; + match = prefixStart; + } + } + + assert(opInner <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence_matchLength >= 1); + if (sequence_offset >= 16) + { + ZSTD_wildcopy(opInner, match, (nint)sequence_matchLength, ZSTD_overlap_e.ZSTD_no_overlap); + goto returnOneSeqSize; + } + + assert(sequence_offset < 16); + ZSTD_overlapCopy8(ref opInner, ref match, sequence_offset); + if (sequence_matchLength > 8) + { + assert(opInner < oMatchEnd); + ZSTD_wildcopy(opInner, match, (nint)sequence_matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + } + + returnOneSeqSize: + ; + } + + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + + assert(nbSeq == 0); + if (BIT_endOfDStream(seqState_DStream_bitsConsumed, seqState_DStream_ptr, seqState_DStream_start) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint)System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)i); + } + } + + { + nuint lastLLSize = (nuint)(litEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memcpy(op, litPtr, (uint)lastLLSize); + op += lastLLSize; + } + } + + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressSequences_default(ZSTD_DCtx_s* 
dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + private static nuint ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_prefetchMatch(nuint prefetchPos, seq_t sequence, byte* prefixStart, byte* dictEnd) + { + prefetchPos += sequence.litLength; + { + byte* matchBase = sequence.offset > prefetchPos ? dictEnd : prefixStart; + /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. + * No consequence though : memory address is only used for prefetching, not for dereferencing */ + byte* match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (nint)prefetchPos), (nint)sequence.offset); +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(match); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(match + 64); + } +#endif + } + + return prefetchPos + sequence.matchLength; + } + + /* This decoding function employs prefetching + * to reduce latency impact of cache misses. 
+ * It's generally employed when block contains a significant portion of long-distance matches + * or when coupled with a "cold" dictionary */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litBufferEnd = dctx->litBufferEnd; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* dictStart = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) + { + seq_t* sequences = stackalloc seq_t[8]; + int seqAdvance = nbSeq < 8 ? nbSeq : 8; + seqState_t seqState; + int seqNb; + /* track position relative to prefixStart */ + nuint prefetchPos = (nuint)(op - prefixStart); + dctx->fseEntropy = 1; + { + int i; + for (i = 0; i < 3; i++) + (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; + } + + assert(dst != null); + assert(iend >= ip); + if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + for (seqNb = 0; seqNb < seqAdvance; seqNb++) + { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq - 1 ? 
1 : 0); + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb] = sequence; + } + + for (; seqNb < nbSeq; seqNb++) + { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq - 1 ? 1 : 0); + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split && litPtr + sequences[seqNb - 8 & 8 - 1].litLength > dctx->litBufferEnd) + { + /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) + { + if (leftoverLit > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequences[seqNb - 8 & 8 - 1].litLength -= leftoverLit; + op += leftoverLit; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + { + nuint oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & 8 - 1] = sequence; + op += oneSeqSize; + } + } + else + { + /* lit buffer is either wholly contained in first or second split, or not split at all*/ + nuint oneSeqSize = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split ? 
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[seqNb - 8 & 8 - 1].litLength - 32, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : ZSTD_execSequence(op, oend, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & 8 - 1] = sequence; + op += oneSeqSize; + } + } + + if (BIT_endOfDStream(&seqState.DStream) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + seqNb -= seqAdvance; + for (; seqNb < nbSeq; seqNb++) + { + seq_t* sequence = &sequences[seqNb & 8 - 1]; + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) + { + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) + { + if (leftoverLit > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequence->litLength -= leftoverLit; + op += leftoverLit; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + { + nuint oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + } + else + { + nuint oneSeqSize = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split ? 
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - 32, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; + } + } + + { + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; + } + } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + } + + { + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressSequencesLong_default(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + private static nuint ZSTD_decompressSequences(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + private static nuint ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return 
ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + /* ZSTD_decompressSequencesLong() : + * decompression function triggered when a minimum share of offsets is considered "long", + * aka out of cache. + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". + * This function will try to mitigate main memory latency through the use of prefetching */ + private static nuint ZSTD_decompressSequencesLong(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + { + return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } + + /** + * @returns The total size of the history referenceable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ + private static nuint ZSTD_totalHistorySize(byte* op, byte* virtualStart) + { + return (nuint)(op - virtualStart); + } + + /* ZSTD_getOffsetInfo() : + * condition : offTable must be valid + * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) + * compared to maximum possible of (1< table[u].nbAdditionalBits ? info.maxNbAdditionalBits : table[u].nbAdditionalBits; + if (table[u].nbAdditionalBits > 22) + info.longOffsetShare += 1; + } + + assert(tableLog <= 8); + info.longOffsetShare <<= (int)(8 - tableLog); + } + + return info; + } + + /** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. + */ + private static nuint ZSTD_maxShortOffset() + { + if (MEM_64bits) + { + return unchecked((nuint)(-1)); + } + else + { + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. 
+ * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. + */ + nuint maxOffbase = ((nuint)1 << (int)((uint)(MEM_32bits ? 25 : 57) + 1)) - 1; + nuint maxOffset = maxOffbase - 3; + assert(ZSTD_highbit32((uint)maxOffbase) == (uint)(MEM_32bits ? 25 : 57)); + return maxOffset; + } + } + + /* ZSTD_decompressBlock_internal() : + * decompress block, starting at `src`, + * into destination buffer `dst`. + * @return : decompressed block size, + * or an error code (which can be tested using ZSTD_isError()) + */ + private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, streaming_operation streaming) + { + byte* ip = (byte*)src; + if (srcSize > ZSTD_blockSizeMax(dctx)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + { + nuint litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); + if (ERR_isError(litCSize)) + return litCSize; + ip += litCSize; + srcSize -= litCSize; + } + + { + /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + nuint blockSizeMax = dstCapacity < ZSTD_blockSizeMax(dctx) ? dstCapacity : ZSTD_blockSizeMax(dctx); + nuint totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((byte*)dst, (nint)blockSizeMax), (byte*)dctx->virtualStart); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possible have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. 
+ * + * If isLongOffsets is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits && totalHistorySize > ZSTD_maxShortOffset() ? 1 : 0); + int usePrefetchDecoder = dctx->ddictIsCold; + int nbSeq; + nuint seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); + if (ERR_isError(seqHSize)) + return seqHSize; + ip += seqHSize; + srcSize -= seqHSize; + if ((dst == null || dstCapacity == 0) && nbSeq > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (MEM_64bits && sizeof(nuint) == sizeof(void*) && unchecked((nuint)(-1)) - (nuint)dst < 1 << 20) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (isLongOffset != default || usePrefetchDecoder == 0 && totalHistorySize > 1U << 24 && nbSeq > 8) + { + ZSTD_OffsetInfo info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); + if (isLongOffset != default && info.maxNbAdditionalBits <= (uint)(MEM_32bits ? 25 : 57)) + { + isLongOffset = ZSTD_longOffset_e.ZSTD_lo_isRegularOffset; + } + + if (usePrefetchDecoder == 0) + { + /* heuristic values, correspond to 2.73% and 7.81% */ + uint minShare = (uint)(MEM_64bits ? 7 : 20); + usePrefetchDecoder = info.longOffsetShare >= minShare ? 1 : 0; + } + } + + dctx->ddictIsCold = 0; + if (usePrefetchDecoder != 0) + { + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + else + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + } + } + + /*! ZSTD_checkContinuity() : + * check if next `dst` follows previous position, where decompression ended. + * If yes, do nothing (continue on current segment). 
+ * If not, classify previous segment as "external dictionary", and start a new segment. + * This function cannot fail. */ + private static void ZSTD_checkContinuity(ZSTD_DCtx_s* dctx, void* dst, nuint dstSize) + { + if (dst != dctx->previousDstEnd && dstSize > 0) + { + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = (sbyte*)dst - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->prefixStart = dst; + dctx->previousDstEnd = dst; + } + } + + /* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */ + private static nuint ZSTD_decompressBlock_deprecated(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint dSize; + dctx->isFrameDecompression = 0; + ZSTD_checkContinuity(dctx, dst, dstCapacity); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, streaming_operation.not_streaming); + { + nuint err_code = dSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + dctx->previousDstEnd = (sbyte*)dst + dSize; + return dSize; + } + + /* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ + public static nuint ZSTD_decompressBlock(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); + } + + private static void ZSTD_initFseState(ref ZSTD_fseState DStatePtr, ref BIT_DStream_t bitD, ZSTD_seqSymbol* dt) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog); + BIT_reloadDStream(ref bitD.bitContainer, ref bitD.bitsConsumed, ref bitD.ptr, bitD.start, bitD.limitPtr); + DStatePtr.table = dt + 1; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateFseStateWithDInfo(ref ZSTD_fseState DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ushort nextState, uint nbBits) + { + nuint 
lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); + DStatePtr.state = nextState + lowBits; + } + + /*! ZSTD_overlapCopy8() : + * Copies 8 bytes from ip to op and updates op and ip where ip <= op. + * If the offset is < 8 then the offset is spread to at least 8 bytes. + * + * Precondition: *ip <= *op + * Postcondition: *op - *op >= 8 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_overlapCopy8(ref byte* op, ref byte* ip, nuint offset) + { + assert(ip <= op); + if (offset < 8) + { + int sub2 = dec64table[offset]; + op[0] = ip[0]; + op[1] = ip[1]; + op[2] = ip[2]; + op[3] = ip[3]; + ip += dec32table[offset]; + ZSTD_copy4(op + 4, ip); + ip -= sub2; + } + else + { + ZSTD_copy8(op, ip); + } + + ip += 8; + op += 8; + assert(op - ip >= 8); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs new file mode 100644 index 000000000..5b37eaa40 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs @@ -0,0 +1,205 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_LL_base => new uint[36] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 18, + 20, + 22, + 24, + 28, + 32, + 40, + 48, + 64, + 0x80, + 0x100, + 0x200, + 0x400, + 0x800, + 0x1000, + 0x2000, + 0x4000, + 0x8000, + 0x10000 + }; + private static uint* LL_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_base)); +#else + + private static readonly uint* LL_base = GetArrayPointer(new uint[36] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 
0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }); +#endif +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_OF_base => new uint[32] + { + 0, + 1, + 1, + 5, + 0xD, + 0x1D, + 0x3D, + 0x7D, + 0xFD, + 0x1FD, + 0x3FD, + 0x7FD, + 0xFFD, + 0x1FFD, + 0x3FFD, + 0x7FFD, + 0xFFFD, + 0x1FFFD, + 0x3FFFD, + 0x7FFFD, + 0xFFFFD, + 0x1FFFFD, + 0x3FFFFD, + 0x7FFFFD, + 0xFFFFFD, + 0x1FFFFFD, + 0x3FFFFFD, + 0x7FFFFFD, + 0xFFFFFFD, + 0x1FFFFFFD, + 0x3FFFFFFD, + 0x7FFFFFFD + }; + private static uint* OF_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_base)); +#else + + private static readonly uint* OF_base = GetArrayPointer(new uint[32] { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }); +#endif +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_OF_bits => new byte[32] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31 + }; + private static byte* OF_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_bits)); +#else + + private static readonly byte* OF_bits = GetArrayPointer(new byte[32] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }); +#endif +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_ML_base => new uint[53] + { + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 37, + 39, + 41, + 43, + 47, + 51, + 59, + 67, + 83, + 99, + 0x83, + 0x103, + 0x203, + 0x403, + 0x803, + 0x1003, + 0x2003, 
+ 0x4003, + 0x8003, + 0x10003 + }; + private static uint* ML_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_base)); +#else + + private static readonly uint* ML_base = GetArrayPointer(new uint[53] { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }); +#endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_DCtx_get_bmi2(ZSTD_DCtx_s* dctx) + { + return 0; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs new file mode 100644 index 000000000..ec35208f2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs @@ -0,0 +1,873 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLarge = ms->hashTable; + uint hBitsL = cParams->hashLog + 8; + uint mls = cParams->minMatch; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog + 8; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) + { + uint curr = (uint)(ip - @base); + uint i; + for (i = 0; i < fastHashFillStep; ++i) + { + nuint smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); + nuint lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) + { + 
ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); + } + + if (i == 0 || hashLarge[lgHashAndTag >> 8] == 0) + { + ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); + } + + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + break; + } + } + } + + private static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLarge = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint mls = cParams->minMatch; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) + { + uint curr = (uint)(ip - @base); + uint i; + for (i = 0; i < fastHashFillStep; ++i) + { + nuint smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); + nuint lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) + hashSmall[smHash] = curr + i; + if (i == 0 || hashLarge[lgHash] == 0) + hashLarge[lgHash] = curr + i; + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + break; + } + } + } + + private static void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + { + if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) + { + ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); + } + else + { + ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); + } + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_dummy => new byte[10] + { + 0x12, + 0x34, + 0x56, + 0x78, + 0x9a, + 0xbc, + 0xde, + 0xf0, + 0xe2, + 0xb4 + }; + private static byte* dummy => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dummy)); +#else + + private static readonly byte* dummy = GetArrayPointer(new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 }); 
+#endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixLowest = @base + prefixLowestIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], offset_2 = rep[1]; + uint offsetSaved1 = 0, offsetSaved2 = 0; + nuint mLength; + uint offset; + uint curr; + /* how many positions to search before increasing step size */ + const nuint kStepIncr = 1 << 8; + /* the position at which to increment the step size if no match is found */ + byte* nextStep; + /* the current step size */ + nuint step; + /* the long hash at ip */ + nuint hl0; + /* the long hash at ip1 */ + nuint hl1; + /* the long match index for ip */ + uint idxl0; + /* the long match index for ip1 */ + uint idxl1; + /* the long match for ip */ + byte* matchl0; + /* the short match for ip */ + byte* matchs0; + /* the long match for ip1 */ + byte* matchl1; + /* matchs0 or safe address */ + byte* matchs0_safe; + /* the current position */ + byte* ip = istart; + /* the next position */ + byte* ip1; + ip += ip - prefixLowest == 0 ? 
1 : 0; + { + uint current = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + uint maxRep = current - windowLow; + if (offset_2 > maxRep) + { + offsetSaved2 = offset_2; + offset_2 = 0; + } + + if (offset_1 > maxRep) + { + offsetSaved1 = offset_1; + offset_1 = 0; + } + } + + while (true) + { + step = 1; + nextStep = ip + kStepIncr; + ip1 = ip + step; + if (ip1 > ilimit) + { + goto _cleanup; + } + + hl0 = ZSTD_hashPtr(ip, hBitsL, 8); + idxl0 = hashLong[hl0]; + matchl0 = @base + idxl0; + do + { + nuint hs0 = ZSTD_hashPtr(ip, hBitsS, mls); + uint idxs0 = hashSmall[hs0]; + curr = (uint)(ip - @base); + matchs0 = @base + idxs0; + hashLong[hl0] = hashSmall[hs0] = curr; + if (offset_1 > 0 && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1)) + { + mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + goto _match_stored; + } + + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + { + byte* matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); + if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) + { + mLength = ZSTD_count(ip + 8, matchl0 + 8, iend) + 8; + offset = (uint)(ip - matchl0); + while (ip > anchor && matchl0 > prefixLowest && ip[-1] == matchl0[-1]) + { + ip--; + matchl0--; + mLength++; + } + + goto _match_found; + } + } + + idxl1 = hashLong[hl1]; + matchl1 = @base + idxl1; + matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); + if (MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) + { + goto _search_next_long; + } + + if (ip1 >= nextStep) + { +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); + } +#endif + + step++; + nextStep += kStepIncr; + } + + ip = 
ip1; + ip1 += step; + hl0 = hl1; + idxl0 = idxl1; + matchl0 = matchl1; + } + while (ip1 <= ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + _search_next_long: + mLength = ZSTD_count(ip + 4, matchs0 + 4, iend) + 4; + offset = (uint)(ip - matchs0); + if (idxl1 > prefixLowestIndex && MEM_read64(matchl1) == MEM_read64(ip1)) + { + nuint l1len = ZSTD_count(ip1 + 8, matchl1 + 8, iend) + 8; + if (l1len > mLength) + { + ip = ip1; + mLength = l1len; + offset = (uint)(ip - matchl1); + matchs0 = matchl1; + } + } + + while (ip > anchor && matchs0 > prefixLowest && ip[-1] == matchs0[-1]) + { + ip--; + matchs0--; + mLength++; + } + + _match_found: + offset_2 = offset_1; + offset_1 = offset; + if (step < 4) + { + hashLong[hl1] = (uint)(ip1 - @base); + } + + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + _match_stored: + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { + { + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); + } + + while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) + { + /* store sequence */ + nuint rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOff = offset_2; + offset_2 = offset_1; + offset_1 = tmpOff; + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (uint)(ip - @base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (uint)(ip - @base); + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); + ip += rLength; + anchor = ip; 
+ continue; + } + } + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixLowest = @base + prefixLowestIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], offset_2 = rep[1]; + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dictCParams = &dms->cParams; + uint* dictHashLong = dms->hashTable; + uint* dictHashSmall = dms->chainTable; + uint dictStartIndex = dms->window.dictLimit; + byte* dictBase = dms->window.@base; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dms->window.nextSrc; + uint dictIndexDelta = prefixLowestIndex - (uint)(dictEnd - dictBase); + uint dictHBitsL = dictCParams->hashLog + 8; + uint dictHBitsS = dictCParams->chainLog + 8; + uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictStart)); + assert(ms->window.dictLimit + (1U << (int)cParams->windowLog) >= endIndex); + if (ms->prefetchCDictTables != 0) + { + nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); + nuint chainTableBytes = ((nuint)1 << (int)dictCParams->chainLog) * sizeof(uint); + { + sbyte* _ptr = (sbyte*)dictHashLong; + nuint _size = hashTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { +#if NETCOREAPP3_0_OR_GREATER + if 
(System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } +#endif + } + } + + { + sbyte* _ptr = (sbyte*)dictHashSmall; + nuint _size = chainTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } +#endif + } + } + } + + ip += dictAndPrefixLength == 0 ? 1 : 0; + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + while (ip < ilimit) + { + nuint mLength; + uint offset; + nuint h2 = ZSTD_hashPtr(ip, hBitsL, 8); + nuint h = ZSTD_hashPtr(ip, hBitsS, mls); + nuint dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); + nuint dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); + uint dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> 8]; + uint dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> 8]; + int dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); + int dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); + uint curr = (uint)(ip - @base); + uint matchIndexL = hashLong[h2]; + uint matchIndexS = hashSmall[h]; + byte* matchLong = @base + matchIndexL; + byte* match = @base + matchIndexS; + uint repIndex = curr + 1 - offset_1; + byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + hashLong[h2] = hashSmall[h] = curr; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + goto _match_stored; + } + + if (matchIndexL >= prefixLowestIndex && MEM_read64(matchLong) == MEM_read64(ip)) + { + mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; + offset = (uint)(ip - matchLong); + while (ip > anchor && matchLong > prefixLowest && ip[-1] == matchLong[-1]) + { + ip--; + matchLong--; + mLength++; + } + + goto _match_found; + } + else if (dictTagsMatchL != 0) + { + /* check dictMatchState long match */ + uint dictMatchIndexL = dictMatchIndexAndTagL >> 8; + byte* dictMatchL = dictBase + dictMatchIndexL; + assert(dictMatchL < dictEnd); + if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) + { + mLength = ZSTD_count_2segments(ip + 8, dictMatchL + 8, iend, dictEnd, prefixLowest) + 8; + offset = curr - dictMatchIndexL - dictIndexDelta; + while (ip > anchor && dictMatchL > dictStart && ip[-1] == dictMatchL[-1]) + { + ip--; + dictMatchL--; + mLength++; + } + + goto _match_found; + } + } + + if (matchIndexS > prefixLowestIndex) + { + if (MEM_read32(match) == MEM_read32(ip)) + { + goto _search_next_long; + } + } + else if (dictTagsMatchS != 0) + { + /* check dictMatchState short match */ + uint dictMatchIndexS = dictMatchIndexAndTagS >> 8; + match = dictBase + dictMatchIndexS; + matchIndexS = dictMatchIndexS + dictIndexDelta; + if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) + { + goto _search_next_long; + } + } + + ip += (ip - anchor >> 8) + 1; + continue; + _search_next_long: + { + nuint hl3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); + nuint dictHashAndTagL3 = ZSTD_hashPtr(ip + 1, dictHBitsL, 8); + uint matchIndexL3 = hashLong[hl3]; + uint dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> 8]; + int dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); + 
byte* matchL3 = @base + matchIndexL3; + hashLong[hl3] = curr + 1; + if (matchIndexL3 >= prefixLowestIndex && MEM_read64(matchL3) == MEM_read64(ip + 1)) + { + mLength = ZSTD_count(ip + 9, matchL3 + 8, iend) + 8; + ip++; + offset = (uint)(ip - matchL3); + while (ip > anchor && matchL3 > prefixLowest && ip[-1] == matchL3[-1]) + { + ip--; + matchL3--; + mLength++; + } + + goto _match_found; + } + else if (dictTagsMatchL3 != 0) + { + /* check dict long +1 match */ + uint dictMatchIndexL3 = dictMatchIndexAndTagL3 >> 8; + byte* dictMatchL3 = dictBase + dictMatchIndexL3; + assert(dictMatchL3 < dictEnd); + if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip + 1)) + { + mLength = ZSTD_count_2segments(ip + 1 + 8, dictMatchL3 + 8, iend, dictEnd, prefixLowest) + 8; + ip++; + offset = curr + 1 - dictMatchIndexL3 - dictIndexDelta; + while (ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1]) + { + ip--; + dictMatchL3--; + mLength++; + } + + goto _match_found; + } + } + } + + if (matchIndexS < prefixLowestIndex) + { + mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; + offset = curr - matchIndexS; + while (ip > anchor && match > dictStart && ip[-1] == match[-1]) + { + ip--; + match--; + mLength++; + } + } + else + { + mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; + offset = (uint)(ip - match); + while (ip > anchor && match > prefixLowest && ip[-1] == match[-1]) + { + ip--; + match--; + mLength++; + } + } + + _match_found: + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + _match_stored: + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { + { + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + 
hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); + } + + while (ip <= ilimit) + { + uint current2 = (uint)(ip - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = repIndex2 < prefixLowestIndex ? dictBase + repIndex2 - dictIndexDelta : @base + repIndex2; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2) != 0 && MEM_read32(repMatch2) == MEM_read32(ip)) + { + byte* repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; + nuint repLength2 = ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixLowest) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + + break; + } + } + } + + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_doubleFast_noDict_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4); + } + + private static nuint ZSTD_compressBlock_doubleFast_noDict_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 5); + } + + private static nuint ZSTD_compressBlock_doubleFast_noDict_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 6); + } + + private static nuint ZSTD_compressBlock_doubleFast_noDict_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, 
src, srcSize, 7); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + } + + private static nuint ZSTD_compressBlock_doubleFast(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mls = ms->cParams.minMatch; + switch (mls) + { + default: + case 4: + return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); + } + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mls = ms->cParams.minMatch; + switch (mls) + { + default: + case 4: + return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); + case 5: + return 
ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); + } + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* @base = ms->window.@base; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + uint dictStartIndex = lowLimit; + uint dictLimit = ms->window.dictLimit; + uint prefixStartIndex = dictLimit > lowLimit ? dictLimit : lowLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* dictBase = ms->window.dictBase; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dictBase + prefixStartIndex; + uint offset_1 = rep[0], offset_2 = rep[1]; + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); + while (ip < ilimit) + { + nuint hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + uint matchIndex = hashSmall[hSmall]; + byte* matchBase = matchIndex < prefixStartIndex ? dictBase : @base; + byte* match = matchBase + matchIndex; + nuint hLong = ZSTD_hashPtr(ip, hBitsL, 8); + uint matchLongIndex = hashLong[hLong]; + byte* matchLongBase = matchLongIndex < prefixStartIndex ? 
dictBase : @base; + byte* matchLong = matchLongBase + matchLongIndex; + uint curr = (uint)(ip - @base); + /* offset_1 expected <= curr +1 */ + uint repIndex = curr + 1 - offset_1; + byte* repBase = repIndex < prefixStartIndex ? dictBase : @base; + byte* repMatch = repBase + repIndex; + nuint mLength; + hashSmall[hSmall] = hashLong[hLong] = curr; + if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex) & (offset_1 <= curr + 1 - dictStartIndex ? 1 : 0)) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + { + byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + } + else + { + if (matchLongIndex > dictStartIndex && MEM_read64(matchLong) == MEM_read64(ip)) + { + byte* matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; + byte* lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; + uint offset; + mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) + 8; + offset = curr - matchLongIndex; + while (ip > anchor && matchLong > lowMatchPtr && ip[-1] == matchLong[-1]) + { + ip--; + matchLong--; + mLength++; + } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + } + else if (matchIndex > dictStartIndex && MEM_read32(match) == MEM_read32(ip)) + { + nuint h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); + uint matchIndex3 = hashLong[h3]; + byte* match3Base = matchIndex3 < prefixStartIndex ? dictBase : @base; + byte* match3 = match3Base + matchIndex3; + uint offset; + hashLong[h3] = curr + 1; + if (matchIndex3 > dictStartIndex && MEM_read64(match3) == MEM_read64(ip + 1)) + { + byte* matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; + byte* lowMatchPtr = matchIndex3 < prefixStartIndex ? 
dictStart : prefixStart; + mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, prefixStart) + 8; + ip++; + offset = curr + 1 - matchIndex3; + while (ip > anchor && match3 > lowMatchPtr && ip[-1] == match3[-1]) + { + ip--; + match3--; + mLength++; + } + } + else + { + byte* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; + byte* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; + mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) + 4; + offset = curr - matchIndex; + while (ip > anchor && match > lowMatchPtr && ip[-1] == match[-1]) + { + ip--; + match--; + mLength++; + } + } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + } + else + { + ip += (ip - anchor >> 8) + 1; + continue; + } + } + + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { + { + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); + } + + while (ip <= ilimit) + { + uint current2 = (uint)(ip - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; + if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2) & (offset_2 <= current2 - dictStartIndex ? 1 : 0)) != 0 && MEM_read32(repMatch2) == MEM_read32(ip)) + { + byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + nuint repLength2 = ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + + break; + } + } + } + + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mls = ms->cParams.minMatch; + switch (mls) + { + default: + case 4: + return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); + case 6: + return 
ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs new file mode 100644 index 000000000..e4a03f891 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs @@ -0,0 +1,941 @@ +using static ZstdSharp.UnsafeHelper; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hBits = cParams->hashLog + 8; + uint mls = cParams->minMatch; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full); + for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) + { + uint curr = (uint)(ip - @base); + { + nuint hashAndTag = ZSTD_hashPtr(ip, hBits, mls); + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); + } + + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + continue; + { + uint p; + for (p = 1; p < fastHashFillStep; ++p) + { + nuint hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> 8] == 0) + { + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); + } + } + } + } + } + + private static void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hBits = cParams->hashLog; + uint mls = cParams->minMatch; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + 
byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast); + for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) + { + uint curr = (uint)(ip - @base); + nuint hash0 = ZSTD_hashPtr(ip, hBits, mls); + hashTable[hash0] = curr; + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + continue; + { + uint p; + for (p = 1; p < fastHashFillStep; ++p) + { + nuint hash = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hash] == 0) + { + hashTable[hash] = curr + p; + } + } + } + } + } + + private static void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + { + if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) + { + ZSTD_fillHashTableForCDict(ms, end, dtlm); + } + else + { + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_match4Found_cmov(byte* currentPtr, byte* matchAddress, uint matchIdx, uint idxLowLimit) + { + /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. + * However expression below compiles into conditional move. + */ + byte* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); + if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) + return 0; + return matchIdx >= idxLowLimit ? 1 : 0; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_match4Found_branch(byte* currentPtr, byte* matchAddress, uint matchIdx, uint idxLowLimit) + { + /* using a branch instead of a cmov, + * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, + * aka almost all candidates are within range */ + uint mval; + if (matchIdx >= idxLowLimit) + { + mval = MEM_read32(matchAddress); + } + else + { + mval = MEM_read32(currentPtr) ^ 1; + } + + return MEM_read32(currentPtr) == mval ? 
1 : 0; + } + + /** + * If you squint hard enough (and ignore repcodes), the search operation at any + * given position is broken into 4 stages: + * + * 1. Hash (map position to hash value via input read) + * 2. Lookup (map hash val to index via hashtable read) + * 3. Load (map index to value at that position via input read) + * 4. Compare + * + * Each of these steps involves a memory read at an address which is computed + * from the previous step. This means these steps must be sequenced and their + * latencies are cumulative. + * + * Rather than do 1->2->3->4 sequentially for a single position before moving + * onto the next, this implementation interleaves these operations across the + * next few positions: + * + * R = Repcode Read & Compare + * H = Hash + * T = Table Lookup + * M = Match Read & Compare + * + * Pos | Time --> + * ----+------------------- + * N | ... M + * N+1 | ... TM + * N+2 | R H T M + * N+3 | H TM + * N+4 | R H T M + * N+5 | H ... + * N+6 | R ... + * + * This is very much analogous to the pipelining of execution in a CPU. And just + * like a CPU, we have to dump the pipeline when we find a match (i.e., take a + * branch). + * + * When this happens, we throw away our current state, and do the following prep + * to re-enter the loop: + * + * Pos | Time --> + * ----+------------------- + * N | H T + * N+1 | H + * + * This is also the work we do at the beginning to enter the loop initially. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, int useCmov) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* min 2 */ + nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 
1 : 0) + 1; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixStart = @base + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* anchor = istart; + byte* ip0 = istart; + byte* ip1; + byte* ip2; + byte* ip3; + uint current0; + uint rep_offset1 = rep[0]; + uint rep_offset2 = rep[1]; + uint offsetSaved1 = 0, offsetSaved2 = 0; + /* hash for ip0 */ + nuint hash0; + /* hash for ip1 */ + nuint hash1; + /* match idx for ip0 */ + uint matchIdx; + uint offcode; + byte* match0; + nuint mLength; + /* ip0 and ip1 are always adjacent. The targetLength skipping and + * uncompressibility acceleration is applied to every other position, + * matching the behavior of #1562. step therefore represents the gap + * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ + nuint step; + byte* nextStep; + const nuint kStepIncr = 1 << 8 - 1; + void* matchFound = useCmov != 0 ? (delegate* managed)(&ZSTD_match4Found_cmov) : (delegate* managed)(&ZSTD_match4Found_branch); + ip0 += ip0 == prefixStart ? 
1 : 0; + { + uint curr = (uint)(ip0 - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); + uint maxRep = curr - windowLow; + if (rep_offset2 > maxRep) + { + offsetSaved2 = rep_offset2; + rep_offset2 = 0; + } + + if (rep_offset1 > maxRep) + { + offsetSaved1 = rep_offset1; + rep_offset1 = 0; + } + } + + _start: + step = stepSize; + nextStep = ip0 + kStepIncr; + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + if (ip3 >= ilimit) + { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + matchIdx = hashTable[hash0]; + do + { + /* load repcode match for ip[2]*/ + uint rval = MEM_read32(ip2 - rep_offset1); + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if (MEM_read32(ip2) == rval && rep_offset1 > 0) + { + ip0 = ip2; + match0 = ip0 - rep_offset1; + mLength = ip0[-1] == match0[-1] ? 1U : 0U; + ip0 -= mLength; + match0 -= mLength; + assert(1 >= 1); + assert(1 <= 3); + offcode = 1; + mLength += 4; + hashTable[hash1] = (uint)(ip1 - @base); + goto _match; + } + + if (((delegate* managed)matchFound)(ip0, @base + matchIdx, matchIdx, prefixStartIndex) != 0) + { + hashTable[hash1] = (uint)(ip1 - @base); + goto _offset; + } + + matchIdx = hashTable[hash1]; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if (((delegate* managed)matchFound)(ip0, @base + matchIdx, matchIdx, prefixStartIndex) != 0) + { + if (step <= 4) + { + hashTable[hash1] = (uint)(ip1 - @base); + } + + goto _offset; + } + + matchIdx = hashTable[hash1]; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + if (ip2 >= nextStep) + { + step++; +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + 
System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); + } +#endif + + nextStep += kStepIncr; + } + } + while (ip3 < ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && rep_offset1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = rep_offset1 != 0 ? rep_offset1 : offsetSaved1; + rep[1] = rep_offset2 != 0 ? rep_offset2 : offsetSaved2; + return (nuint)(iend - anchor); + _offset: + match0 = @base + matchIdx; + rep_offset2 = rep_offset1; + rep_offset1 = (uint)(ip0 - match0); + assert(rep_offset1 > 0); + offcode = rep_offset1 + 3; + mLength = 4; + while (ip0 > anchor && match0 > prefixStart && ip0[-1] == match0[-1]) + { + ip0--; + match0--; + mLength++; + } + + _match: + mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); + ip0 += mLength; + anchor = ip0; + if (ip0 <= ilimit) + { + assert(@base + current0 + 2 > istart); + hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + if (rep_offset2 > 0) + { + while (ip0 <= ilimit && MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) + { + /* store sequence */ + nuint rLength = ZSTD_count(ip0 + 4, ip0 + 4 - rep_offset2, iend) + 4; + { + /* swap rep_offset2 <=> rep_offset1 */ + uint tmpOff = rep_offset2; + rep_offset2 = rep_offset1; + rep_offset1 = tmpOff; + } + + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); + ip0 += rLength; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); + anchor = ip0; + continue; + } + } + } + + goto _start; + } + + private static nuint ZSTD_compressBlock_fast_noDict_4_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1); + } + + private static nuint ZSTD_compressBlock_fast_noDict_5_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, 
void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 1); + } + + private static nuint ZSTD_compressBlock_fast_noDict_6_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 1); + } + + private static nuint ZSTD_compressBlock_fast_noDict_7_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 1); + } + + private static nuint ZSTD_compressBlock_fast_noDict_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); + } + + private static nuint ZSTD_compressBlock_fast_noDict_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); + } + + private static nuint ZSTD_compressBlock_fast_noDict_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); + } + + private static nuint ZSTD_compressBlock_fast_noDict_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); + } + + private static nuint ZSTD_compressBlock_fast(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mml = ms->cParams.minMatch; + /* use cmov when "candidate in range" branch is likely unpredictable */ + int useCmov = ms->cParams.windowLog < 19 ? 
1 : 0; + assert(ms->dictMatchState == null); + if (useCmov != 0) + { + switch (mml) + { + default: + case 4: + return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); + } + } + else + { + switch (mml) + { + default: + case 4: + return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); + } + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, uint hasStep) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* support stepSize of 0 */ + uint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 
1 : 0); + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* ip0 = istart; + /* we assert below that stepSize >= 1 */ + byte* ip1 = ip0 + stepSize; + byte* anchor = istart; + uint prefixStartIndex = ms->window.dictLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], offset_2 = rep[1]; + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dictCParams = &dms->cParams; + uint* dictHashTable = dms->hashTable; + uint dictStartIndex = dms->window.dictLimit; + byte* dictBase = dms->window.@base; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dms->window.nextSrc; + uint dictIndexDelta = prefixStartIndex - (uint)(dictEnd - dictBase); + uint dictAndPrefixLength = (uint)(istart - prefixStart + dictEnd - dictStart); + uint dictHBits = dictCParams->hashLog + 8; + /* if a dictionary is still attached, it necessarily means that + * it is within window size. So we just check it. */ + uint maxDistance = 1U << (int)cParams->windowLog; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + assert(endIndex - prefixStartIndex <= maxDistance); + assert(prefixStartIndex >= (uint)(dictEnd - dictBase)); + if (ms->prefetchCDictTables != 0) + { + nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); + { + sbyte* _ptr = (sbyte*)dictHashTable; + nuint _size = hashTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } +#endif + } + } + } + + ip0 += dictAndPrefixLength == 0 ? 
1 : 0; + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + assert(stepSize >= 1); + while (ip1 <= ilimit) + { + nuint mLength; + nuint hash0 = ZSTD_hashPtr(ip0, hlog, mls); + nuint dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); + uint dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> 8]; + int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); + uint matchIndex = hashTable[hash0]; + uint curr = (uint)(ip0 - @base); + nuint step = stepSize; + const nuint kStepIncr = 1 << 8; + byte* nextStep = ip0 + kStepIncr; + while (true) + { + byte* match = @base + matchIndex; + uint repIndex = curr + 1 - offset_1; + byte* repMatch = repIndex < prefixStartIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + nuint hash1 = ZSTD_hashPtr(ip1, hlog, mls); + nuint dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; + if (ZSTD_index_overlap_check(prefixStartIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip0 + 1)) + { + byte* repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; + mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + ip0++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, 1, mLength); + break; + } + + if (dictTagsMatch != 0) + { + /* Found a possible dict match */ + uint dictMatchIndex = dictMatchIndexAndTag >> 8; + byte* dictMatch = dictBase + dictMatchIndex; + if (dictMatchIndex > dictStartIndex && MEM_read32(dictMatch) == MEM_read32(ip0)) + { + if (matchIndex <= prefixStartIndex) + { + uint offset = curr - dictMatchIndex - dictIndexDelta; + mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; + while (ip0 > anchor && dictMatch > dictStart && ip0[-1] == dictMatch[-1]) + { + ip0--; + dictMatch--; + mLength++; + } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offset + 3, mLength); + break; + } + } + } + + if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex) != 0) + { + /* found a regular match of size >= 4 */ + uint offset = (uint)(ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (ip0 > anchor && match > prefixStart && ip0[-1] == match[-1]) + { + ip0--; + match--; + mLength++; + } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offset + 3, mLength); + break; + } + + dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> 8]; + dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); + matchIndex = hashTable[hash1]; + if (ip1 >= nextStep) + { + step++; + nextStep += kStepIncr; + } + + ip0 = ip1; + ip1 = ip1 + step; + if (ip1 > ilimit) + goto _cleanup; + curr = (uint)(ip0 - @base); + hash0 = hash1; + } + + assert(mLength != 0); + ip0 += mLength; + anchor = ip0; + if (ip0 <= ilimit) + { + assert(@base + curr + 2 > istart); + hashTable[ZSTD_hashPtr(@base + 
curr + 2, hlog, mls)] = curr + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + while (ip0 <= ilimit) + { + uint current2 = (uint)(ip0 - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : @base + repIndex2; + if (ZSTD_index_overlap_check(prefixStartIndex, repIndex2) != 0 && MEM_read32(repMatch2) == MEM_read32(ip0)) + { + byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + nuint repLength2 = ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; + ip0 += repLength2; + anchor = ip0; + continue; + } + + break; + } + } + + assert(ip0 == anchor); + ip1 = ip0 + stepSize; + } + + _cleanup: + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4, 0); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5, 0); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6, 0); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + 
return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7, 0); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mls = ms->cParams.minMatch; + assert(ms->dictMatchState != null); + switch (mls) + { + default: + case 4: + return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); + } + } + + private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, uint hasStep) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* support stepSize of 0 */ + nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 1 : 0) + 1; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + byte* istart = (byte*)src; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + uint dictStartIndex = lowLimit; + byte* dictStart = dictBase + dictStartIndex; + uint dictLimit = ms->window.dictLimit; + uint prefixStartIndex = dictLimit < lowLimit ? 
lowLimit : dictLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* dictEnd = dictBase + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], offset_2 = rep[1]; + uint offsetSaved1 = 0, offsetSaved2 = 0; + byte* ip0 = istart; + byte* ip1; + byte* ip2; + byte* ip3; + uint current0; + /* hash for ip0 */ + nuint hash0; + /* hash for ip1 */ + nuint hash1; + /* match idx for ip0 */ + uint idx; + /* base pointer for idx */ + byte* idxBase; + uint offcode; + byte* match0; + nuint mLength; + /* initialize to avoid warning, assert != 0 later */ + byte* matchEnd = null; + nuint step; + byte* nextStep; + const nuint kStepIncr = 1 << 8 - 1; + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); + { + uint curr = (uint)(ip0 - @base); + uint maxRep = curr - dictStartIndex; + if (offset_2 >= maxRep) + { + offsetSaved2 = offset_2; + offset_2 = 0; + } + + if (offset_1 >= maxRep) + { + offsetSaved1 = offset_1; + offset_1 = 0; + } + } + + _start: + step = stepSize; + nextStep = ip0 + kStepIncr; + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + if (ip3 >= ilimit) + { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + idx = hashTable[hash0]; + idxBase = idx < prefixStartIndex ? dictBase : @base; + do + { + { + uint current2 = (uint)(ip2 - @base); + uint repIndex = current2 - offset_1; + byte* repBase = repIndex < prefixStartIndex ? dictBase : @base; + uint rval; + if (prefixStartIndex - repIndex >= 4 && offset_1 > 0) + { + rval = MEM_read32(repBase + repIndex); + } + else + { + rval = MEM_read32(ip2) ^ 1; + } + + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if (MEM_read32(ip2) == rval) + { + ip0 = ip2; + match0 = repBase + repIndex; + matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + assert(match0 != prefixStart && match0 != dictStart); + mLength = ip0[-1] == match0[-1] ? 
1U : 0U; + ip0 -= mLength; + match0 -= mLength; + assert(1 >= 1); + assert(1 <= 3); + offcode = 1; + mLength += 4; + goto _match; + } + } + + { + uint mval = idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + if (MEM_read32(ip0) == mval) + { + goto _offset; + } + } + + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : @base; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + { + uint mval = idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + if (MEM_read32(ip0) == mval) + { + goto _offset; + } + } + + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : @base; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + if (ip2 >= nextStep) + { + step++; +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); + } +#endif + + nextStep += kStepIncr; + } + } + while (ip3 < ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + _offset: + { + uint offset = current0 - idx; + byte* lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; + matchEnd = idx < prefixStartIndex ? 
dictEnd : iend; + match0 = idxBase + idx; + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + offcode = offset + 3; + mLength = 4; + while (ip0 > anchor && match0 > lowMatchPtr && ip0[-1] == match0[-1]) + { + ip0--; + match0--; + mLength++; + } + } + + _match: + assert(matchEnd != null); + mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); + ip0 += mLength; + anchor = ip0; + if (ip1 < ip0) + { + hashTable[hash1] = (uint)(ip1 - @base); + } + + if (ip0 <= ilimit) + { + assert(@base + current0 + 2 > istart); + hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + while (ip0 <= ilimit) + { + uint repIndex2 = (uint)(ip0 - @base) - offset_2; + byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; + if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2) & (offset_2 > 0 ? 1 : 0)) != 0 && MEM_read32(repMatch2) == MEM_read32(ip0)) + { + byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + nuint repLength2 = ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + { + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + } + + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); + ip0 += repLength2; + anchor = ip0; + continue; + } + + break; + } + } + + goto _start; + } + + private static nuint ZSTD_compressBlock_fast_extDict_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); + } + + private static nuint ZSTD_compressBlock_fast_extDict_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); + } + + private static nuint ZSTD_compressBlock_fast_extDict_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); + } + + private static nuint ZSTD_compressBlock_fast_extDict_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); + } + + private static nuint ZSTD_compressBlock_fast_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint mls = ms->cParams.minMatch; + assert(ms->dictMatchState == null); + switch (mls) + { + default: + case 4: + return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); + case 7: + return 
ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); + } + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs new file mode 100644 index 000000000..2ad2c6289 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs @@ -0,0 +1,385 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; +using System.Runtime.CompilerServices; +#if NETCOREAPP3_0_OR_GREATER +using System.Runtime.Intrinsics.X86; +#endif +#if NET5_0_OR_GREATER +using System.Runtime.Intrinsics.Arm; +#endif + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_repStartValue => new uint[3] + { + 1, + 4, + 8 + }; + private static uint* repStartValue => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_repStartValue)); +#else + + private static readonly uint* repStartValue = GetArrayPointer(new uint[3] { 1, 4, 8 }); +#endif + private static readonly nuint* ZSTD_fcs_fieldSize = GetArrayPointer(new nuint[4] { 0, 2, 4, 8 }); + private static readonly nuint* ZSTD_did_fieldSize = GetArrayPointer(new nuint[4] { 0, 1, 2, 4 }); + private const uint ZSTD_blockHeaderSize = 3; +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_LL_bits => new byte[36] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + }; + private static byte* LL_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_bits)); +#else + + private static readonly byte* LL_bits = GetArrayPointer(new byte[36] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }); +#endif +#if NET7_0_OR_GREATER + 
private static ReadOnlySpan Span_LL_defaultNorm => new short[36] + { + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1 + }; + private static short* LL_defaultNorm => (short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_defaultNorm)); +#else + + private static readonly short* LL_defaultNorm = GetArrayPointer(new short[36] { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); +#endif + private const uint LL_defaultNormLog = 6; +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_ML_bits => new byte[53] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + }; + private static byte* ML_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_bits)); +#else + + private static readonly byte* ML_bits = GetArrayPointer(new byte[53] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }); +#endif +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_ML_defaultNorm => new short[53] + { + 1, + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1, + -1, + -1 + }; + private static short* ML_defaultNorm => (short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_defaultNorm)); +#else + + 
private static readonly short* ML_defaultNorm = GetArrayPointer(new short[53] { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); +#endif + private const uint ML_defaultNormLog = 6; +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_OF_defaultNorm => new short[29] + { + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1 + }; + private static short* OF_defaultNorm => (short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_defaultNorm)); +#else + + private static readonly short* OF_defaultNorm = GetArrayPointer(new short[29] { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); +#endif + private const uint OF_defaultNormLog = 5; + /*-******************************************* + * Shared functions to include for inlining + *********************************************/ + private static void ZSTD_copy8(void* dst, void* src) + { + memcpy(dst, src, 8); + } + + /* Need to use memmove here since the literal buffer can now be located within + the dst buffer. In circumstances where the op "catches up" to where the + literal buffer is, there can be partial overlaps in this call on the final + copy if the literal is being shifted by less than 16 bytes. 
*/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_copy16(void* dst, void* src) + { +#if NET5_0_OR_GREATER + if (AdvSimd.IsSupported) + { + AdvSimd.Store((byte*)dst, AdvSimd.LoadVector128((byte*)src)); + } + else +#endif +#if NETCOREAPP3_0_OR_GREATER + if (Sse2.IsSupported) + { + Sse2.Store((byte*)dst, Sse2.LoadVector128((byte*)src)); + } + else +#endif + { + var v1 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src); + var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src + 1); + System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst, v1); + System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst + 1, v2); + } + } + + /*! ZSTD_wildcopy() : + * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0) + * @param ovtype controls the overlap detection + * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. + * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. + * The src buffer must be before the dst buffer. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_wildcopy(void* dst, void* src, nint length, ZSTD_overlap_e ovtype) + { + nint diff = (nint)((byte*)dst - (byte*)src); + byte* ip = (byte*)src; + byte* op = (byte*)dst; + byte* oend = op + length; + if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff < 16) + { + do + { + ZSTD_copy8(op, ip); + op += 8; + ip += 8; + } + while (op < oend); + } + else + { + assert(diff >= 16 || diff <= -16); + ZSTD_copy16(op, ip); + if (16 >= length) + return; + op += 16; + ip += 16; + do + { + { + ZSTD_copy16(op, ip); + op += 16; + ip += 16; + } + + { + ZSTD_copy16(op, ip); + op += 16; + ip += 16; + } + } + while (op < oend); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_limitCopy(void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint length = dstCapacity < srcSize ? dstCapacity : srcSize; + if (length > 0) + { + memcpy(dst, src, (uint)length); + } + + return length; + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs new file mode 100644 index 000000000..c537716e7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs @@ -0,0 +1,2670 @@ +using static ZstdSharp.UnsafeHelper; +using System.Runtime.CompilerServices; +using System.Numerics; +using System; +#if NETCOREAPP3_0_OR_GREATER +using System.Runtime.Intrinsics; +using System.Runtime.Intrinsics.X86; +#endif +#if NET5_0_OR_GREATER +using System.Runtime.Intrinsics.Arm; +#endif + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /*-************************************* + * Binary Tree search + ***************************************/ + private static void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = 
cParams->hashLog; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + assert(ip + 8 <= iend); + assert(idx >= ms->window.dictLimit); + for (; idx < target; idx++) + { + /* assumption : ip + 8 <= iend */ + nuint h = ZSTD_hashPtr(@base + idx, hashLog, mls); + uint matchIndex = hashTable[h]; + uint* nextCandidatePtr = bt + 2 * (idx & btMask); + uint* sortMarkPtr = nextCandidatePtr + 1; + hashTable[h] = idx; + *nextCandidatePtr = matchIndex; + *sortMarkPtr = 1; + } + + ms->nextToUpdate = target; + } + + /** ZSTD_insertDUBT1() : + * sort one already inserted but unsorted position + * assumption : curr >= btlow == (curr - btmask) + * doesn't fail */ + private static void ZSTD_insertDUBT1(ZSTD_MatchState_t* ms, uint curr, byte* inputEnd, uint nbCompares, uint btLow, ZSTD_dictMode_e dictMode) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + nuint commonLengthSmaller = 0, commonLengthLarger = 0; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* ip = curr >= dictLimit ? @base + curr : dictBase + curr; + byte* iend = curr >= dictLimit ? 
inputEnd : dictBase + dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + byte* match; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = smallerPtr + 1; + /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ + uint matchIndex = *smallerPtr; + /* to be nullified at the end */ + uint dummy32; + uint windowValid = ms->window.lowLimit; + uint maxDistance = 1U << (int)cParams->windowLog; + uint windowLow = curr - windowValid > maxDistance ? curr - maxDistance : windowValid; + assert(curr >= btLow); + assert(ip < iend); + for (; nbCompares != 0 && matchIndex > windowLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + assert(matchIndex < curr); + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit || curr < dictLimit) + { + byte* mBase = dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit ? 
@base : dictBase; + assert(matchIndex + matchLength >= dictLimit || curr < dictLimit); + match = mBase + matchIndex; + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); + } + else + { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; + } + + if (ip + matchLength == iend) + { + break; + } + + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } + } + + *smallerPtr = *largerPtr = 0; + } + + private static nuint ZSTD_DUBT_findBetterDictMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offsetPtr, nuint bestLength, uint nbCompares, uint mls, ZSTD_dictMode_e dictMode) + { + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dmsCParams = &dms->cParams; + uint* dictHashTable = dms->hashTable; + uint hashLog = dmsCParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint dictMatchIndex = dictHashTable[h]; + byte* @base = ms->window.@base; + byte* prefixStart = @base + ms->window.dictLimit; + uint curr = (uint)(ip - @base); + byte* dictBase = dms->window.@base; + byte* dictEnd = dms->window.nextSrc; + uint dictHighLimit = (uint)(dms->window.nextSrc - dms->window.@base); + uint dictLowLimit = dms->window.lowLimit; + uint dictIndexDelta = ms->window.lowLimit - dictHighLimit; + uint* dictBt = dms->chainTable; + uint btLog = dmsCParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint btLow = btMask >= dictHighLimit - dictLowLimit ? 
dictLowLimit : dictHighLimit - btMask; + nuint commonLengthSmaller = 0, commonLengthLarger = 0; + assert(dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState); + for (; nbCompares != 0 && dictMatchIndex > dictLowLimit; --nbCompares) + { + uint* nextPtr = dictBt + 2 * (dictMatchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + byte* match = dictBase + dictMatchIndex; + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + if (dictMatchIndex + matchLength >= dictHighLimit) + match = @base + dictMatchIndex + dictIndexDelta; + if (matchLength > bestLength) + { + uint matchIndex = dictMatchIndex + dictIndexDelta; + if (4 * (int)(matchLength - bestLength) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((uint)offsetPtr[0] + 1))) + { + bestLength = matchLength; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + } + + if (ip + matchLength == iend) + { + break; + } + } + + if (match[matchLength] < ip[matchLength]) + { + if (dictMatchIndex <= btLow) + { + break; + } + + commonLengthSmaller = matchLength; + dictMatchIndex = nextPtr[1]; + } + else + { + if (dictMatchIndex <= btLow) + { + break; + } + + commonLengthLarger = matchLength; + dictMatchIndex = nextPtr[0]; + } + } + + if (bestLength >= 3) + { + assert(*offsetPtr > 3); + uint mIndex = curr - (uint)(*offsetPtr - 3); + } + + return bestLength; + } + + private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offBasePtr, uint mls, ZSTD_dictMode_e dictMode) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint matchIndex = hashTable[h]; + byte* @base = ms->window.@base; + uint curr = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, 
cParams->windowLog); + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint btLow = btMask >= curr ? 0 : curr - btMask; + uint unsortLimit = btLow > windowLow ? btLow : windowLow; + uint* nextCandidate = bt + 2 * (matchIndex & btMask); + uint* unsortedMark = bt + 2 * (matchIndex & btMask) + 1; + uint nbCompares = 1U << (int)cParams->searchLog; + uint nbCandidates = nbCompares; + uint previousCandidate = 0; + assert(ip <= iend - 8); + assert(dictMode != ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + while (matchIndex > unsortLimit && *unsortedMark == 1 && nbCandidates > 1) + { + *unsortedMark = previousCandidate; + previousCandidate = matchIndex; + matchIndex = *nextCandidate; + nextCandidate = bt + 2 * (matchIndex & btMask); + unsortedMark = bt + 2 * (matchIndex & btMask) + 1; + nbCandidates--; + } + + if (matchIndex > unsortLimit && *unsortedMark == 1) + { + *nextCandidate = *unsortedMark = 0; + } + + matchIndex = previousCandidate; + while (matchIndex != 0) + { + uint* nextCandidateIdxPtr = bt + 2 * (matchIndex & btMask) + 1; + uint nextCandidateIdx = *nextCandidateIdxPtr; + ZSTD_insertDUBT1(ms, matchIndex, iend, nbCandidates, unsortLimit, dictMode); + matchIndex = nextCandidateIdx; + nbCandidates++; + } + + { + nuint commonLengthSmaller = 0, commonLengthLarger = 0; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = bt + 2 * (curr & btMask) + 1; + uint matchEndIdx = curr + 8 + 1; + /* to be nullified at the end */ + uint dummy32; + nuint bestLength = 0; + matchIndex = hashTable[h]; + hashTable[h] = curr; + for (; nbCompares != 0 && matchIndex > windowLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < 
commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + byte* match; + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit) + { + match = @base + matchIndex; + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); + } + else + { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; + } + + if (matchLength > bestLength) + { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + if (4 * (int)(matchLength - bestLength) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((uint)*offBasePtr))) + { + bestLength = matchLength; + assert(curr - matchIndex > 0); + *offBasePtr = curr - matchIndex + 3; + } + + if (ip + matchLength == iend) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + nbCompares = 0; + } + + break; + } + } + + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } + } + + *smallerPtr = *largerPtr = 0; + assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) + { + bestLength = ZSTD_DUBT_findBetterDictMatch(ms, ip, iend, offBasePtr, bestLength, nbCompares, mls, dictMode); + } + + assert(matchEndIdx > curr + 8); + ms->nextToUpdate = matchEndIdx - 8; + if (bestLength >= 3) + { + assert(*offBasePtr > 3); + uint mIndex = curr - (uint)(*offBasePtr - 3); + } + + return bestLength; + } + } + + /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_BtFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr, uint mls, ZSTD_dictMode_e dictMode) + { + if (ip < ms->window.@base + ms->nextToUpdate) + return 0; + ZSTD_updateDUBT(ms, ip, iLimit, mls); + return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); + } + + /*********************************** + * Dedicated dict search + ***********************************/ + private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, byte* ip) + { + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint* hashTable = ms->hashTable; + uint* chainTable = ms->chainTable; + uint chainSize = (uint)(1 << (int)ms->cParams.chainLog); + uint idx = ms->nextToUpdate; + uint minChain = chainSize < target - idx ? target - chainSize : idx; + const uint bucketSize = 1 << 2; + uint cacheSize = bucketSize - 1; + uint chainAttempts = (uint)(1 << (int)ms->cParams.searchLog) - cacheSize; + uint chainLimit = chainAttempts > 255 ? 255 : chainAttempts; + /* We know the hashtable is oversized by a factor of `bucketSize`. + * We are going to temporarily pretend `bucketSize == 1`, keeping only a + * single entry. We will use the rest of the space to construct a temporary + * chaintable. 
+ */ + uint hashLog = ms->cParams.hashLog - 2; + uint* tmpHashTable = hashTable; + uint* tmpChainTable = hashTable + ((nuint)1 << (int)hashLog); + uint tmpChainSize = (uint)((1 << 2) - 1) << (int)hashLog; + uint tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx; + uint hashIdx; + assert(ms->cParams.chainLog <= 24); + assert(ms->cParams.hashLog > ms->cParams.chainLog); + assert(idx != 0); + assert(tmpMinChain <= minChain); + for (; idx < target; idx++) + { + uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch); + if (idx >= tmpMinChain) + { + tmpChainTable[idx - tmpMinChain] = hashTable[h]; + } + + tmpHashTable[h] = idx; + } + + { + uint chainPos = 0; + for (hashIdx = 0; hashIdx < 1U << (int)hashLog; hashIdx++) + { + uint count; + uint countBeyondMinChain = 0; + uint i = tmpHashTable[hashIdx]; + for (count = 0; i >= tmpMinChain && count < cacheSize; count++) + { + if (i < minChain) + { + countBeyondMinChain++; + } + + i = tmpChainTable[i - tmpMinChain]; + } + + if (count == cacheSize) + { + for (count = 0; count < chainLimit;) + { + if (i < minChain) + { + if (i == 0 || ++countBeyondMinChain > cacheSize) + { + break; + } + } + + chainTable[chainPos++] = i; + count++; + if (i < tmpMinChain) + { + break; + } + + i = tmpChainTable[i - tmpMinChain]; + } + } + else + { + count = 0; + } + + if (count != 0) + { + tmpHashTable[hashIdx] = (chainPos - count << 8) + count; + } + else + { + tmpHashTable[hashIdx] = 0; + } + } + + assert(chainPos <= chainSize); + } + + for (hashIdx = (uint)(1 << (int)hashLog); hashIdx != 0;) + { + uint bucketIdx = --hashIdx << 2; + uint chainPackedPointer = tmpHashTable[hashIdx]; + uint i; + for (i = 0; i < cacheSize; i++) + { + hashTable[bucketIdx + i] = 0; + } + + hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; + } + + for (idx = ms->nextToUpdate; idx < target; idx++) + { + uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch) << 2; + uint i; + for (i = cacheSize - 1; i != 0; i--) 
+ hashTable[h + i] = hashTable[h + i - 1]; + hashTable[h] = idx; + } + + ms->nextToUpdate = target; + } + + /* Returns the longest match length found in the dedicated dict search structure. + * If none are longer than the argument ml, then ml will be returned. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_dedicatedDictSearch_lazy_search(nuint* offsetPtr, nuint ml, uint nbAttempts, ZSTD_MatchState_t* dms, byte* ip, byte* iLimit, byte* prefixStart, uint curr, uint dictLimit, nuint ddsIdx) + { + uint ddsLowestIndex = dms->window.dictLimit; + byte* ddsBase = dms->window.@base; + byte* ddsEnd = dms->window.nextSrc; + uint ddsSize = (uint)(ddsEnd - ddsBase); + uint ddsIndexDelta = dictLimit - ddsSize; + const uint bucketSize = 1 << 2; + uint bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1; + uint ddsAttempt; + uint matchIndex; + for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); + } +#endif + } + + { + uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + uint chainIndex = chainPackedPointer >> 8; +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(&dms->chainTable[chainIndex]); + } +#endif + } + + for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) + { + nuint currentMl = 0; + byte* match; + matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; + match = ddsBase + matchIndex; + if (matchIndex == 0) + { + return ml; + } + + assert(matchIndex >= ddsLowestIndex); + assert(match + 4 <= ddsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + { + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr - (matchIndex + ddsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + ddsIndexDelta) + 3; + if (ip + currentMl == iLimit) + { + return ml; + } + 
} + } + + { + uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + uint chainIndex = chainPackedPointer >> 8; + uint chainLength = chainPackedPointer & 0xFF; + uint chainAttempts = nbAttempts - ddsAttempt; + uint chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts; + uint chainAttempt; + for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->chainTable[chainIndex + chainAttempt]); + } +#endif + } + + for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++, chainIndex++) + { + nuint currentMl = 0; + byte* match; + matchIndex = dms->chainTable[chainIndex]; + match = ddsBase + matchIndex; + assert(matchIndex >= ddsLowestIndex); + assert(match + 4 <= ddsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + { + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr - (matchIndex + ddsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + ddsIndexDelta) + 3; + if (ip + currentMl == iLimit) + break; + } + } + } + + return ml; + } + + /* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_insertAndFindFirstIndex_internal(ZSTD_MatchState_t* ms, ZSTD_compressionParameters* cParams, byte* ip, uint mls, uint lazySkipping) + { + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + uint* chainTable = ms->chainTable; + uint chainMask = (uint)((1 << (int)cParams->chainLog) - 1); + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + while (idx < target) + { + nuint h = ZSTD_hashPtr(@base + idx, hashLog, mls); + chainTable[idx & chainMask] = hashTable[h]; + hashTable[h] = idx; + idx++; + if (lazySkipping != 0) + break; + } + + ms->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; + } + + private static uint ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, byte* ip) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, 0); + } + + /* inlining is important to hardwire a hot branch (template emulation) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr, uint mls, ZSTD_dictMode_e dictMode) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* chainTable = ms->chainTable; + uint chainSize = (uint)(1 << (int)cParams->chainLog); + uint chainMask = chainSize - 1; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictEnd = dictBase + dictLimit; + uint curr = (uint)(ip - @base); + uint maxDistance = 1U << (int)cParams->windowLog; + uint lowestValid = ms->window.lowLimit; + uint withinMaxDistance = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 
1U : 0U; + uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; + uint minChain = curr > chainSize ? curr - chainSize : 0; + uint nbAttempts = 1U << (int)cParams->searchLog; + nuint ml = 4 - 1; + ZSTD_MatchState_t* dms = ms->dictMatchState; + uint ddsHashLog = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - 2 : 0; + nuint ddsIdx = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? ZSTD_hashPtr(ip, ddsHashLog, mls) << 2 : 0; + uint matchIndex; + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + uint* entry = &dms->hashTable[ddsIdx]; +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(entry); + } +#endif + } + + matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, (uint)ms->lazySkipping); + for (; matchIndex >= lowLimit && nbAttempts > 0; nbAttempts--) + { + nuint currentMl = 0; + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { + byte* match = @base + matchIndex; + assert(matchIndex >= dictLimit); + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) + currentMl = ZSTD_count(ip, match, iLimit); + } + else + { + byte* match = dictBase + matchIndex; + assert(match + 4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + if (ip + currentMl == iLimit) + break; + } + + if (matchIndex <= minChain) + break; + matchIndex = chainTable[matchIndex & chainMask]; + } + + assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + } + else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + uint* dmsChainTable = dms->chainTable; + uint dmsChainSize = (uint)(1 << (int)dms->cParams.chainLog); + uint dmsChainMask = dmsChainSize - 1; + uint dmsLowestIndex = dms->window.dictLimit; + byte* dmsBase = dms->window.@base; + byte* dmsEnd = dms->window.nextSrc; + uint dmsSize = (uint)(dmsEnd - dmsBase); + uint dmsIndexDelta = dictLimit - dmsSize; + uint dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0; + matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; + for (; matchIndex >= dmsLowestIndex && nbAttempts > 0; nbAttempts--) + { + nuint currentMl = 0; + byte* match = dmsBase + matchIndex; + assert(match + 4 <= dmsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + 4; + if (currentMl > ml) + { + ml = currentMl; + assert(curr > matchIndex + dmsIndexDelta); + assert(curr - (matchIndex + dmsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; + if (ip + currentMl == iLimit) + break; + } + + if (matchIndex <= dmsMinChain) + break; + matchIndex = dmsChainTable[matchIndex & dmsChainMask]; + } + } + + return ml; + } + + /* ZSTD_VecMask_next(): + * Starting from the LSB, returns the idx of the next non-zero bit. + * Basically counting the nb of trailing zeroes. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_VecMask_next(ulong val) + { + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } + + /* ZSTD_row_nextIndex(): + * Returns the next index to insert at within a tagTable row, and updates the "head" + * value to reflect the update. 
Essentially cycles backwards from [1, {entries per row}) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_nextIndex(byte* tagRow, uint rowMask) + { + uint next = (uint)(*tagRow - 1) & rowMask; + next += next == 0 ? rowMask : 0; + *tagRow = (byte)next; + return next; + } + + /* ZSTD_isAligned(): + * Checks that a pointer is aligned to "align" bytes which must be a power of 2. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_isAligned(void* ptr, nuint align) + { + assert((align & align - 1) == 0); + return ((nuint)ptr & align - 1) == 0 ? 1 : 0; + } + + /* ZSTD_row_prefetch(): + * Performs prefetching for the hashTable and tagTable at a given row. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_prefetch(uint* hashTable, byte* tagTable, uint relRow, uint rowLog) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(hashTable + relRow); + } +#endif + + if (rowLog >= 5) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(hashTable + relRow + 16); + } +#endif + } + +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(tagTable + relRow); + } +#endif + + if (rowLog == 6) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(tagTable + relRow + 32); + } +#endif + } + + assert(rowLog == 4 || rowLog == 5 || rowLog == 6); + assert(ZSTD_isAligned(hashTable + relRow, 64) != 0); + assert(ZSTD_isAligned(tagTable + relRow, (nuint)1 << (int)rowLog) != 0); + } + + /* ZSTD_row_fillHashCache(): + * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, + * but not beyond iLimit. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, byte* @base, uint rowLog, uint mls, uint idx, byte* iLimit) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint hashLog = ms->rowHashLog; + uint maxElemsToPrefetch = @base + idx > iLimit ? 0 : (uint)(iLimit - (@base + idx) + 1); + uint lim = idx + (8 < maxElemsToPrefetch ? 8 : maxElemsToPrefetch); + for (; idx < lim; ++idx) + { + uint hash = (uint)ZSTD_hashPtrSalted(@base + idx, hashLog + 8, mls, ms->hashSalt); + uint row = hash >> 8 << (int)rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); + ms->hashCache[idx & 8 - 1] = hash; + } + } + + /* ZSTD_row_nextCachedHash(): + * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at + * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_nextCachedHash(uint* cache, uint* hashTable, byte* tagTable, byte* @base, uint idx, uint hashLog, uint rowLog, uint mls, ulong hashSalt) + { + uint newHash = (uint)ZSTD_hashPtrSalted(@base + idx + 8, hashLog + 8, mls, hashSalt); + uint row = newHash >> 8 << (int)rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); + { + uint hash = cache[idx & 8 - 1]; + cache[idx & 8 - 1] = newHash; + return hash; + } + } + + /* ZSTD_row_update_internalImpl(): + * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, uint updateStartIdx, uint updateEndIdx, uint mls, uint rowLog, uint rowMask, uint useCache) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint hashLog = ms->rowHashLog; + byte* @base = ms->window.@base; + for (; updateStartIdx < updateEndIdx; ++updateStartIdx) + { + uint hash = useCache != 0 ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, @base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) : (uint)ZSTD_hashPtrSalted(@base + updateStartIdx, hashLog + 8, mls, ms->hashSalt); + uint relRow = hash >> 8 << (int)rowLog; + uint* row = hashTable + relRow; + byte* tagRow = tagTable + relRow; + uint pos = ZSTD_row_nextIndex(tagRow, rowMask); + assert(hash == ZSTD_hashPtrSalted(@base + updateStartIdx, hashLog + 8, mls, ms->hashSalt)); + tagRow[pos] = (byte)(hash & (1U << 8) - 1); + row[pos] = updateStartIdx; + } + } + + /* ZSTD_row_update_internal(): + * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. + * Skips sections of long matches as is necessary. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, byte* ip, uint mls, uint rowLog, uint rowMask, uint useCache) + { + uint idx = ms->nextToUpdate; + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + const uint kSkipThreshold = 384; + const uint kMaxMatchStartPositionsToUpdate = 96; + const uint kMaxMatchEndPositionsToUpdate = 32; + if (useCache != 0) + { + if (target - idx > kSkipThreshold) + { + uint bound = idx + kMaxMatchStartPositionsToUpdate; + ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); + idx = target - kMaxMatchEndPositionsToUpdate; + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, idx, ip + 1); + } + } + + assert(target >= idx); + ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); + ms->nextToUpdate = target; + } + + /* ZSTD_row_update(): + * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary + * processing. + */ + private static void ZSTD_row_update(ZSTD_MatchState_t* ms, byte* ip) + { + uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; + uint rowMask = (1U << (int)rowLog) - 1; + /* mls caps out at 6 */ + uint mls = ms->cParams.minMatch < 6 ? ms->cParams.minMatch : 6; + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0); + } + + /* Returns the mask width of bits group of which will be set to 1. Given not all + * architectures have easy movemask instruction, this helps to iterate over + * groups of bits easier and faster. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_matchMaskGroupWidth(uint rowEntries) + { + assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); + assert(rowEntries <= 64); +#if NET5_0_OR_GREATER + if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) + { + if (rowEntries == 16) + return 4; +#if NET9_0_OR_GREATER + if (AdvSimd.Arm64.IsSupported) + { + if (rowEntries == 32) + return 2; + if (rowEntries == 64) + return 1; + } +#endif + } +#endif + return 1; + } + +#if NETCOREAPP3_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint head) + { + Vector128 comparisonMask = Vector128.Create(tag); + assert(nbChunks is 1 or 2 or 4); + if (nbChunks == 1) + { + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + return BitOperations.RotateRight((ushort)matches0, (int)head); + } + + if (nbChunks == 2) + { + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + Vector128 chunk1 = Sse2.LoadVector128(src + 16); + Vector128 equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); + int matches1 = Sse2.MoveMask(equalMask1); + return BitOperations.RotateRight((uint)matches1 << 16 | (uint)matches0, (int)head); + } + + { + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + Vector128 chunk1 = Sse2.LoadVector128(src + 16 * 1); + Vector128 equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); + int matches1 = Sse2.MoveMask(equalMask1); + Vector128 chunk2 = Sse2.LoadVector128(src + 16 * 2); + Vector128 equalMask2 = Sse2.CompareEqual(chunk2, comparisonMask); + int matches2 = Sse2.MoveMask(equalMask2); + Vector128 chunk3 = 
Sse2.LoadVector128(src + 16 * 3); + Vector128 equalMask3 = Sse2.CompareEqual(chunk3, comparisonMask); + int matches3 = Sse2.MoveMask(equalMask3); + return BitOperations.RotateRight((ulong)matches3 << 48 | (ulong)matches2 << 32 | (ulong)matches1 << 16 | (uint)matches0, (int)head); + } + } +#endif + + /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by + * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" + * matches the hash at the nth position in a row of the tagTable. + * Each row is a circular buffer beginning at the value of "headGrouped". So we + * must rotate the "matches" bitfield to match up with the actual layout of the + * entries within the hashTable */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrouped, uint rowEntries) + { + byte* src = tagRow; + assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); + assert(rowEntries <= 64); + assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ulong) * 8); +#if NETCOREAPP3_0_OR_GREATER + if (Sse2.IsSupported) + { + return ZSTD_row_getSSEMask((int)(rowEntries / 16), src, tag, headGrouped); + } +#endif + +#if NET5_0_OR_GREATER + if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) + { + if (rowEntries == 16) + { + /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. + * After that groups of 4 bits represent the equalMask. We lower + * all bits except the highest in these groups by doing AND with + * 0x88 = 0b10001000. 
+ */ + Vector128 chunk = AdvSimd.LoadVector128(src); + Vector128 equalMask = AdvSimd.CompareEqual(chunk, AdvSimd.DuplicateToVector128(tag)).As(); + Vector64 res = AdvSimd.ShiftRightLogicalNarrowingLower(equalMask, 4); + ulong matches = res.As().GetElement(0); + return BitOperations.RotateRight(matches, (int)headGrouped) & 0x8888888888888888; + } + else if (rowEntries == 32) + { +#if NET9_0_OR_GREATER + if (AdvSimd.Arm64.IsSupported) + { + /* Same idea as with rowEntries == 16 but doing AND with + * 0x55 = 0b01010101. + */ + (Vector128 chunk0, Vector128 chunk1) = AdvSimd.Arm64.Load2xVector128AndUnzip((ushort*)src); + Vector128 dup = AdvSimd.DuplicateToVector128(tag); + Vector64 t0 = AdvSimd.ShiftRightLogicalNarrowingLower(AdvSimd.CompareEqual(chunk0.As(), dup).As(), 6); + Vector64 t1 = AdvSimd.ShiftRightLogicalNarrowingLower(AdvSimd.CompareEqual(chunk1.As(), dup).As(), 6); + Vector64 res = AdvSimd.ShiftLeftAndInsert(t0, t1, 4); + ulong matches = res.As().GetElement(0); + return BitOperations.RotateRight(matches, (int)headGrouped) & 0x5555555555555555; + } +#endif + } + else + { /* rowEntries == 64 */ +#if NET9_0_OR_GREATER + if (AdvSimd.Arm64.IsSupported) + { + (Vector128 chunk0, Vector128 chunk1, Vector128 chunk2, Vector128 chunk3) = AdvSimd.Arm64.Load4xVector128AndUnzip(src); + Vector128 dup = AdvSimd.DuplicateToVector128(tag); + Vector128 cmp0 = AdvSimd.CompareEqual(chunk0, dup); + Vector128 cmp1 = AdvSimd.CompareEqual(chunk1, dup); + Vector128 cmp2 = AdvSimd.CompareEqual(chunk2, dup); + Vector128 cmp3 = AdvSimd.CompareEqual(chunk3, dup); + + Vector128 t0 = AdvSimd.ShiftRightAndInsert(cmp1, cmp0, 1); + Vector128 t1 = AdvSimd.ShiftRightAndInsert(cmp3, cmp2, 1); + Vector128 t2 = AdvSimd.ShiftRightAndInsert(t1, t0, 2); + Vector128 t3 = AdvSimd.ShiftRightAndInsert(t2, t2, 4); + Vector64 t4 = AdvSimd.ShiftRightLogicalNarrowingLower(t3.As(), 4); + ulong matches = t4.As().GetElement(0); + return BitOperations.RotateRight(matches, (int) headGrouped); + } +#endif + } + } 
+#endif + + { + nuint chunkSize = (nuint)sizeof(nuint); + nuint shiftAmount = chunkSize * 8 - chunkSize; + nuint xFF = ~(nuint)0; + nuint x01 = xFF / 0xFF; + nuint x80 = x01 << 7; + nuint splatChar = tag * x01; + ulong matches = 0; + int i = (int)(rowEntries - chunkSize); + assert(sizeof(nuint) == 4 || sizeof(nuint) == 8); + if (BitConverter.IsLittleEndian) + { + nuint extractMagic = xFF / 0x7F >> (int)chunkSize; + do + { + nuint chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = ((chunk | x80) - x01 | chunk) & x80; + matches <<= (int)chunkSize; + matches |= chunk * extractMagic >> (int)shiftAmount; + i -= (int)chunkSize; + } + while (i >= 0); + } + else + { + nuint msb = xFF ^ xFF >> 1; + nuint extractMagic = msb / 0x1FF | msb; + do + { + nuint chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = ((chunk | x80) - x01 | chunk) & x80; + matches <<= (int)chunkSize; + matches |= (chunk >> 7) * extractMagic >> (int)shiftAmount; + i -= (int)chunkSize; + } + while (i >= 0); + } + + matches = ~matches; + if (rowEntries == 16) + { + return BitOperations.RotateRight((ushort)matches, (int)headGrouped); + } + else if (rowEntries == 32) + { + return BitOperations.RotateRight((uint)matches, (int)headGrouped); + } + else + { + return BitOperations.RotateRight(matches, (int)headGrouped); + } + } + } + + /* The high-level approach of the SIMD row based match finder is as follows: + * - Figure out where to insert the new entry: + * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. + * - The hash is salted by a value that changes on every context reset, so when the same table is used + * we will avoid collisions that would otherwise slow us down by introducing phantom matches. + * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines + * which row to insert into. + * - Determine the correct position within the row to insert the entry into. 
Each row of 15 or 31 can + * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes + * per row). + * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and + * generate a bitfield that we can cycle through to check the collisions in the hash table. + * - Pick the longest match. + * - Insert the tag into the equivalent row and position in the tagTable. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr, uint mls, ZSTD_dictMode_e dictMode, uint rowLog) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint* hashCache = ms->hashCache; + uint hashLog = ms->rowHashLog; + ZSTD_compressionParameters* cParams = &ms->cParams; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictEnd = dictBase + dictLimit; + uint curr = (uint)(ip - @base); + uint maxDistance = 1U << (int)cParams->windowLog; + uint lowestValid = ms->window.lowLimit; + uint withinMaxDistance = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; + uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; + uint rowEntries = 1U << (int)rowLog; + uint rowMask = rowEntries - 1; + /* nb of searches is capped at nb entries per row */ + uint cappedSearchLog = cParams->searchLog < rowLog ? 
cParams->searchLog : rowLog; + uint groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); + ulong hashSalt = ms->hashSalt; + uint nbAttempts = 1U << (int)cappedSearchLog; + nuint ml = 4 - 1; + uint hash; + /* DMS/DDS variables that may be referenced later */ + ZSTD_MatchState_t* dms = ms->dictMatchState; + /* Initialize the following variables to satisfy static analyzer */ + nuint ddsIdx = 0; + /* cctx hash tables are limited in searches, but allow extra searches into DDS */ + uint ddsExtraAttempts = 0; + uint dmsTag = 0; + uint* dmsRow = null; + byte* dmsTagRow = null; + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + uint ddsHashLog = dms->cParams.hashLog - 2; + { + ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << 2; +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(&dms->hashTable[ddsIdx]); + } +#endif + } + + ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (int)(cParams->searchLog - rowLog) : 0; + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + /* Prefetch DMS rows */ + uint* dmsHashTable = dms->hashTable; + byte* dmsTagTable = dms->tagTable; + uint dmsHash = (uint)ZSTD_hashPtr(ip, dms->rowHashLog + 8, mls); + uint dmsRelRow = dmsHash >> 8 << (int)rowLog; + dmsTag = dmsHash & (1U << 8) - 1; + dmsTagRow = dmsTagTable + dmsRelRow; + dmsRow = dmsHashTable + dmsRelRow; + ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog); + } + + if (ms->lazySkipping == 0) + { + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1); + hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, @base, curr, hashLog, rowLog, mls, hashSalt); + } + else + { + hash = (uint)ZSTD_hashPtrSalted(ip, hashLog + 8, mls, hashSalt); + ms->nextToUpdate = curr; + } + + ms->hashSaltEntropy += hash; + { + uint relRow = hash >> 8 << (int)rowLog; + uint tag = hash & (1U << 8) - 1; + uint* row = hashTable + relRow; + byte* tagRow = tagTable + relRow; + uint headGrouped = (*tagRow & rowMask) * groupWidth; + uint*
matchBuffer = stackalloc uint[64]; + nuint numMatches = 0; + nuint currMatch = 0; + ulong matches = ZSTD_row_getMatchMask(tagRow, (byte)tag, headGrouped, rowEntries); + for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) + { + uint matchPos = (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchIndex = row[matchPos]; + if (matchPos == 0) + continue; + assert(numMatches < rowEntries); + if (matchIndex < lowLimit) + break; + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(@base + matchIndex); + } +#endif + } + else + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(dictBase + matchIndex); + } +#endif + } + + matchBuffer[numMatches++] = matchIndex; + --nbAttempts; + } + + { + uint pos = ZSTD_row_nextIndex(tagRow, rowMask); + tagRow[pos] = (byte)tag; + row[pos] = ms->nextToUpdate++; + } + + for (; currMatch < numMatches; ++currMatch) + { + uint matchIndex = matchBuffer[currMatch]; + nuint currentMl = 0; + assert(matchIndex < curr); + assert(matchIndex >= lowLimit); + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { + byte* match = @base + matchIndex; + assert(matchIndex >= dictLimit); + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) + currentMl = ZSTD_count(ip, match, iLimit); + } + else + { + byte* match = dictBase + matchIndex; + assert(match + 4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + if (ip + currentMl == iLimit) + break; + } + } + } + + assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + } + else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + /* TODO: Measure and potentially add prefetching to DMS */ + uint dmsLowestIndex = dms->window.dictLimit; + byte* dmsBase = dms->window.@base; + byte* dmsEnd = dms->window.nextSrc; + uint dmsSize = (uint)(dmsEnd - dmsBase); + uint dmsIndexDelta = dictLimit - dmsSize; + { + uint headGrouped = (*dmsTagRow & rowMask) * groupWidth; + uint* matchBuffer = stackalloc uint[64]; + nuint numMatches = 0; + nuint currMatch = 0; + ulong matches = ZSTD_row_getMatchMask(dmsTagRow, (byte)dmsTag, headGrouped, rowEntries); + for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) + { + uint matchPos = (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchIndex = dmsRow[matchPos]; + if (matchPos == 0) + continue; + if (matchIndex < dmsLowestIndex) + break; +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(dmsBase + matchIndex); + } +#endif + + matchBuffer[numMatches++] = matchIndex; + --nbAttempts; + } + + for (; currMatch < numMatches; ++currMatch) + { + uint matchIndex = matchBuffer[currMatch]; + nuint currentMl = 0; + assert(matchIndex >= dmsLowestIndex); + assert(matchIndex < curr); + { + byte* match = dmsBase + matchIndex; + assert(match + 4 <= dmsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr > matchIndex + dmsIndexDelta); + assert(curr - (matchIndex + dmsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; + if (ip + currentMl == iLimit) + break; + } + } + } + } + + return ml; + } + + /* Generate row search fns for each combination of (dictMode, mls, rowLog) */ + 
private static nuint ZSTD_RowFindBestMatch_noDict_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_noDict_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 4); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 5); + } + + private static nuint ZSTD_RowFindBestMatch_extDict_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 
4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + } + + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + } + + /* Generate binary Tree search fns for each combination of (dictMode, mls) */ + private static nuint ZSTD_BtFindBestMatch_noDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_BtFindBestMatch_noDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_BtFindBestMatch_noDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 6); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_BtFindBestMatch_extDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_BtFindBestMatch_extDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_BtFindBestMatch_extDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_BtFindBestMatch_dictMatchState_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_BtFindBestMatch_dictMatchState_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_BtFindBestMatch_dictMatchState_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + /* Generate hash chain search fns for each combination of (dictMode, mls) */ + private static nuint ZSTD_HcFindBestMatch_noDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 4); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_HcFindBestMatch_noDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_HcFindBestMatch_noDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_HcFindBestMatch_extDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_HcFindBestMatch_extDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_HcFindBestMatch_extDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 6); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_HcFindBestMatch_dictMatchState_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_HcFindBestMatch_dictMatchState_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_HcFindBestMatch_dictMatchState_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + { + assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + /** + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. + * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. + * If a match is found its offset is stored in @p offsetPtr. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offsetPtr, uint mls, uint rowLog, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + if (mls == 4) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_noDict_4_4(ms, ip, iend, offsetPtr); + return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_4_6(ms, ip, iend, offsetPtr); + } + + if (mls == 5) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_noDict_5_4(ms, ip, iend, offsetPtr); + return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_5_6(ms, ip, iend, offsetPtr); + } + + if (rowLog == 4) + return ZSTD_RowFindBestMatch_noDict_6_4(ms, ip, iend, offsetPtr); + return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_6_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_6_6(ms, ip, iend, offsetPtr); + } + + if (searchMethod == searchMethod_e.search_hashChain) + { + if (mls == 4) + return ZSTD_HcFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); + return mls == 5 ? ZSTD_HcFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) : ZSTD_HcFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + } + + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); + return mls == 5 ? 
ZSTD_BtFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) : ZSTD_BtFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_extDict) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + if (mls == 4) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_4_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_4_6(ms, ip, iend, offsetPtr); + } + + if (mls == 5) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_5_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_5_6(ms, ip, iend, offsetPtr); + } + + if (mls == 6) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_6_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_6_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_6_6(ms, ip, iend, offsetPtr); + } + } + + if (searchMethod == searchMethod_e.search_hashChain) + { + if (mls == 4) + return ZSTD_HcFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_HcFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); + } + + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_BtFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + if (mls == 4) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dictMatchState_4_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dictMatchState_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_4_6(ms, ip, 
iend, offsetPtr); + } + + if (mls == 5) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dictMatchState_5_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dictMatchState_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_5_6(ms, ip, iend, offsetPtr); + } + + if (mls == 6) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dictMatchState_6_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dictMatchState_6_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_6_6(ms, ip, iend, offsetPtr); + } + } + + if (searchMethod == searchMethod_e.search_hashChain) + { + if (mls == 4) + return ZSTD_HcFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_HcFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); + } + + // search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_BtFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); + } + + if (searchMethod == searchMethod_e.search_rowHash) + { + if (mls == 4) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ms, ip, iend, offsetPtr); + } + + if (mls == 5) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ms, ip, iend, offsetPtr); + } + + if (mls == 6) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4(ms, ip, iend, offsetPtr); 
+ if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ms, ip, iend, offsetPtr); + } + } + + if (searchMethod == searchMethod_e.search_hashChain) + { + if (mls == 4) + return ZSTD_HcFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_HcFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + } + + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_BtFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + } + + /* ******************************* + * Common parser - lazy strategy + *********************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, searchMethod_e searchMethod, uint depth, ZSTD_dictMode_e dictMode) + { + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? iend - 8 - 8 : iend - 8; + byte* @base = ms->window.@base; + uint prefixLowestIndex = ms->window.dictLimit; + byte* prefixLowest = @base + prefixLowestIndex; + uint mls = ms->cParams.minMatch <= 4 ? 4 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6; + uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; + uint offset_1 = rep[0], offset_2 = rep[1]; + uint offsetSaved1 = 0, offsetSaved2 = 0; + int isDMS = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 1 : 0; + int isDDS = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 
1 : 0; + int isDxS = isDMS != 0 || isDDS != 0 ? 1 : 0; + ZSTD_MatchState_t* dms = ms->dictMatchState; + uint dictLowestIndex = isDxS != 0 ? dms->window.dictLimit : 0; + byte* dictBase = isDxS != 0 ? dms->window.@base : null; + byte* dictLowest = isDxS != 0 ? dictBase + dictLowestIndex : null; + byte* dictEnd = isDxS != 0 ? dms->window.nextSrc : null; + uint dictIndexDelta = isDxS != 0 ? prefixLowestIndex - (uint)(dictEnd - dictBase) : 0; + uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictLowest)); + ip += dictAndPrefixLength == 0 ? 1 : 0; + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + uint curr = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); + uint maxRep = curr - windowLow; + if (offset_2 > maxRep) + { + offsetSaved2 = offset_2; + offset_2 = 0; + } + + if (offset_1 > maxRep) + { + offsetSaved1 = offset_1; + offset_1 = 0; + } + } + +#if DEBUG + if (isDxS != 0) + { + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + } +#endif + + ms->lazySkipping = 0; + if (searchMethod == searchMethod_e.search_rowHash) + { + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + while (ip < ilimit) + { + nuint matchLength = 0; + assert(1 >= 1); + assert(1 <= 3); + nuint offBase = 1; + byte* start = ip + 1; + if (isDxS != 0) + { + uint repIndex = (uint)(ip - @base) + 1 - offset_1; + byte* repMatch = (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) && repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + matchLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + if (depth == 0) + goto _storeSequence; + } + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offset_1 > 0 && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1)) + { + matchLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; + if (depth == 0) + goto _storeSequence; + } + + { + nuint offbaseFound = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); + if (ml2 > matchLength) + { + matchLength = ml2; + start = ip; + offBase = offbaseFound; + } + } + + if (matchLength < 4) + { + /* jump faster over incompressible sections */ + nuint step = ((nuint)(ip - anchor) >> 8) + 1; + ip += step; + ms->lazySkipping = step > 8 ? 1 : 0; + continue; + } + + if (depth >= 1) + while (ip < ilimit) + { + ip++; + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offBase != 0 && offset_1 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_1)) + { + nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; + int gain2 = (int)(mlRep * 3); + int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + + if (isDxS != 0) + { + uint repIndex = (uint)(ip - @base) - offset_1; + byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + nuint mlRep = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + int gain2 = (int)(mlRep * 3); + int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } + + if (depth == 2 && ip < ilimit) + { + ip++; + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offBase != 0 && offset_1 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_1)) + { + nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; + int gain2 = (int)(mlRep * 4); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + + if (isDxS != 0) + { + uint repIndex = (uint)(ip - @base) - offset_1; + byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + nuint mlRep = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + int gain2 = (int)(mlRep * 4); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } + } + + break; + } + + if (offBase > 3) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + assert(offBase > 3); + assert(offBase > 3); + while (start > anchor && start - (offBase - 3) > prefixLowest && start[-1] == (start - (offBase - 3))[-1]) + { + start--; + matchLength++; + } + } + + if (isDxS != 0) + { + assert(offBase > 3); + uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); + byte* match = matchIndex < prefixLowestIndex ? dictBase + matchIndex - dictIndexDelta : @base + matchIndex; + byte* mStart = matchIndex < prefixLowestIndex ? 
dictLowest : prefixLowest; + while (start > anchor && match > mStart && start[-1] == match[-1]) + { + start--; + match--; + matchLength++; + } + } + + offset_2 = offset_1; + assert(offBase > 3); + offset_1 = (uint)(offBase - 3); + } + + _storeSequence: + { + nuint litLength = (nuint)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); + anchor = ip = start + matchLength; + } + + if (ms->lazySkipping != 0) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + ms->lazySkipping = 0; + } + + if (isDxS != 0) + { + while (ip <= ilimit) + { + uint current2 = (uint)(ip - @base); + uint repIndex = current2 - offset_2; + byte* repMatch = repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : @base + repIndex; + if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + { + byte* repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd2, prefixLowest) + 4; + offBase = offset_2; + offset_2 = offset_1; + offset_1 = (uint)offBase; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); + ip += matchLength; + anchor = ip; + continue; + } + + break; + } + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) + { + matchLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; + offBase = offset_2; + offset_2 = offset_1; + offset_1 = (uint)offBase; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); + ip += matchLength; + anchor = ip; + continue; + } + } + } + + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? 
offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_greedy(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_greedy_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_greedy_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_greedy_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_lazy(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + 
{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_lazy_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_lazy_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_lazy_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_lazy2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint 
ZSTD_compressBlock_lazy2_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_lazy2_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_lazy2_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + } + + private static nuint ZSTD_compressBlock_btlazy2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_binaryTree, 2, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_btlazy2_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, 
srcSize, searchMethod_e.search_binaryTree, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, searchMethod_e searchMethod, uint depth) + { + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? iend - 8 - 8 : iend - 8; + byte* @base = ms->window.@base; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictBase = ms->window.dictBase; + byte* dictEnd = dictBase + dictLimit; + byte* dictStart = dictBase + ms->window.lowLimit; + uint windowLog = ms->cParams.windowLog; + uint mls = ms->cParams.minMatch <= 4 ? 4 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6; + uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; + uint offset_1 = rep[0], offset_2 = rep[1]; + ms->lazySkipping = 0; + ip += ip == prefixStart ? 1 : 0; + if (searchMethod == searchMethod_e.search_rowHash) + { + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + while (ip < ilimit) + { + nuint matchLength = 0; + assert(1 >= 1); + assert(1 <= 3); + nuint offBase = 1; + byte* start = ip + 1; + uint curr = (uint)(ip - @base); + { + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr + 1, windowLog); + uint repIndex = curr + 1 - offset_1; + byte* repBase = repIndex < dictLimit ? dictBase : @base; + byte* repMatch = repBase + repIndex; + if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr + 1 - windowLow ? 1 : 0)) != 0) + if (MEM_read32(ip + 1) == MEM_read32(repMatch)) + { + /* repcode detected we should take it */ + byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; + matchLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + if (depth == 0) + goto _storeSequence; + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + if (ml2 > matchLength) + { + matchLength = ml2; + start = ip; + offBase = ofbCandidate; + } + } + + if (matchLength < 4) + { + nuint step = (nuint)(ip - anchor) >> 8; + ip += step + 1; + ms->lazySkipping = step > 8 ? 1 : 0; + continue; + } + + if (depth >= 1) + while (ip < ilimit) + { + ip++; + curr++; + if (offBase != 0) + { + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); + uint repIndex = curr - offset_1; + byte* repBase = repIndex < dictLimit ? dictBase : @base; + byte* repMatch = repBase + repIndex; + if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr - windowLow ? 1 : 0)) != 0) + if (MEM_read32(ip) == MEM_read32(repMatch)) + { + /* repcode detected */ + byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; + nuint repLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + int gain2 = (int)(repLength * 3); + int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + if (repLength >= 4 && gain2 > gain1) + { + matchLength = repLength; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } + + if (depth == 2 && ip < ilimit) + { + ip++; + curr++; + if (offBase != 0) + { + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); + uint repIndex = curr - offset_1; + byte* repBase = repIndex < dictLimit ? dictBase : @base; + byte* repMatch = repBase + repIndex; + if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr - windowLow ? 1 : 0)) != 0) + if (MEM_read32(ip) == MEM_read32(repMatch)) + { + /* repcode detected */ + byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; + nuint repLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + int gain2 = (int)(repLength * 4); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + if (repLength >= 4 && gain2 > gain1) + { + matchLength = repLength; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } + } + + break; + } + + if (offBase > 3) + { + assert(offBase > 3); + uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); + byte* match = matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; + byte* mStart = matchIndex < dictLimit ? dictStart : prefixStart; + while (start > anchor && match > mStart && start[-1] == match[-1]) + { + start--; + match--; + matchLength++; + } + + offset_2 = offset_1; + assert(offBase > 3); + offset_1 = (uint)(offBase - 3); + } + + _storeSequence: + { + nuint litLength = (nuint)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); + anchor = ip = start + matchLength; + } + + if (ms->lazySkipping != 0) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + ms->lazySkipping = 0; + } + + while (ip <= ilimit) + { + uint repCurrent = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); + uint repIndex = repCurrent - offset_2; + byte* repBase = repIndex < dictLimit ? 
dictBase : @base; + byte* repMatch = repBase + repIndex; + if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_2 <= repCurrent - windowLow ? 1 : 0)) != 0) + if (MEM_read32(ip) == MEM_read32(repMatch)) + { + /* repcode detected we should take it */ + byte* repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + offBase = offset_2; + offset_2 = offset_1; + offset_1 = (uint)offBase; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); + ip += matchLength; + anchor = ip; + continue; + } + + break; + } + } + + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_greedy_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0); + } + + private static nuint ZSTD_compressBlock_greedy_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0); + } + + private static nuint ZSTD_compressBlock_lazy_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1); + } + + private static nuint ZSTD_compressBlock_lazy_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1); + } + + private static nuint ZSTD_compressBlock_lazy2_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, 
seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2); + } + + private static nuint ZSTD_compressBlock_lazy2_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2); + } + + private static nuint ZSTD_compressBlock_btlazy2_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_binaryTree, 2); + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs new file mode 100644 index 000000000..53a559439 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs @@ -0,0 +1,757 @@ +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /** ZSTD_ldm_gear_init(): + * + * Initializes the rolling hash state such that it will honor the + * settings in params. */ + private static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t* @params) + { + uint maxBitsInMask = @params->minMatchLength < 64 ? @params->minMatchLength : 64; + uint hashRateLog = @params->hashRateLog; + state->rolling = ~(uint)0; + if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) + { + state->stopMask = ((ulong)1 << (int)hashRateLog) - 1 << (int)(maxBitsInMask - hashRateLog); + } + else + { + state->stopMask = ((ulong)1 << (int)hashRateLog) - 1; + } + } + + /** ZSTD_ldm_gear_reset() + * Feeds [data, data + minMatchLength) into the hash without registering any + * splits. This effectively resets the hash state. This is used when skipping + * over data, either at the beginning of a block, or skipping sections. 
+ */ + private static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state, byte* data, nuint minMatchLength) + { + ulong hash = state->rolling; + nuint n = 0; + while (n + 3 < minMatchLength) + { + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + } + + while (n < minMatchLength) + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + } + + /** ZSTD_ldm_gear_feed(): + * + * Registers in the splits array all the split points found in the first + * size bytes following the data pointer. This function terminates when + * either all the data has been processed or LDM_BATCH_SIZE splits are + * present in the splits array. + * + * Precondition: The splits array must not be full. + * Returns: The number of bytes processed. 
*/ + private static nuint ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, byte* data, nuint size, nuint* splits, uint* numSplits) + { + nuint n; + ulong hash, mask; + hash = state->rolling; + mask = state->stopMask; + n = 0; + while (n + 3 < size) + { + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) + { + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; + } + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) + { + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; + } + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) + { + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; + } + } + + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) + { + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; + } + } + } + + while (n < size) + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) + { + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; + } + } + + done: + state->rolling = hash; + return n; + } + + /** ZSTD_ldm_adjustParameters() : + * If the params->hashRateLog is not set, set it to its default value based on + * windowLog and params->hashLog. + * + * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to + * params->hashLog if it is not). + * + * Ensures that the minMatchLength >= targetLength during optimal parsing. + */ + private static void ZSTD_ldm_adjustParameters(ldmParams_t* @params, ZSTD_compressionParameters* cParams) + { + @params->windowLog = cParams->windowLog; + if (@params->hashRateLog == 0) + { + if (@params->hashLog > 0) + { + assert(@params->hashLog <= (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 
30 : 31 : 30)); + if (@params->windowLog > @params->hashLog) + { + @params->hashRateLog = @params->windowLog - @params->hashLog; + } + } + else + { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + @params->hashRateLog = (uint)(7 - (int)cParams->strategy / 3); + } + } + + if (@params->hashLog == 0) + { + @params->hashLog = @params->windowLog - @params->hashRateLog <= 6 ? 6 : @params->windowLog - @params->hashRateLog <= (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30) ? @params->windowLog - @params->hashRateLog : (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30); + } + + if (@params->minMatchLength == 0) + { + @params->minMatchLength = 64; + if (cParams->strategy >= ZSTD_strategy.ZSTD_btultra) + @params->minMatchLength /= 2; + } + + if (@params->bucketSizeLog == 0) + { + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + @params->bucketSizeLog = (uint)cParams->strategy <= 4 ? 4 : (uint)cParams->strategy <= 8 ? (uint)cParams->strategy : 8; + } + + @params->bucketSizeLog = @params->bucketSizeLog < @params->hashLog ? @params->bucketSizeLog : @params->hashLog; + } + + /** ZSTD_ldm_getTableSize() : + * Estimate the space needed for long distance matching tables or 0 if LDM is + * disabled. + */ + private static nuint ZSTD_ldm_getTableSize(ldmParams_t @params) + { + nuint ldmHSize = (nuint)1 << (int)@params.hashLog; + nuint ldmBucketSizeLog = @params.bucketSizeLog < @params.hashLog ? @params.bucketSizeLog : @params.hashLog; + nuint ldmBucketSize = (nuint)1 << (int)(@params.hashLog - ldmBucketSizeLog); + nuint totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * (nuint)sizeof(ldmEntry_t)); + return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? totalSize : 0; + } + + /** ZSTD_ldm_getSeqSpace() : + * Return an upper bound on the number of sequences that can be produced by + * the long distance matcher, or 0 if LDM is disabled. 
+ */ + private static nuint ZSTD_ldm_getMaxNbSeq(ldmParams_t @params, nuint maxChunkSize) + { + return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? maxChunkSize / @params.minMatchLength : 0; + } + + /** ZSTD_ldm_getBucket() : + * Returns a pointer to the start of the bucket associated with hash. */ + private static ldmEntry_t* ZSTD_ldm_getBucket(ldmState_t* ldmState, nuint hash, uint bucketSizeLog) + { + return ldmState->hashTable + (hash << (int)bucketSizeLog); + } + + /** ZSTD_ldm_insertEntry() : + * Insert the entry with corresponding hash into the hash table */ + private static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, nuint hash, ldmEntry_t entry, uint bucketSizeLog) + { + byte* pOffset = ldmState->bucketOffsets + hash; + uint offset = *pOffset; + *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; + *pOffset = (byte)(offset + 1 & (1U << (int)bucketSizeLog) - 1); + } + + /** ZSTD_ldm_countBackwardsMatch() : + * Returns the number of bytes that match backwards before pIn and pMatch. + * + * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ + private static nuint ZSTD_ldm_countBackwardsMatch(byte* pIn, byte* pAnchor, byte* pMatch, byte* pMatchBase) + { + nuint matchLength = 0; + while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) + { + pIn--; + pMatch--; + matchLength++; + } + + return matchLength; + } + + /** ZSTD_ldm_countBackwardsMatch_2segments() : + * Returns the number of bytes that match backwards from pMatch, + * even with the backwards match spanning 2 different segments. 
+ * + * On reaching `pMatchBase`, start counting from mEnd */ + private static nuint ZSTD_ldm_countBackwardsMatch_2segments(byte* pIn, byte* pAnchor, byte* pMatch, byte* pMatchBase, byte* pExtDictStart, byte* pExtDictEnd) + { + nuint matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); + if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) + { + return matchLength; + } + + matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart); + return matchLength; + } + + /** ZSTD_ldm_fillFastTables() : + * + * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. + * This is similar to ZSTD_loadDictionaryContent. + * + * The tables for the other strategies are filled within their + * block compressors. */ + private static nuint ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void* end) + { + byte* iend = (byte*)end; + switch (ms->cParams.strategy) + { + case ZSTD_strategy.ZSTD_fast: + ZSTD_fillHashTable(ms, iend, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx); + break; + case ZSTD_strategy.ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx); + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + break; + default: + assert(0 != 0); + break; + } + + return 0; + } + + private static void ZSTD_ldm_fillHashTable(ldmState_t* ldmState, byte* ip, byte* iend, ldmParams_t* @params) + { + uint minMatchLength = @params->minMatchLength; + uint bucketSizeLog = @params->bucketSizeLog; + uint hBits = @params->hashLog - bucketSizeLog; + byte* @base = ldmState->window.@base; + byte* istart = ip; + ldmRollingHashState_t hashState; + nuint* splits = &ldmState->splitIndices.e0; + uint 
numSplits; + ZSTD_ldm_gear_init(&hashState, @params); + while (ip < iend) + { + nuint hashed; + uint n; + numSplits = 0; + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(iend - ip), splits, &numSplits); + for (n = 0; n < numSplits; n++) + { + if (ip + splits[n] >= istart + minMatchLength) + { + byte* split = ip + splits[n] - minMatchLength; + ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); + uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); + ldmEntry_t entry; + entry.offset = (uint)(split - @base); + entry.checksum = (uint)(xxhash >> 32); + ZSTD_ldm_insertEntry(ldmState, hash, entry, @params->bucketSizeLog); + } + } + + ip += hashed; + } + } + + /** ZSTD_ldm_limitTableUpdate() : + * + * Sets cctx->nextToUpdate to a position corresponding closer to anchor + * if it is far way + * (after a long match, only update tables a limited amount). */ + private static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, byte* anchor) + { + uint curr = (uint)(anchor - ms->window.@base); + if (curr > ms->nextToUpdate + 1024) + { + ms->nextToUpdate = curr - (512 < curr - ms->nextToUpdate - 1024 ? 512 : curr - ms->nextToUpdate - 1024); + } + } + + private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, RawSeqStore_t* rawSeqStore, ldmParams_t* @params, void* src, nuint srcSize) + { + /* LDM parameters */ + int extDict = (int)ZSTD_window_hasExtDict(ldmState->window); + uint minMatchLength = @params->minMatchLength; + uint entsPerBucket = 1U << (int)@params->bucketSizeLog; + uint hBits = @params->hashLog - @params->bucketSizeLog; + /* Prefix and extDict parameters */ + uint dictLimit = ldmState->window.dictLimit; + uint lowestIndex = extDict != 0 ? ldmState->window.lowLimit : dictLimit; + byte* @base = ldmState->window.@base; + byte* dictBase = extDict != 0 ? ldmState->window.dictBase : null; + byte* dictStart = extDict != 0 ? dictBase + lowestIndex : null; + byte* dictEnd = extDict != 0 ? 
dictBase + dictLimit : null; + byte* lowPrefixPtr = @base + dictLimit; + /* Input bounds */ + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + /* Input positions */ + byte* anchor = istart; + byte* ip = istart; + /* Rolling hash state */ + ldmRollingHashState_t hashState; + /* Arrays for staged-processing */ + nuint* splits = &ldmState->splitIndices.e0; + ldmMatchCandidate_t* candidates = &ldmState->matchCandidates.e0; + uint numSplits; + if (srcSize < minMatchLength) + return (nuint)(iend - anchor); + ZSTD_ldm_gear_init(&hashState, @params); + ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength); + ip += minMatchLength; + while (ip < ilimit) + { + nuint hashed; + uint n; + numSplits = 0; + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(ilimit - ip), splits, &numSplits); + for (n = 0; n < numSplits; n++) + { + byte* split = ip + splits[n] - minMatchLength; + ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); + uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); + candidates[n].split = split; + candidates[n].hash = hash; + candidates[n].checksum = (uint)(xxhash >> 32); + candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, @params->bucketSizeLog); +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(candidates[n].bucket); + } +#endif + } + + for (n = 0; n < numSplits; n++) + { + nuint forwardMatchLength = 0, backwardMatchLength = 0, bestMatchLength = 0, mLength; + uint offset; + byte* split = candidates[n].split; + uint checksum = candidates[n].checksum; + uint hash = candidates[n].hash; + ldmEntry_t* bucket = candidates[n].bucket; + ldmEntry_t* cur; + ldmEntry_t* bestEntry = null; + ldmEntry_t newEntry; + newEntry.offset = (uint)(split - @base); + newEntry.checksum = checksum; + if (split < anchor) + { + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); + continue; + } + + for (cur = bucket; cur < bucket + 
entsPerBucket; cur++) + { + nuint curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; + if (cur->checksum != checksum || cur->offset <= lowestIndex) + { + continue; + } + + if (extDict != 0) + { + byte* curMatchBase = cur->offset < dictLimit ? dictBase : @base; + byte* pMatch = curMatchBase + cur->offset; + byte* matchEnd = cur->offset < dictLimit ? dictEnd : iend; + byte* lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; + curForwardMatchLength = ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); + if (curForwardMatchLength < minMatchLength) + { + continue; + } + + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd); + } + else + { + byte* pMatch = @base + cur->offset; + curForwardMatchLength = ZSTD_count(split, pMatch, iend); + if (curForwardMatchLength < minMatchLength) + { + continue; + } + + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr); + } + + curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; + if (curTotalMatchLength > bestMatchLength) + { + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; + } + } + + if (bestEntry == null) + { + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); + continue; + } + + offset = (uint)(split - @base) - bestEntry->offset; + mLength = forwardMatchLength + backwardMatchLength; + { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->size; + if (rawSeqStore->size == rawSeqStore->capacity) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + seq->litLength = (uint)(split - backwardMatchLength - anchor); + seq->matchLength = (uint)mLength; + seq->offset = offset; + rawSeqStore->size++; + } + + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); + anchor = split + forwardMatchLength; + if 
(anchor > ip + hashed) + { + ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); + ip = anchor - hashed; + break; + } + } + + ip += hashed; + } + + return (nuint)(iend - anchor); + } + + /*! ZSTD_ldm_reduceTable() : + * reduce table indexes by `reducerValue` */ + private static void ZSTD_ldm_reduceTable(ldmEntry_t* table, uint size, uint reducerValue) + { + uint u; + for (u = 0; u < size; u++) + { + if (table[u].offset < reducerValue) + table[u].offset = 0; + else + table[u].offset -= reducerValue; + } + } + + /** + * ZSTD_ldm_generateSequences(): + * + * Generates the sequences using the long distance match finder. + * Generates long range matching sequences in `sequences`, which parse a prefix + * of the source. `sequences` must be large enough to store every sequence, + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. + * @returns 0 or an error code. + * + * NOTE: The user must have called ZSTD_window_update() for all of the input + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. + * NOTE: This function returns an error if it runs out of space to store + * sequences. + */ + private static nuint ZSTD_ldm_generateSequences(ldmState_t* ldmState, RawSeqStore_t* sequences, ldmParams_t* @params, void* src, nuint srcSize) + { + uint maxDist = 1U << (int)@params->windowLog; + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + const nuint kMaxChunkSize = 1 << 20; + nuint nbChunks = srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); + nuint chunk; + nuint leftoverSize = 0; + assert(unchecked((uint)-1) - (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) >= kMaxChunkSize); + assert(ldmState->window.nextSrc >= (byte*)src + srcSize); + assert(sequences->pos <= sequences->size); + assert(sequences->size <= sequences->capacity); + for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) + { + byte* chunkStart = istart + chunk * kMaxChunkSize; + nuint remaining = (nuint)(iend - chunkStart); + byte* chunkEnd = remaining < kMaxChunkSize ? iend : chunkStart + kMaxChunkSize; + nuint chunkSize = (nuint)(chunkEnd - chunkStart); + nuint newLeftoverSize; + nuint prevSize = sequences->size; + assert(chunkStart < iend); + if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd) != 0) + { + uint ldmHSize = 1U << (int)@params->hashLog; + uint correction = ZSTD_window_correctOverflow(&ldmState->window, 0, maxDist, chunkStart); + ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); + ldmState->loadedDictEnd = 0; + } + + ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, null); + newLeftoverSize = ZSTD_ldm_generateSequences_internal(ldmState, sequences, @params, chunkStart, chunkSize); + if (ERR_isError(newLeftoverSize)) + return newLeftoverSize; + if (prevSize < sequences->size) + { + sequences->seq[prevSize].litLength += (uint)leftoverSize; + leftoverSize = newLeftoverSize; + } + else + { + assert(newLeftoverSize == chunkSize); + leftoverSize += chunkSize; + } + } + + return 0; + } + + /** + * ZSTD_ldm_skipSequences(): + * + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. + * Avoids emitting matches less than `minMatch` bytes. + * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). 
+ */ + private static void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, nuint srcSize, uint minMatch) + { + while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) + { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; + if (srcSize <= seq->litLength) + { + seq->litLength -= (uint)srcSize; + return; + } + + srcSize -= seq->litLength; + seq->litLength = 0; + if (srcSize < seq->matchLength) + { + seq->matchLength -= (uint)srcSize; + if (seq->matchLength < minMatch) + { + if (rawSeqStore->pos + 1 < rawSeqStore->size) + { + seq[1].litLength += seq[0].matchLength; + } + + rawSeqStore->pos++; + } + + return; + } + + srcSize -= seq->matchLength; + seq->matchLength = 0; + rawSeqStore->pos++; + } + } + + /** + * If the sequence length is longer than remaining then the sequence is split + * between this block and the next. + * + * Returns the current sequence to handle, or if the rest of the block should + * be literals, it returns a sequence with offset == 0. + */ + private static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore, uint remaining, uint minMatch) + { + rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; + assert(sequence.offset > 0); + if (remaining >= sequence.litLength + sequence.matchLength) + { + rawSeqStore->pos++; + return sequence; + } + + if (remaining <= sequence.litLength) + { + sequence.offset = 0; + } + else if (remaining < sequence.litLength + sequence.matchLength) + { + sequence.matchLength = remaining - sequence.litLength; + if (sequence.matchLength < minMatch) + { + sequence.offset = 0; + } + } + + ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); + return sequence; + } + + /* ZSTD_ldm_skipRawSeqStoreBytes(): + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'. + * Not to be used in conjunction with ZSTD_ldm_skipSequences(). + * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). 
+ */ + private static void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) + { + uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); + while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) + { + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) + { + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; + } + else + { + rawSeqStore->posInSequence = currPos; + break; + } + } + + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) + { + rawSeqStore->posInSequence = 0; + } + } + + /** + * ZSTD_ldm_blockCompress(): + * + * Compresses a block using the predefined sequences, along with a secondary + * block compressor. The literals section of every sequence is passed to the + * secondary block compressor, and those sequences are interspersed with the + * predefined sequences. Returns the length of the last literals. + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. + * `rawSeqStore.seq` may also be updated to split the last sequence between two + * blocks. + * @return The length of the last literals. + * + * NOTE: The source must be at most the maximum block size, but the predefined + * sequences can be any size, and may be longer than the block. In the case that + * they are longer than the block, the last sequences may need to be split into + * two. We handle that case correctly, and update `rawSeqStore` appropriately. + * NOTE: This function does not return any errors. 
+ */ + private static nuint ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, ZSTD_paramSwitch_e useRowMatchFinder, void* src, nuint srcSize) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint minMatch = cParams->minMatch; + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); + /* Input bounds */ + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + /* Input positions */ + byte* ip = istart; + if (cParams->strategy >= ZSTD_strategy.ZSTD_btopt) + { + nuint lastLLSize; + ms->ldmSeqStore = rawSeqStore; + lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); + ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); + return lastLLSize; + } + + assert(rawSeqStore->pos <= rawSeqStore->size); + assert(rawSeqStore->size <= rawSeqStore->capacity); + while (rawSeqStore->pos < rawSeqStore->size && ip < iend) + { + /* maybeSplitSequence updates rawSeqStore->pos */ + rawSeq sequence = maybeSplitSequence(rawSeqStore, (uint)(iend - ip), minMatch); + if (sequence.offset == 0) + break; + assert(ip + sequence.litLength + sequence.matchLength <= iend); + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, ip); + { + int i; + nuint newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); + ip += sequence.litLength; + for (i = 3 - 1; i > 0; i--) + rep[i] = rep[i - 1]; + rep[0] = sequence.offset; + assert(sequence.offset > 0); + ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, sequence.offset + 3, sequence.matchLength); + ip += sequence.matchLength; + } + } + + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, ip); + return blockCompressor(ms, seqStore, rep, ip, (nuint)(iend - ip)); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs new file mode 100644 index 000000000..7027cb6f7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs @@ -0,0 +1,275 @@ +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_ZSTD_ldm_gearTab => new ulong[256] + { + 0xf5b8f72c5f77775c, + 0x84935f266b7ac412, + 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, + 0x34584e7e8c3a9fd0, + 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, + 0xcecd042422c4044f, + 0x76de76c58524259e, + 0x9c8528f65badeaca, + 0x86563706e2097529, + 0x2902475fa375d889, + 0xafb32a9739a5ebe6, + 0xce2714da3883e639, + 0x21eaf821722e69e, + 0x37b628620b628, + 0x49a8d455d88caf5, + 0x8556d711e6958140, + 0x4f7ae74fc605c1f, + 0x829f0c3468bd3a20, + 0x4ffdc885c625179e, + 0x8473de048a3daf1b, + 0x51008822b05646b2, + 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, + 0xc3cc10f4abbd4003, + 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, + 0x3481db31cee03547, + 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, + 0x8eb27177055723dd, + 0xc789950d44cd94be, + 0x934feadc3700b12b, + 0x5e485f11edbdf182, + 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, + 0x9d46e9935ebbba2e, + 0xe056b67e05e6822b, + 0x94d73f55739d03a0, + 0xcd7010bdb69b5a03, + 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, + 0x38d1a4fa6185d225, + 0xb475166f94bbe9bb, + 0xa4143548720959f1, + 0x7aed4780ba6b26ba, + 0xd0ce264439e02312, + 0x84366d746078d508, + 0xa8ce973c72ed17be, + 0x21c323a29a430b01, + 0x9962d617e3af80ee, + 0xab0ce91d9c8cf75b, + 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, + 0xc03a681640a85506, + 0x496e4e9f9c310967, + 0x78580472b59b14a0, + 0x273824c23b388577, + 0x66bf923ad45cb553, + 0x47ae1a5a2492ba86, + 0x35e304569e229659, + 0x4765182a46870b6f, + 0x6cbab625e9099412, + 0xddac9a2e598522c1, + 0x7172086e666624f2, + 0xdf5003ca503b7837, + 0x88c0c1db78563d09, + 
0x58d51865acfc289d, + 0x177671aec65224f1, + 0xfb79d8a241e967d7, + 0x2be1e101cad9a49a, + 0x6625682f6e29186b, + 0x399553457ac06e50, + 0x35dffb4c23abb74, + 0x429db2591f54aade, + 0xc52802a8037d1009, + 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, + 0x8b0ea2d99580c2f7, + 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, + 0x9d42ed614f3dd267, + 0x5b9313c06257c57b, + 0xa114b8008b5e1442, + 0xc1fe311c11c13d4b, + 0x66e8763ea34c5568, + 0x8b982af1c262f05d, + 0xee8876faaa75fbb7, + 0x8a62a4d0d172bb2a, + 0xc13d94a3b7449a97, + 0x6dbbba9dc15d037c, + 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, + 0xf61aaf2962c9abb9, + 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, + 0x25e650899c74ddd7, + 0xba042af4a7c37463, + 0x4eb1a539465a3eca, + 0xbe09dbf03b05d5ca, + 0x774e5a362b5472ba, + 0x47a1221229d183cd, + 0x504b0ca18ef5a2df, + 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, + 0xf2aef8fe819d98c3, + 0x357f5276d4599d61, + 0x24a5483879c453e3, + 0x88026889192b4b9, + 0x28da96671782dbec, + 0x4ef37c40588e9aaa, + 0x8837b90651bc9fb3, + 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, + 0x69cd868f7622ada, + 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, + 0x4f00794bacee98bb, + 0x7107de7d637a69d5, + 0x88af793bb6f2255e, + 0xf3c6466b8799b598, + 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, + 0x88d85ace36a2674b, + 0xd056bd3792389e7, + 0xe55c396c4e9dd32d, + 0xbefb504571e6c0a6, + 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, + 0x66dae58801672606, + 0x833b6017872317fb, + 0xb87c16f2d1c92864, + 0xdb766a74e58b669c, + 0x89659f85c61417be, + 0xc8daad856011ea0c, + 0x76a4b565b6fe7eae, + 0xa469d085f6237312, + 0xaaf0365683a3e96c, + 0x4dbb746f8424f7b8, + 0x638755af4e4acc1, + 0x3d7807f5bde64486, + 0x17be6d8f5bbb7639, + 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, + 0xa676ff93ed4c82f1, + 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, + 0x84e54d297aacfb51, + 0xa0b4b776a143445, + 0x820d471e20b348e, + 0x1874383cb83d46dc, + 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, + 0x1dd91955ce70e032, + 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, + 0x7b670a4cc50f7a9b, + 0x77c07d2a05c6dfa5, + 
0xe3778b6646d0a6fa, + 0xb39c8eda47b56749, + 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, + 0xe5af208eb666e49, + 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, + 0x862daef3d35539a6, + 0xe68722498f8e1ea9, + 0x981c53093dc0d572, + 0xfa09b0bfbf86fbf5, + 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, + 0x5a66736e35f2a8e9, + 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, + 0xea4e39eac8de1ba3, + 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, + 0xa34e5956fbaf3385, + 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, + 0xb85a8d56bf016379, + 0x8382381267408ae3, + 0xb90f901bbdc0096d, + 0x7c6ad32933bcec65, + 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, + 0xc3e6064da1c2da72, + 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, + 0x3781b9e2288b997e, + 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, + 0x9d7a3edf0da43911, + 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, + 0x4c4f389fa143791d, + 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, + 0x754e6132dbe74ff9, + 0x71635442a839df45, + 0x2f6fb1643fbe52de, + 0x961e0a42cf7a8177, + 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, + 0xcd6848542c3295e7, + 0xe4cee1664c78662f, + 0x9947548b474c68c4, + 0x25d73777a5ed8b0b, + 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, + 0x5f6b5dcf608a64a1, + 0xdcf333255ff9570c, + 0x633b922418ced4ee, + 0xc136dde0b004b34a, + 0x58cc83b05d4b2f5a, + 0x5eb424dda28e42d2, + 0x62df47369739cd98, + 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, + 0x8ec3916707560ebf, + 0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, + 0x78d9d7d07d2bbcc4, + 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, + 0xa9b53336cb3c8a05, + 0x38c42c0bf45c4f91, + 0x640893cdf4488863, + 0x80ec34bc575ea568, + 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, + 0x9224fc058cc5a214, + 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, + 0xa558f3ec65626212, + 0x424bec8b7adabe2f, + 0x41622513a6aea433, + 0xb88da2d5324ca798, + 0xd287733b245528a4, + 0x9a44697e6d68aec3, + 0x7b1093be2f49bb28, + 0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, + 0x897b9e7431b02bf3, + 0x219efdcb338a7047, + 0x3b0311f0a27c0656, + 0xdb17bf91c0db96e7, + 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, + 
0x40d6fe831fa9dfd9, + 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, + 0xbbcbd3df14e08580, + 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4 + }; + private static ulong* ZSTD_ldm_gearTab => (ulong*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab)); +#else + + private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer(new ulong[256] { 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889, 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e, 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140, 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e, 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f, 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391, 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210, 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be, 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a, 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b, 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4, 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb, 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312, 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01, 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc, 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967, 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553, 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f, 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2, 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d, 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a, 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74, 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3, 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1, 
0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b, 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568, 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a, 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1, 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9, 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463, 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba, 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9, 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61, 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec, 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6, 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab, 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5, 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59, 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7, 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc, 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb, 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be, 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312, 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1, 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc, 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d, 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445, 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c, 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5, 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5, 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28, 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a, 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9, 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15, 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef, 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2, 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375, 0x173691e9b83faec1, 
0xb85a8d56bf016379, 0x8382381267408ae3, 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595, 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389, 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4, 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756, 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc, 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45, 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea, 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f, 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc, 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c, 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a, 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17, 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3, 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4, 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91, 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40, 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741, 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f, 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4, 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad, 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047, 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2, 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e, 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b, 0x2b4da14f2613d8f4 }); +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs new file mode 100644 index 000000000..c001858d8 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs @@ -0,0 +1,1435 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static 
unsafe partial class Methods + { + /* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_bitWeight(uint stat) + { + return ZSTD_highbit32(stat + 1) * (1 << 8); + } + + /* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_fracWeight(uint rawStat) + { + uint stat = rawStat + 1; + uint hb = ZSTD_highbit32(stat); + uint BWeight = hb * (1 << 8); + /* Fweight was meant for "Fractional weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ + uint FWeight = stat << 8 >> (int)hb; + uint weight = BWeight + FWeight; + assert(hb + 8 < 31); + return weight; + } + + private static int ZSTD_compressedLiterals(optState_t* optPtr) + { + return optPtr->literalCompressionMode != ZSTD_paramSwitch_e.ZSTD_ps_disable ? 1 : 0; + } + + private static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) + { + if (ZSTD_compressedLiterals(optPtr) != 0) + optPtr->litSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->litSum) : ZSTD_bitWeight(optPtr->litSum); + optPtr->litLengthSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->litLengthSum) : ZSTD_bitWeight(optPtr->litLengthSum); + optPtr->matchLengthSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->matchLengthSum) : ZSTD_bitWeight(optPtr->matchLengthSum); + optPtr->offCodeSumBasePrice = optLevel != 0 ? 
ZSTD_fracWeight(optPtr->offCodeSum) : ZSTD_bitWeight(optPtr->offCodeSum); + } + + private static uint sum_u32(uint* table, nuint nbElts) + { + nuint n; + uint total = 0; + for (n = 0; n < nbElts; n++) + { + total += table[n]; + } + + return total; + } + + private static uint ZSTD_downscaleStats(uint* table, uint lastEltIndex, uint shift, base_directive_e base1) + { + uint s, sum = 0; + assert(shift < 30); + for (s = 0; s < lastEltIndex + 1; s++) + { + uint @base = (uint)(base1 != default ? 1 : table[s] > 0 ? 1 : 0); + uint newStat = @base + (table[s] >> (int)shift); + sum += newStat; + table[s] = newStat; + } + + return sum; + } + + /* ZSTD_scaleStats() : + * reduce all elt frequencies in table if sum too large + * return the resulting sum of elements */ + private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarget) + { + uint prevsum = sum_u32(table, lastEltIndex + 1); + uint factor = prevsum >> (int)logTarget; + assert(logTarget < 30); + if (factor <= 1) + return prevsum; + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_directive_e.base_1guaranteed); + } + +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_baseLLfreqs => new uint[36] + { + 4, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 + }; + private static uint* baseLLfreqs => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_baseLLfreqs)); +#else + + private static readonly uint* baseLLfreqs = GetArrayPointer(new uint[36] { 4, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); +#endif +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_baseOFCfreqs => new uint[32] + { + 6, + 2, + 1, + 1, + 2, + 3, + 4, + 4, + 4, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 
1 + }; + private static uint* baseOFCfreqs => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_baseOFCfreqs)); +#else + + private static readonly uint* baseOFCfreqs = GetArrayPointer(new uint[32] { 6, 2, 1, 1, 2, 3, 4, 4, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); +#endif + /* ZSTD_rescaleFreqs() : + * if first block (detected by optPtr->litLengthSum == 0) : init statistics + * take hints from dictionary if there is one + * and init from zero if there is none, + * using src for literals stats, and baseline stats for sequence symbols + * otherwise downscale existing stats, to be used as seed for next block. + */ + private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSize, int optLevel) + { + int compressedLiterals = ZSTD_compressedLiterals(optPtr); + optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; + if (optPtr->litLengthSum == 0) + { + if (srcSize <= 8) + { + optPtr->priceType = ZSTD_OptPrice_e.zop_predef; + } + + assert(optPtr->symbolCosts != null); + if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat.HUF_repeat_valid) + { + optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; + if (compressedLiterals != 0) + { + /* generate literals statistics from huffman table */ + uint lit; + assert(optPtr->litFreq != null); + optPtr->litSum = 0; + for (lit = 0; lit <= (1 << 8) - 1; lit++) + { + /* scale to 2K */ + const uint scaleLog = 11; + uint bitCost = HUF_getNbBitsFromCTable(&optPtr->symbolCosts->huf.CTable.e0, lit); + assert(bitCost <= scaleLog); + optPtr->litFreq[lit] = (uint)(bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1); + optPtr->litSum += optPtr->litFreq[lit]; + } + } + + { + uint ll; + FSE_CState_t llstate; + FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); + optPtr->litLengthSum = 0; + for (ll = 0; ll <= 35; ll++) + { + /* scale to 1K */ + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); + assert(bitCost < scaleLog); + optPtr->litLengthFreq[ll] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->litLengthSum += optPtr->litLengthFreq[ll]; + } + } + + { + uint ml; + FSE_CState_t mlstate; + FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); + optPtr->matchLengthSum = 0; + for (ml = 0; ml <= 52; ml++) + { + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); + assert(bitCost < scaleLog); + optPtr->matchLengthFreq[ml] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; + } + } + + { + uint of; + FSE_CState_t ofstate; + FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); + optPtr->offCodeSum = 0; + for (of = 0; of <= 31; of++) + { + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); + assert(bitCost < scaleLog); + optPtr->offCodeFreq[of] = (uint)(bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1); + optPtr->offCodeSum += optPtr->offCodeFreq[of]; + } + } + } + else + { + assert(optPtr->litFreq != null); + if (compressedLiterals != 0) + { + /* base initial cost of literals on direct frequency within src */ + uint lit = (1 << 8) - 1; + HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, (1 << 8) - 1, 8, base_directive_e.base_0possible); + } + + { + memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(uint) * 36); + optPtr->litLengthSum = sum_u32(baseLLfreqs, 35 + 1); + } + + { + uint ml; + for (ml = 0; ml <= 52; ml++) + optPtr->matchLengthFreq[ml] = 1; + } + + optPtr->matchLengthSum = 52 + 1; + { + memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(uint) * 32); + optPtr->offCodeSum = sum_u32(baseOFCfreqs, 31 + 1); + } + } + } + else + { + if (compressedLiterals != 0) + optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, (1 << 8) - 1, 12); + optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, 35, 11); + optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, 52, 11); + optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, 31, 11); + } + + ZSTD_setBasePrices(optPtr, optLevel); + } + + /* ZSTD_rawLiteralsCost() : + * price of literals (only) in specified segment (which length can be 0). + * does not include price of literalLength symbol */ + private static uint ZSTD_rawLiteralsCost(byte* literals, uint litLength, optState_t* optPtr, int optLevel) + { + if (litLength == 0) + return 0; + if (ZSTD_compressedLiterals(optPtr) == 0) + return (litLength << 3) * (1 << 8); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return litLength * 6 * (1 << 8); + { + uint price = optPtr->litSumBasePrice * litLength; + uint litPriceMax = optPtr->litSumBasePrice - (1 << 8); + uint u; + assert(optPtr->litSumBasePrice >= 1 << 8); + for (u = 0; u < litLength; u++) + { + uint litPrice = optLevel != 0 ? 
ZSTD_fracWeight(optPtr->litFreq[literals[u]]) : ZSTD_bitWeight(optPtr->litFreq[literals[u]]); + if (litPrice > litPriceMax) + litPrice = litPriceMax; + price -= litPrice; + } + + return price; + } + } + + /* ZSTD_litLengthPrice() : + * cost of literalLength symbol */ + private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int optLevel) + { + assert(litLength <= 1 << 17); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return optLevel != 0 ? ZSTD_fracWeight(litLength) : ZSTD_bitWeight(litLength); + if (litLength == 1 << 17) + return (1 << 8) + ZSTD_litLengthPrice((1 << 17) - 1, optPtr, optLevel); + { + uint llCode = ZSTD_LLcode(litLength); + return (uint)(LL_bits[llCode] * (1 << 8)) + optPtr->litLengthSumBasePrice - (optLevel != 0 ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) : ZSTD_bitWeight(optPtr->litLengthFreq[llCode])); + } + } + + /* ZSTD_getMatchPrice() : + * Provides the cost of the match part (offset + matchLength) of a sequence. + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. + * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() + * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_getMatchPrice(uint offBase, uint matchLength, optState_t* optPtr, int optLevel) + { + uint price; + uint offCode = ZSTD_highbit32(offBase); + uint mlBase = matchLength - 3; + assert(matchLength >= 3); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) + (16 + offCode) * (1 << 8); + price = offCode * (1 << 8) + (optPtr->offCodeSumBasePrice - (optLevel != 0 ? 
ZSTD_fracWeight(optPtr->offCodeFreq[offCode]) : ZSTD_bitWeight(optPtr->offCodeFreq[offCode]))); + if (optLevel < 2 && offCode >= 20) + price += (offCode - 19) * 2 * (1 << 8); + { + uint mlCode = ZSTD_MLcode(mlBase); + price += (uint)(ML_bits[mlCode] * (1 << 8)) + (optPtr->matchLengthSumBasePrice - (optLevel != 0 ? ZSTD_fracWeight(optPtr->matchLengthFreq[mlCode]) : ZSTD_bitWeight(optPtr->matchLengthFreq[mlCode]))); + } + + price += (1 << 8) / 5; + return price; + } + + /* ZSTD_updateStats() : + * assumption : literals + litLength <= iend */ + private static void ZSTD_updateStats(optState_t* optPtr, uint litLength, byte* literals, uint offBase, uint matchLength) + { + if (ZSTD_compressedLiterals(optPtr) != 0) + { + uint u; + for (u = 0; u < litLength; u++) + optPtr->litFreq[literals[u]] += 2; + optPtr->litSum += litLength * 2; + } + + { + uint llCode = ZSTD_LLcode(litLength); + optPtr->litLengthFreq[llCode]++; + optPtr->litLengthSum++; + } + + { + uint offCode = ZSTD_highbit32(offBase); + assert(offCode <= 31); + optPtr->offCodeFreq[offCode]++; + optPtr->offCodeSum++; + } + + { + uint mlBase = matchLength - 3; + uint mlCode = ZSTD_MLcode(mlBase); + optPtr->matchLengthFreq[mlCode]++; + optPtr->matchLengthSum++; + } + } + + /* ZSTD_readMINMATCH() : + * function safe only for comparisons + * assumption : memPtr must be at least 4 bytes before end of buffer */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_readMINMATCH(void* memPtr, uint length) + { + switch (length) + { + default: + case 4: + return MEM_read32(memPtr); + case 3: + if (BitConverter.IsLittleEndian) + return MEM_read32(memPtr) << 8; + else + return MEM_read32(memPtr) >> 8; + } + } + + /* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ + private static uint ZSTD_insertAndFindFirstIndexHash3(ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip) + { + uint* hashTable3 = ms->hashTable3; + uint hashLog3 = ms->hashLog3; + byte* @base = ms->window.@base; + uint idx = *nextToUpdate3; + uint target = (uint)(ip - @base); + nuint hash3 = ZSTD_hash3Ptr(ip, hashLog3); + assert(hashLog3 > 0); + while (idx < target) + { + hashTable3[ZSTD_hash3Ptr(@base + idx, hashLog3)] = idx; + idx++; + } + + *nextToUpdate3 = target; + return hashTable3[hash3]; + } + + /*-************************************* + * Binary Tree search + ***************************************/ + /** ZSTD_insertBt1() : add one or multiple positions to tree. + * @param ip assumed <= iend-8 . + * @param target The target of ZSTD_updateTree_internal() - we are filling to this position + * @return : nb of positions added */ + private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint target, uint mls, int extDict) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint matchIndex = hashTable[h]; + nuint commonLengthSmaller = 0, commonLengthLarger = 0; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + byte* match; + uint curr = (uint)(ip - @base); + uint btLow = btMask >= curr ? 0 : curr - btMask; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = smallerPtr + 1; + /* to be nullified at the end */ + uint dummy32; + /* windowLow is based on target because + * we only need positions that will be in the window at the end of the tree update. 
+ */ + uint windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); + uint matchEndIdx = curr + 8 + 1; + nuint bestLength = 8; + uint nbCompares = 1U << (int)cParams->searchLog; + assert(curr <= target); + assert(ip <= iend - 8); + hashTable[h] = curr; + assert(windowLow > 0); + for (; nbCompares != 0 && matchIndex >= windowLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + assert(matchIndex < curr); + if (extDict == 0 || matchIndex + matchLength >= dictLimit) + { + assert(matchIndex + matchLength >= dictLimit); + match = @base + matchIndex; + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); + } + else + { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; + } + + if (matchLength > bestLength) + { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + } + + if (ip + matchLength == iend) + { + break; + } + + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } + } + + *smallerPtr = *largerPtr = 0; + { + uint positions = 0; + if (bestLength > 384) + positions = 192 < (uint)(bestLength - 384) ? 192 : (uint)(bestLength - 384); + assert(matchEndIdx > curr + 8); + return positions > matchEndIdx - (curr + 8) ? 
positions : matchEndIdx - (curr + 8); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateTree_internal(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint mls, ZSTD_dictMode_e dictMode) + { + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + while (idx < target) + { + uint forward = ZSTD_insertBt1(ms, @base + idx, iend, target, mls, dictMode == ZSTD_dictMode_e.ZSTD_extDict ? 1 : 0); + assert(idx < idx + forward); + idx += forward; + } + + assert((nuint)(ip - @base) <= unchecked((uint)-1)); + assert((nuint)(iend - @base) <= unchecked((uint)-1)); + ms->nextToUpdate = target; + } + + /* used in ZSTD_loadDictionaryContent() */ + private static void ZSTD_updateTree(ZSTD_MatchState_t* ms, byte* ip, byte* iend) + { + ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_dictMode_e.ZSTD_noDict); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iLimit, ZSTD_dictMode_e dictMode, uint* rep, uint ll0, uint lengthToBeat, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint sufficient_len = cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + byte* @base = ms->window.@base; + uint curr = (uint)(ip - @base); + uint hashLog = cParams->hashLog; + uint minMatch = (uint)(mls == 3 ? 3 : 4); + uint* hashTable = ms->hashTable; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint matchIndex = hashTable[h]; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (1U << (int)btLog) - 1; + nuint commonLengthSmaller = 0, commonLengthLarger = 0; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + uint btLow = btMask >= curr ? 
0 : curr - btMask; + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); + uint matchLow = windowLow != 0 ? windowLow : 1; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = bt + 2 * (curr & btMask) + 1; + /* farthest referenced position of any match => detects repetitive patterns */ + uint matchEndIdx = curr + 8 + 1; + /* to be nullified at the end */ + uint dummy32; + uint mnum = 0; + uint nbCompares = 1U << (int)cParams->searchLog; + ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? ms->dictMatchState : null; + ZSTD_compressionParameters* dmsCParams = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; + byte* dmsBase = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; + byte* dmsEnd = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; + uint dmsHighLimit = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; + uint dmsLowLimit = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.lowLimit : 0; + uint dmsIndexDelta = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; + uint dmsHashLog = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; + uint dmsBtLog = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; + uint dmsBtMask = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (1U << (int)dmsBtLog) - 1 : 0; + uint dmsBtLow = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; + nuint bestLength = lengthToBeat - 1; + assert(ll0 <= 1); + { + uint lastR = 3 + ll0; + uint repCode; + for (repCode = ll0; repCode < lastR; repCode++) + { + uint repOffset = repCode == 3 ? 
rep[0] - 1 : rep[repCode]; + uint repIndex = curr - repOffset; + uint repLen = 0; + assert(curr >= dictLimit); + if (repOffset - 1 < curr - dictLimit) + { + if (repIndex >= windowLow && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) + { + repLen = (uint)ZSTD_count(ip + minMatch, ip + minMatch - repOffset, iLimit) + minMatch; + } + } + else + { + byte* repMatch = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsBase + repIndex - dmsIndexDelta : dictBase + repIndex; + assert(curr >= windowLow); + if (dictMode == ZSTD_dictMode_e.ZSTD_extDict && ((repOffset - 1 < curr - windowLow ? 1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex)) != 0 && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) + { + repLen = (uint)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iLimit, dictEnd, prefixStart) + minMatch; + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && ((repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex)) != 0 && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) + { + repLen = (uint)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iLimit, dmsEnd, prefixStart) + minMatch; + } + } + + if (repLen > bestLength) + { + bestLength = repLen; + assert(repCode - ll0 + 1 >= 1); + assert(repCode - ll0 + 1 <= 3); + matches[mnum].off = repCode - ll0 + 1; + matches[mnum].len = repLen; + mnum++; + if (repLen > sufficient_len || ip + repLen == iLimit) + { + return mnum; + } + } + } + } + + if (mls == 3 && bestLength < mls) + { + uint matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); + if (matchIndex3 >= matchLow && curr - matchIndex3 < 1 << 18) + { + nuint mlen; + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || matchIndex3 >= dictLimit) + { + byte* match = @base + matchIndex3; + mlen = ZSTD_count(ip, match, iLimit); + } + else + { + byte* match 
= dictBase + matchIndex3; + mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); + } + + if (mlen >= mls) + { + bestLength = mlen; + assert(curr > matchIndex3); + assert(mnum == 0); + assert(curr - matchIndex3 > 0); + matches[0].off = curr - matchIndex3 + 3; + matches[0].len = (uint)mlen; + mnum = 1; + if (mlen > sufficient_len || ip + mlen == iLimit) + { + ms->nextToUpdate = curr + 1; + return 1; + } + } + } + } + + hashTable[h] = curr; + for (; nbCompares != 0 && matchIndex >= matchLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + byte* match; + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + assert(curr > matchIndex); + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || matchIndex + matchLength >= dictLimit) + { + assert(matchIndex + matchLength >= dictLimit); + match = @base + matchIndex; +#if DEBUG + if (matchIndex >= dictLimit) + assert(memcmp(match, ip, matchLength) == 0); +#endif + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iLimit); + } + else + { + match = dictBase + matchIndex; + assert(memcmp(match, ip, matchLength) == 0); + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; + } + + if (matchLength > bestLength) + { + assert(matchEndIdx > matchIndex); + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + bestLength = matchLength; + assert(curr - matchIndex > 0); + matches[mnum].off = curr - matchIndex + 3; + matches[mnum].len = (uint)matchLength; + mnum++; + if (matchLength > 1 << 12 || ip + matchLength == iLimit) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + nbCompares = 0; + break; + } + } + + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = 
matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } + } + + *smallerPtr = *largerPtr = 0; + assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) + { + nuint dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); + uint dictMatchIndex = dms->hashTable[dmsH]; + uint* dmsBt = dms->chainTable; + commonLengthSmaller = commonLengthLarger = 0; + for (; nbCompares != 0 && dictMatchIndex > dmsLowLimit; --nbCompares) + { + uint* nextPtr = dmsBt + 2 * (dictMatchIndex & dmsBtMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + byte* match = dmsBase + dictMatchIndex; + matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dmsEnd, prefixStart); + if (dictMatchIndex + matchLength >= dmsHighLimit) + match = @base + dictMatchIndex + dmsIndexDelta; + if (matchLength > bestLength) + { + matchIndex = dictMatchIndex + dmsIndexDelta; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + bestLength = matchLength; + assert(curr - matchIndex > 0); + matches[mnum].off = curr - matchIndex + 3; + matches[mnum].len = (uint)matchLength; + mnum++; + if (matchLength > 1 << 12 || ip + matchLength == iLimit) + { + break; + } + } + + if (dictMatchIndex <= dmsBtLow) + { + break; + } + + if (match[matchLength] < ip[matchLength]) + { + commonLengthSmaller = matchLength; + dictMatchIndex = nextPtr[1]; + } + else + { + commonLengthLarger = matchLength; + dictMatchIndex = nextPtr[0]; + } + } + } + + assert(matchEndIdx > curr + 8); + 
ms->nextToUpdate = matchEndIdx - 8; + return mnum; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_btGetAllMatches_internal(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat, ZSTD_dictMode_e dictMode, uint mls) + { + assert((ms->cParams.minMatch <= 3 ? 3 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6) == mls); + if (ip < ms->window.@base + ms->nextToUpdate) + return 0; + ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); + return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls); + } + + private static uint ZSTD_btGetAllMatches_noDict_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 3); + } + + private static uint ZSTD_btGetAllMatches_noDict_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 4); + } + + private static uint ZSTD_btGetAllMatches_noDict_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 5); + } + + private static uint ZSTD_btGetAllMatches_noDict_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, 
lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 6); + } + + private static uint ZSTD_btGetAllMatches_extDict_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 3); + } + + private static uint ZSTD_btGetAllMatches_extDict_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 4); + } + + private static uint ZSTD_btGetAllMatches_extDict_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 5); + } + + private static uint ZSTD_btGetAllMatches_extDict_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 6); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 3); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return 
ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + { + return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + } + + private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = new ZSTD_getAllMatchesFn[3][] + { + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_noDict_3, + ZSTD_btGetAllMatches_noDict_4, + ZSTD_btGetAllMatches_noDict_5, + ZSTD_btGetAllMatches_noDict_6 + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_extDict_3, + ZSTD_btGetAllMatches_extDict_4, + ZSTD_btGetAllMatches_extDict_5, + ZSTD_btGetAllMatches_extDict_6 + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_dictMatchState_3, + ZSTD_btGetAllMatches_dictMatchState_4, + ZSTD_btGetAllMatches_dictMatchState_5, + ZSTD_btGetAllMatches_dictMatchState_6 + } + }; + private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t* ms, ZSTD_dictMode_e dictMode) + { + uint mls = ms->cParams.minMatch <= 3 ? 3 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6; + assert((uint)dictMode < 3); + assert(mls - 3 < 4); + return getAllMatchesFns[(int)dictMode][mls - 3]; + } + + /* ZSTD_optLdm_skipRawSeqStoreBytes(): + * Moves forward in @rawSeqStore by @nbBytes, + * which will update the fields 'pos' and 'posInSequence'. 
+ */ + private static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) + { + uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); + while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) + { + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) + { + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; + } + else + { + rawSeqStore->posInSequence = currPos; + break; + } + } + + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) + { + rawSeqStore->posInSequence = 0; + } + } + + /* ZSTD_opt_getNextMatchAndUpdateSeqStore(): + * Calculates the beginning and end of the next match in the current block. + * Updates 'pos' and 'posInSequence' of the ldmSeqStore. + */ + private static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, uint currPosInBlock, uint blockBytesRemaining) + { + rawSeq currSeq; + uint currBlockEndPos; + uint literalsBytesRemaining; + uint matchBytesRemaining; + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) + { + optLdm->startPosInBlock = 0xffffffff; + optLdm->endPosInBlock = 0xffffffff; + return; + } + + currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; + assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); + currBlockEndPos = currPosInBlock + blockBytesRemaining; + literalsBytesRemaining = optLdm->seqStore.posInSequence < currSeq.litLength ? currSeq.litLength - (uint)optLdm->seqStore.posInSequence : 0; + matchBytesRemaining = literalsBytesRemaining == 0 ? 
currSeq.matchLength - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) : currSeq.matchLength; + if (literalsBytesRemaining >= blockBytesRemaining) + { + optLdm->startPosInBlock = 0xffffffff; + optLdm->endPosInBlock = 0xffffffff; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); + return; + } + + optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; + optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; + optLdm->offset = currSeq.offset; + if (optLdm->endPosInBlock > currBlockEndPos) + { + optLdm->endPosInBlock = currBlockEndPos; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); + } + else + { + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining); + } + } + + /* ZSTD_optLdm_maybeAddMatch(): + * Adds a match if it's long enough, + * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock', + * into 'matches'. Maintains the correct ordering of 'matches'. + */ + private static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, uint* nbMatches, ZSTD_optLdm_t* optLdm, uint currPosInBlock, uint minMatch) + { + uint posDiff = currPosInBlock - optLdm->startPosInBlock; + /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ + uint candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; + if (currPosInBlock < optLdm->startPosInBlock || currPosInBlock >= optLdm->endPosInBlock || candidateMatchLength < minMatch) + { + return; + } + + if (*nbMatches == 0 || candidateMatchLength > matches[*nbMatches - 1].len && *nbMatches < 1 << 12) + { + assert(optLdm->offset > 0); + uint candidateOffBase = optLdm->offset + 3; + matches[*nbMatches].len = candidateMatchLength; + matches[*nbMatches].off = candidateOffBase; + (*nbMatches)++; + } + } + + /* ZSTD_optLdm_processMatchCandidate(): + * Wrapper function to update ldm seq store and call ldm functions as necessary. 
+ */ + private static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, uint* nbMatches, uint currPosInBlock, uint remainingBytes, uint minMatch) + { + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) + { + return; + } + + if (currPosInBlock >= optLdm->endPosInBlock) + { + if (currPosInBlock > optLdm->endPosInBlock) + { + /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily + * at the end of a match from the ldm seq store, and will often be some bytes + * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots" + */ + uint posOvershoot = currPosInBlock - optLdm->endPosInBlock; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); + } + + ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); + } + + ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, int optLevel, ZSTD_dictMode_e dictMode) + { + optState_t* optStatePtr = &ms->opt; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* @base = ms->window.@base; + byte* prefixStart = @base + ms->window.dictLimit; + ZSTD_compressionParameters* cParams = &ms->cParams; + ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); + uint sufficient_len = cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + uint minMatch = (uint)(cParams->minMatch == 3 ? 
3 : 4); + uint nextToUpdate3 = ms->nextToUpdate; + ZSTD_optimal_t* opt = optStatePtr->priceTable; + ZSTD_match_t* matches = optStatePtr->matchTable; + ZSTD_optimal_t lastStretch; + ZSTD_optLdm_t optLdm; + lastStretch = new ZSTD_optimal_t(); + optLdm.seqStore = ms->ldmSeqStore != null ? *ms->ldmSeqStore : kNullRawSeqStore; + optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; + ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (uint)(ip - istart), (uint)(iend - ip)); + assert(optLevel <= 2); + ZSTD_rescaleFreqs(optStatePtr, (byte*)src, srcSize, optLevel); + ip += ip == prefixStart ? 1 : 0; + while (ip < ilimit) + { + uint cur, last_pos = 0; + { + uint litlen = (uint)(ip - anchor); + uint ll0 = litlen == 0 ? 1U : 0U; + uint nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); + ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (uint)(ip - istart), (uint)(iend - ip), minMatch); + if (nbMatches == 0) + { + ip++; + continue; + } + + opt[0].mlen = 0; + opt[0].litlen = litlen; + opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); + memcpy(&opt[0].rep[0], rep, sizeof(uint) * 3); + { + uint maxML = matches[nbMatches - 1].len; + uint maxOffBase = matches[nbMatches - 1].off; + if (maxML > sufficient_len) + { + lastStretch.litlen = 0; + lastStretch.mlen = maxML; + lastStretch.off = maxOffBase; + cur = 0; + last_pos = maxML; + goto _shortestPath; + } + } + + assert(opt[0].price >= 0); + { + uint pos; + uint matchNb; + for (pos = 1; pos < minMatch; pos++) + { + opt[pos].price = 1 << 30; + opt[pos].mlen = 0; + opt[pos].litlen = litlen + pos; + } + + for (matchNb = 0; matchNb < nbMatches; matchNb++) + { + uint offBase = matches[matchNb].off; + uint end = matches[matchNb].len; + for (; pos <= end; pos++) + { + int matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); + int sequencePrice = opt[0].price + matchPrice; + opt[pos].mlen = pos; + opt[pos].off = offBase; + opt[pos].litlen = 
0; + opt[pos].price = sequencePrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + } + } + + last_pos = pos - 1; + opt[pos].price = 1 << 30; + } + } + + for (cur = 1; cur <= last_pos; cur++) + { + byte* inr = ip + cur; + assert(cur <= 1 << 12); + { + uint litlen = opt[cur - 1].litlen + 1; + int price = opt[cur - 1].price + (int)ZSTD_rawLiteralsCost(ip + cur - 1, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(litlen - 1, optStatePtr, optLevel)); + assert(price < 1000000000); + if (price <= opt[cur].price) + { + ZSTD_optimal_t prevMatch = opt[cur]; + opt[cur] = opt[cur - 1]; + opt[cur].litlen = litlen; + opt[cur].price = price; + if (optLevel >= 1 && prevMatch.litlen == 0 && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) < 0 && ip + cur < iend) + { + /* check next position, in case it would be cheaper */ + int with1literal = prevMatch.price + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel)); + int withMoreLiterals = price + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(litlen + 1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(litlen + 1 - 1, optStatePtr, optLevel)); + if (with1literal < withMoreLiterals && with1literal < opt[cur + 1].price) + { + /* update offset history - before it disappears */ + uint prev = cur - prevMatch.mlen; + repcodes_s newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen == 0 ? 
1U : 0U); + assert(cur >= prevMatch.mlen); + opt[cur + 1] = prevMatch; + memcpy(opt[cur + 1].rep, &newReps, (uint)sizeof(repcodes_s)); + opt[cur + 1].litlen = 1; + opt[cur + 1].price = with1literal; + if (last_pos < cur + 1) + last_pos = cur + 1; + } + } + } + } + + assert(cur >= opt[cur].mlen); + if (opt[cur].litlen == 0) + { + /* just finished a match => alter offset history */ + uint prev = cur - opt[cur].mlen; + repcodes_s newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen == 0 ? 1U : 0U); + memcpy(opt[cur].rep, &newReps, (uint)sizeof(repcodes_s)); + } + + if (inr > ilimit) + continue; + if (cur == last_pos) + break; + if (optLevel == 0 && opt[cur + 1].price <= opt[cur].price + (1 << 8) / 2) + { + continue; + } + + assert(opt[cur].price >= 0); + { + uint ll0 = opt[cur].litlen == 0 ? 1U : 0U; + int previousPrice = opt[cur].price; + int basePrice = previousPrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + uint nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); + uint matchNb; + ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (uint)(inr - istart), (uint)(iend - inr), minMatch); + if (nbMatches == 0) + { + continue; + } + + { + uint longestML = matches[nbMatches - 1].len; + if (longestML > sufficient_len || cur + longestML >= 1 << 12 || ip + cur + longestML >= iend) + { + lastStretch.mlen = longestML; + lastStretch.off = matches[nbMatches - 1].off; + lastStretch.litlen = 0; + last_pos = cur + longestML; + goto _shortestPath; + } + } + + for (matchNb = 0; matchNb < nbMatches; matchNb++) + { + uint offset = matches[matchNb].off; + uint lastML = matches[matchNb].len; + uint startML = matchNb > 0 ? 
matches[matchNb - 1].len + 1 : minMatch; + uint mlen; + for (mlen = lastML; mlen >= startML; mlen--) + { + uint pos = cur + mlen; + int price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + if (pos > last_pos || price < opt[pos].price) + { + while (last_pos < pos) + { + last_pos++; + opt[last_pos].price = 1 << 30; + opt[last_pos].litlen = 0 == 0 ? 1U : 0U; + } + + opt[pos].mlen = mlen; + opt[pos].off = offset; + opt[pos].litlen = 0; + opt[pos].price = price; + } + else + { + if (optLevel == 0) + break; + } + } + } + } + + opt[last_pos + 1].price = 1 << 30; + } + + lastStretch = opt[last_pos]; + assert(cur >= lastStretch.mlen); + cur = last_pos - lastStretch.mlen; + _shortestPath: + assert(opt[0].mlen == 0); + assert(last_pos >= lastStretch.mlen); + assert(cur == last_pos - lastStretch.mlen); + if (lastStretch.mlen == 0) + { + assert(lastStretch.litlen == (uint)(ip - anchor) + last_pos); + ip += last_pos; + continue; + } + + assert(lastStretch.off > 0); + if (lastStretch.litlen == 0) + { + /* finishing on a match : update offset history */ + repcodes_s reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen == 0 ? 
1U : 0U); + memcpy(rep, &reps, (uint)sizeof(repcodes_s)); + } + else + { + memcpy(rep, lastStretch.rep, (uint)sizeof(repcodes_s)); + assert(cur >= lastStretch.litlen); + cur -= lastStretch.litlen; + } + + { + uint storeEnd = cur + 2; + uint storeStart = storeEnd; + uint stretchPos = cur; + assert(storeEnd < (1 << 12) + 3); + if (lastStretch.litlen > 0) + { + opt[storeEnd].litlen = lastStretch.litlen; + opt[storeEnd].mlen = 0; + storeStart = storeEnd - 1; + opt[storeStart] = lastStretch; + } + + { + opt[storeEnd] = lastStretch; + storeStart = storeEnd; + } + + while (true) + { + ZSTD_optimal_t nextStretch = opt[stretchPos]; + opt[storeStart].litlen = nextStretch.litlen; + if (nextStretch.mlen == 0) + { + break; + } + + storeStart--; + opt[storeStart] = nextStretch; + assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); + stretchPos -= nextStretch.litlen + nextStretch.mlen; + } + + { + uint storePos; + for (storePos = storeStart; storePos <= storeEnd; storePos++) + { + uint llen = opt[storePos].litlen; + uint mlen = opt[storePos].mlen; + uint offBase = opt[storePos].off; + uint advance = llen + mlen; + if (mlen == 0) + { + assert(storePos == storeEnd); + ip = anchor + llen; + continue; + } + + assert(anchor + llen <= iend); + ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); + anchor += advance; + ip = anchor; + } + } + + ZSTD_setBasePrices(optStatePtr, optLevel); + } + } + + return (nuint)(iend - anchor); + } + + private static nuint ZSTD_compressBlock_opt0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, ZSTD_dictMode_e dictMode) + { + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0, dictMode); + } + + private static nuint ZSTD_compressBlock_opt2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, ZSTD_dictMode_e dictMode) + { + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 
2, dictMode); + } + + private static nuint ZSTD_compressBlock_btopt(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + } + + /* ZSTD_initStats_ultra(): + * make a first compression pass, just to seed stats with more accurate starting values. + * only works on first block, with no dictionary and no ldm. + * this function cannot error out, its narrow contract must be respected. + */ + private static void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + /* updated rep codes will sink here */ + uint* tmpRep = stackalloc uint[3]; + memcpy(tmpRep, rep, sizeof(uint) * 3); + assert(ms->opt.litLengthSum == 0); + assert(seqStore->sequences == seqStore->sequencesStart); + assert(ms->window.dictLimit == ms->window.lowLimit); + assert(ms->window.dictLimit - ms->nextToUpdate <= 1); + ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + ZSTD_resetSeqStore(seqStore); + ms->window.@base -= srcSize; + ms->window.dictLimit += (uint)srcSize; + ms->window.lowLimit = ms->window.dictLimit; + ms->nextToUpdate = ms->window.dictLimit; + } + + private static nuint ZSTD_compressBlock_btultra(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + } + + /* note : no btultra2 variant for extDict nor dictMatchState, + * because btultra2 is not meant to work with dictionaries + * and is only specific for the first block (no prefix) */ + private static nuint ZSTD_compressBlock_btultra2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + uint curr = (uint)((byte*)src - ms->window.@base); + assert(srcSize <= 1 << 17); + if (ms->opt.litLengthSum == 0 && seqStore->sequences == seqStore->sequencesStart && 
ms->window.dictLimit == ms->window.lowLimit && curr == ms->window.dictLimit && srcSize > 8) + { + ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); + } + + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + } + + private static nuint ZSTD_compressBlock_btopt_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_btopt_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_extDict); + } + + private static nuint ZSTD_compressBlock_btultra_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_dictMatchState); + } + + private static nuint ZSTD_compressBlock_btultra_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + { + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_extDict); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs new file mode 100644 index 000000000..22a13a59a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs @@ -0,0 +1,246 @@ +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + /* for hashLog > 8, hash 2 bytes. + * for hashLog == 8, just take the byte, no hashing. 
+ * The speed of this method relies on compile-time constant propagation */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint hash2(void* p, uint hashLog) + { + assert(hashLog >= 8); + if (hashLog == 8) + return ((byte*)p)[0]; + assert(hashLog <= 10); + return MEM_read16(p) * 0x9e3779b9 >> (int)(32 - hashLog); + } + + private static void initStats(FPStats* fpstats) + { + *fpstats = new FPStats(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void addEvents_generic(Fingerprint* fp, void* src, nuint srcSize, nuint samplingRate, uint hashLog) + { + sbyte* p = (sbyte*)src; + nuint limit = srcSize - 2 + 1; + nuint n; + assert(srcSize >= 2); + for (n = 0; n < limit; n += samplingRate) + { + fp->events[hash2(p + n, hashLog)]++; + } + + fp->nbEvents += limit / samplingRate; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void recordFingerprint_generic(Fingerprint* fp, void* src, nuint srcSize, nuint samplingRate, uint hashLog) + { + memset(fp, 0, (uint)(sizeof(uint) * ((nuint)1 << (int)hashLog))); + fp->nbEvents = 0; + addEvents_generic(fp, src, srcSize, samplingRate, hashLog); + } + + private static void ZSTD_recordFingerprint_1(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 1, 10); + } + + private static void ZSTD_recordFingerprint_5(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 5, 10); + } + + private static void ZSTD_recordFingerprint_11(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 11, 9); + } + + private static void ZSTD_recordFingerprint_43(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 43, 8); + } + + private static ulong abs64(long s64) + { + return (ulong)(s64 < 0 ? 
-s64 : s64); + } + + private static ulong fpDistance(Fingerprint* fp1, Fingerprint* fp2, uint hashLog) + { + ulong distance = 0; + nuint n; + assert(hashLog <= 10); + for (n = 0; n < (nuint)1 << (int)hashLog; n++) + { + distance += abs64(fp1->events[n] * (long)fp2->nbEvents - fp2->events[n] * (long)fp1->nbEvents); + } + + return distance; + } + + /* Compare newEvents with pastEvents + * return 1 when considered "too different" + */ + private static int compareFingerprints(Fingerprint* @ref, Fingerprint* newfp, int penalty, uint hashLog) + { + assert(@ref->nbEvents > 0); + assert(newfp->nbEvents > 0); + { + ulong p50 = @ref->nbEvents * (ulong)newfp->nbEvents; + ulong deviation = fpDistance(@ref, newfp, hashLog); + ulong threshold = p50 * (ulong)(16 - 2 + penalty) / 16; + return deviation >= threshold ? 1 : 0; + } + } + + private static void mergeEvents(Fingerprint* acc, Fingerprint* newfp) + { + nuint n; + for (n = 0; n < 1 << 10; n++) + { + acc->events[n] += newfp->events[n]; + } + + acc->nbEvents += newfp->nbEvents; + } + + private static void flushEvents(FPStats* fpstats) + { + nuint n; + for (n = 0; n < 1 << 10; n++) + { + fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; + } + + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; + fpstats->newEvents = new Fingerprint(); + } + + private static void removeEvents(Fingerprint* acc, Fingerprint* slice) + { + nuint n; + for (n = 0; n < 1 << 10; n++) + { + assert(acc->events[n] >= slice->events[n]); + acc->events[n] -= slice->events[n]; + } + + acc->nbEvents -= slice->nbEvents; + } + + private static readonly void*[] records_fs = new void*[4] + { + (delegate* managed )(&ZSTD_recordFingerprint_43), + (delegate* managed )(&ZSTD_recordFingerprint_11), + (delegate* managed )(&ZSTD_recordFingerprint_5), + (delegate* managed )(&ZSTD_recordFingerprint_1) + }; +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_hashParams => new uint[4] + { + 8, + 9, + 10, + 10 + }; + private static uint* hashParams 
=> (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_hashParams)); +#else + + private static readonly uint* hashParams = GetArrayPointer(new uint[4] { 8, 9, 10, 10 }); +#endif + private static nuint ZSTD_splitBlock_byChunks(void* blockStart, nuint blockSize, int level, void* workspace, nuint wkspSize) + { + assert(0 <= level && level <= 3); + void* record_f = records_fs[level]; + FPStats* fpstats = (FPStats*)workspace; + sbyte* p = (sbyte*)blockStart; + int penalty = 3; + nuint pos = 0; + assert(blockSize == 128 << 10); + assert(workspace != null); + assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); + assert(wkspSize >= (nuint)sizeof(FPStats)); + initStats(fpstats); + ((delegate* managed)record_f)(&fpstats->pastEvents, p, 8 << 10); + for (pos = 8 << 10; pos <= blockSize - (8 << 10); pos += 8 << 10) + { + ((delegate* managed)record_f)(&fpstats->newEvents, p + pos, 8 << 10); + if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level]) != 0) + { + return pos; + } + else + { + mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); + if (penalty > 0) + penalty--; + } + } + + assert(pos == blockSize); + return blockSize; + } + + /* ZSTD_splitBlock_fromBorders(): very fast strategy : + * compare fingerprint from beginning and end of the block, + * derive from their difference if it's preferable to split in the middle, + * repeat the process a second time, for finer grained decision. + * 3 times did not brought improvements, so I stopped at 2. + * Benefits are good enough for a cheap heuristic. + * More accurate splitting saves more, but speed impact is also more perceptible. + * For better accuracy, use more elaborate variant *_byChunks. 
+ */ + private static nuint ZSTD_splitBlock_fromBorders(void* blockStart, nuint blockSize, void* workspace, nuint wkspSize) + { + FPStats* fpstats = (FPStats*)workspace; + Fingerprint* middleEvents = (Fingerprint*)(void*)((sbyte*)workspace + 512 * sizeof(uint)); + assert(blockSize == 128 << 10); + assert(workspace != null); + assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); + assert(wkspSize >= (nuint)sizeof(FPStats)); + initStats(fpstats); + HIST_add(fpstats->pastEvents.events, blockStart, 512); + HIST_add(fpstats->newEvents.events, (sbyte*)blockStart + blockSize - 512, 512); + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = 512; + if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8) == 0) + return blockSize; + HIST_add(middleEvents->events, (sbyte*)blockStart + blockSize / 2 - 512 / 2, 512); + middleEvents->nbEvents = 512; + { + ulong distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); + ulong distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); + const ulong minDistance = 512 * 512 / 3; + if (abs64((long)distFromBegin - (long)distFromEnd) < minDistance) + return 64 * (1 << 10); + return (nuint)(distFromBegin > distFromEnd ? 32 * (1 << 10) : 96 * (1 << 10)); + } + } + + /* ZSTD_splitBlock(): + * @level must be a value between 0 and 4. + * higher levels spend more energy to detect block boundaries. + * @workspace must be aligned for size_t. + * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE + * note: + * For the time being, this function only accepts full 128 KB blocks. + * Therefore, @blockSize must be == 128 KB. + * While this could be extended to smaller sizes in the future, + * it is not yet clear if this would be useful. TBD. 
+ */ + private static nuint ZSTD_splitBlock(void* blockStart, nuint blockSize, int level, void* workspace, nuint wkspSize) + { + assert(0 <= level && level <= 4); + if (level == 0) + return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); + return ZSTD_splitBlock_byChunks(blockStart, blockSize, level - 1, workspace, wkspSize); + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs new file mode 100644 index 000000000..caef6e892 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs @@ -0,0 +1,1617 @@ +using static ZstdSharp.UnsafeHelper; +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static readonly buffer_s g_nullBuffer = new buffer_s(start: null, capacity: 0); + private static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool_s* bufPool) + { + if (bufPool == null) + return; + if (bufPool->buffers != null) + { + uint u; + for (u = 0; u < bufPool->totalBuffers; u++) + { + ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); + } + + ZSTD_customFree(bufPool->buffers, bufPool->cMem); + } + + SynchronizationWrapper.Free(&bufPool->poolMutex); + ZSTD_customFree(bufPool, bufPool->cMem); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_createBufferPool(uint maxNbBuffers, ZSTD_customMem cMem) + { + ZSTDMT_bufferPool_s* bufPool = (ZSTDMT_bufferPool_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_bufferPool_s), cMem); + if (bufPool == null) + return null; + SynchronizationWrapper.Init(&bufPool->poolMutex); + bufPool->buffers = (buffer_s*)ZSTD_customCalloc(maxNbBuffers * (uint)sizeof(buffer_s), cMem); + if (bufPool->buffers == null) + { + ZSTDMT_freeBufferPool(bufPool); + return null; + } + + bufPool->bufferSize = 64 * (1 << 10); + bufPool->totalBuffers = maxNbBuffers; + bufPool->nbBuffers = 0; + bufPool->cMem = 
cMem; + return bufPool; + } + + /* only works at initialization, not during compression */ + private static nuint ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool_s* bufPool) + { + nuint poolSize = (nuint)sizeof(ZSTDMT_bufferPool_s); + nuint arraySize = bufPool->totalBuffers * (uint)sizeof(buffer_s); + uint u; + nuint totalBufferSize = 0; + SynchronizationWrapper.Enter(&bufPool->poolMutex); + for (u = 0; u < bufPool->totalBuffers; u++) + totalBufferSize += bufPool->buffers[u].capacity; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + return poolSize + arraySize + totalBufferSize; + } + + /* ZSTDMT_setBufferSize() : + * all future buffers provided by this buffer pool will have _at least_ this size + * note : it's better for all buffers to have same size, + * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ + private static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool_s* bufPool, nuint bSize) + { + SynchronizationWrapper.Enter(&bufPool->poolMutex); + bufPool->bufferSize = bSize; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool_s* srcBufPool, uint maxNbBuffers) + { + if (srcBufPool == null) + return null; + if (srcBufPool->totalBuffers >= maxNbBuffers) + return srcBufPool; + { + ZSTD_customMem cMem = srcBufPool->cMem; + /* forward parameters */ + nuint bSize = srcBufPool->bufferSize; + ZSTDMT_bufferPool_s* newBufPool; + ZSTDMT_freeBufferPool(srcBufPool); + newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); + if (newBufPool == null) + return newBufPool; + ZSTDMT_setBufferSize(newBufPool, bSize); + return newBufPool; + } + } + + /** ZSTDMT_getBuffer() : + * assumption : bufPool must be valid + * @return : a buffer, with start pointer and size + * note: allocation may fail, in this case, start==NULL and size==0 */ + private static buffer_s ZSTDMT_getBuffer(ZSTDMT_bufferPool_s* bufPool) + { + nuint bSize = bufPool->bufferSize; + 
SynchronizationWrapper.Enter(&bufPool->poolMutex); + if (bufPool->nbBuffers != 0) + { + buffer_s buf = bufPool->buffers[--bufPool->nbBuffers]; + nuint availBufferSize = buf.capacity; + bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; + if (availBufferSize >= bSize && availBufferSize >> 3 <= bSize) + { + SynchronizationWrapper.Exit(&bufPool->poolMutex); + return buf; + } + + ZSTD_customFree(buf.start, bufPool->cMem); + } + + SynchronizationWrapper.Exit(&bufPool->poolMutex); + { + buffer_s buffer; + void* start = ZSTD_customMalloc(bSize, bufPool->cMem); + buffer.start = start; + buffer.capacity = start == null ? 0 : bSize; + return buffer; + } + } + + /* store buffer for later re-use, up to pool capacity */ + private static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool_s* bufPool, buffer_s buf) + { + if (buf.start == null) + return; + SynchronizationWrapper.Enter(&bufPool->poolMutex); + if (bufPool->nbBuffers < bufPool->totalBuffers) + { + bufPool->buffers[bufPool->nbBuffers++] = buf; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + return; + } + + SynchronizationWrapper.Exit(&bufPool->poolMutex); + ZSTD_customFree(buf.start, bufPool->cMem); + } + + private static nuint ZSTDMT_sizeof_seqPool(ZSTDMT_bufferPool_s* seqPool) + { + return ZSTDMT_sizeof_bufferPool(seqPool); + } + + private static RawSeqStore_t bufferToSeq(buffer_s buffer) + { + RawSeqStore_t seq = kNullRawSeqStore; + seq.seq = (rawSeq*)buffer.start; + seq.capacity = buffer.capacity / (nuint)sizeof(rawSeq); + return seq; + } + + private static buffer_s seqToBuffer(RawSeqStore_t seq) + { + buffer_s buffer; + buffer.start = seq.seq; + buffer.capacity = seq.capacity * (nuint)sizeof(rawSeq); + return buffer; + } + + private static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_bufferPool_s* seqPool) + { + if (seqPool->bufferSize == 0) + { + return kNullRawSeqStore; + } + + return bufferToSeq(ZSTDMT_getBuffer(seqPool)); + } + + private static void ZSTDMT_releaseSeq(ZSTDMT_bufferPool_s* seqPool, RawSeqStore_t seq) + { 
+ ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); + } + + private static void ZSTDMT_setNbSeq(ZSTDMT_bufferPool_s* seqPool, nuint nbSeq) + { + ZSTDMT_setBufferSize(seqPool, nbSeq * (nuint)sizeof(rawSeq)); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool(uint nbWorkers, ZSTD_customMem cMem) + { + ZSTDMT_bufferPool_s* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + if (seqPool == null) + return null; + ZSTDMT_setNbSeq(seqPool, 0); + return seqPool; + } + + private static void ZSTDMT_freeSeqPool(ZSTDMT_bufferPool_s* seqPool) + { + ZSTDMT_freeBufferPool(seqPool); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_expandSeqPool(ZSTDMT_bufferPool_s* pool, uint nbWorkers) + { + return ZSTDMT_expandBufferPool(pool, nbWorkers); + } + + /* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */ + private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) + { + if (pool == null) + return; + SynchronizationWrapper.Free(&pool->poolMutex); + if (pool->cctxs != null) + { + int cid; + for (cid = 0; cid < pool->totalCCtx; cid++) + ZSTD_freeCCtx(pool->cctxs[cid]); + ZSTD_customFree(pool->cctxs, pool->cMem); + } + + ZSTD_customFree(pool, pool->cMem); + } + + /* ZSTDMT_createCCtxPool() : + * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ + private static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) + { + ZSTDMT_CCtxPool* cctxPool = (ZSTDMT_CCtxPool*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtxPool), cMem); + assert(nbWorkers > 0); + if (cctxPool == null) + return null; + SynchronizationWrapper.Init(&cctxPool->poolMutex); + cctxPool->totalCCtx = nbWorkers; + cctxPool->cctxs = (ZSTD_CCtx_s**)ZSTD_customCalloc((nuint)(nbWorkers * sizeof(ZSTD_CCtx_s*)), cMem); + if (cctxPool->cctxs == null) + { + ZSTDMT_freeCCtxPool(cctxPool); + return null; + } + + cctxPool->cMem = cMem; + cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); + if (cctxPool->cctxs[0] == null) + { + 
ZSTDMT_freeCCtxPool(cctxPool); + return null; + } + + cctxPool->availCCtx = 1; + return cctxPool; + } + + private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, int nbWorkers) + { + if (srcPool == null) + return null; + if (nbWorkers <= srcPool->totalCCtx) + return srcPool; + { + ZSTD_customMem cMem = srcPool->cMem; + ZSTDMT_freeCCtxPool(srcPool); + return ZSTDMT_createCCtxPool(nbWorkers, cMem); + } + } + + /* only works during initialization phase, not during compression */ + private static nuint ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) + { + SynchronizationWrapper.Enter(&cctxPool->poolMutex); + { + uint nbWorkers = (uint)cctxPool->totalCCtx; + nuint poolSize = (nuint)sizeof(ZSTDMT_CCtxPool); + nuint arraySize = (nuint)(cctxPool->totalCCtx * sizeof(ZSTD_CCtx_s*)); + nuint totalCCtxSize = 0; + uint u; + for (u = 0; u < nbWorkers; u++) + { + totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); + } + + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + assert(nbWorkers > 0); + return poolSize + arraySize + totalCCtxSize; + } + } + + private static ZSTD_CCtx_s* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) + { + SynchronizationWrapper.Enter(&cctxPool->poolMutex); + if (cctxPool->availCCtx != 0) + { + cctxPool->availCCtx--; + { + ZSTD_CCtx_s* cctx = cctxPool->cctxs[cctxPool->availCCtx]; + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + return cctx; + } + } + + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + return ZSTD_createCCtx_advanced(cctxPool->cMem); + } + + private static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx_s* cctx) + { + if (cctx == null) + return; + SynchronizationWrapper.Enter(&pool->poolMutex); + if (pool->availCCtx < pool->totalCCtx) + pool->cctxs[pool->availCCtx++] = cctx; + else + { + ZSTD_freeCCtx(cctx); + } + + SynchronizationWrapper.Exit(&pool->poolMutex); + } + + private static int ZSTDMT_serialState_reset(SerialState* serialState, ZSTDMT_bufferPool_s* seqPool, ZSTD_CCtx_params_s @params, 
nuint jobSize, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType) + { + if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_ldm_adjustParameters(&@params.ldmParams, &@params.cParams); + assert(@params.ldmParams.hashLog >= @params.ldmParams.bucketSizeLog); + assert(@params.ldmParams.hashRateLog < 32); + } + else + { + @params.ldmParams = new ldmParams_t(); + } + + serialState->nextJobID = 0; + if (@params.fParams.checksumFlag != 0) + ZSTD_XXH64_reset(&serialState->xxhState, 0); + if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_customMem cMem = @params.customMem; + uint hashLog = @params.ldmParams.hashLog; + nuint hashSize = ((nuint)1 << (int)hashLog) * (nuint)sizeof(ldmEntry_t); + uint bucketLog = @params.ldmParams.hashLog - @params.ldmParams.bucketSizeLog; + uint prevBucketLog = serialState->@params.ldmParams.hashLog - serialState->@params.ldmParams.bucketSizeLog; + nuint numBuckets = (nuint)1 << (int)bucketLog; + ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(@params.ldmParams, jobSize)); + ZSTD_window_init(&serialState->ldmState.window); + if (serialState->ldmState.hashTable == null || serialState->@params.ldmParams.hashLog < hashLog) + { + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); + } + + if (serialState->ldmState.bucketOffsets == null || prevBucketLog < bucketLog) + { + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc(numBuckets, cMem); + } + + if (serialState->ldmState.hashTable == null || serialState->ldmState.bucketOffsets == null) + return 1; + memset(serialState->ldmState.hashTable, 0, (uint)hashSize); + memset(serialState->ldmState.bucketOffsets, 0, (uint)numBuckets); + serialState->ldmState.loadedDictEnd = 0; + if (dictSize > 0) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) + { + 
byte* dictEnd = (byte*)dict + dictSize; + ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, 0); + ZSTD_ldm_fillHashTable(&serialState->ldmState, (byte*)dict, dictEnd, &@params.ldmParams); + serialState->ldmState.loadedDictEnd = @params.forceWindow != 0 ? 0 : (uint)(dictEnd - serialState->ldmState.window.@base); + } + } + + serialState->ldmWindow = serialState->ldmState.window; + } + + serialState->@params = @params; + serialState->@params.jobSize = (uint)jobSize; + return 0; + } + + private static int ZSTDMT_serialState_init(SerialState* serialState) + { + int initError = 0; + *serialState = new SerialState(); + SynchronizationWrapper.Init(&serialState->mutex); + initError |= 0; + initError |= 0; + SynchronizationWrapper.Init(&serialState->ldmWindowMutex); + initError |= 0; + initError |= 0; + return initError; + } + + private static void ZSTDMT_serialState_free(SerialState* serialState) + { + ZSTD_customMem cMem = serialState->@params.customMem; + SynchronizationWrapper.Free(&serialState->mutex); + SynchronizationWrapper.Free(&serialState->ldmWindowMutex); + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + } + + private static void ZSTDMT_serialState_genSequences(SerialState* serialState, RawSeqStore_t* seqStore, Range src, uint jobID) + { + SynchronizationWrapper.Enter(&serialState->mutex); + while (serialState->nextJobID < jobID) + { + SynchronizationWrapper.Wait(&serialState->mutex); + } + + if (serialState->nextJobID == jobID) + { + if (serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + nuint error; + assert(seqStore->seq != null && seqStore->pos == 0 && seqStore->size == 0 && seqStore->capacity > 0); + assert(src.size <= serialState->@params.jobSize); + ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, 0); + error = ZSTD_ldm_generateSequences(&serialState->ldmState, seqStore, &serialState->@params.ldmParams, src.start, 
src.size); + assert(!ERR_isError(error)); + SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); + serialState->ldmWindow = serialState->ldmState.window; + SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); + SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); + } + + if (serialState->@params.fParams.checksumFlag != 0 && src.size > 0) + ZSTD_XXH64_update(&serialState->xxhState, src.start, src.size); + } + + serialState->nextJobID++; + SynchronizationWrapper.PulseAll(&serialState->mutex); + SynchronizationWrapper.Exit(&serialState->mutex); + } + + private static void ZSTDMT_serialState_applySequences(SerialState* serialState, ZSTD_CCtx_s* jobCCtx, RawSeqStore_t* seqStore) + { + if (seqStore->size > 0) + { + assert(serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable); + assert(jobCCtx != null); + ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); + } + } + + private static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, uint jobID, nuint cSize) + { + SynchronizationWrapper.Enter(&serialState->mutex); + if (serialState->nextJobID <= jobID) + { + assert(ERR_isError(cSize)); + serialState->nextJobID = jobID + 1; + SynchronizationWrapper.PulseAll(&serialState->mutex); + SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); + ZSTD_window_clear(&serialState->ldmWindow); + SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); + SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); + } + + SynchronizationWrapper.Exit(&serialState->mutex); + } + + private static readonly Range kNullRange = new Range(start: null, size: 0); + /* ZSTDMT_compressionJob() is a POOL_function type */ + private static void ZSTDMT_compressionJob(void* jobDescription) + { + ZSTDMT_jobDescription* job = (ZSTDMT_jobDescription*)jobDescription; + /* do not modify job->params ! 
copy it, modify the copy */ + ZSTD_CCtx_params_s jobParams = job->@params; + ZSTD_CCtx_s* cctx = ZSTDMT_getCCtx(job->cctxPool); + RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); + buffer_s dstBuff = job->dstBuff; + nuint lastCBlockSize = 0; + if (cctx == null) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + if (dstBuff.start == null) + { + dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (dstBuff.start == null) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + job->dstBuff = dstBuff; + } + + if (jobParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && rawSeqStore.seq == null) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + if (job->jobID != 0) + jobParams.fParams.checksumFlag = 0; + jobParams.ldmParams.enableLdm = ZSTD_paramSwitch_e.ZSTD_ps_disable; + jobParams.nbWorkers = 0; + ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); + if (job->cdict != null) + { + nuint initError = ZSTD_compressBegin_advanced_internal(cctx, null, 0, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); + assert(job->firstJob != 0); + if (ERR_isError(initError)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = initError; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + } + else + { + ulong pledgedSrcSize = job->firstJob != 0 ? 
job->fullFrameSize : job->src.size; + { + nuint forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_cParameter.ZSTD_c_experimentalParam3, job->firstJob == 0 ? 1 : 0); + if (ERR_isError(forceWindowError)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = forceWindowError; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + } + + if (job->firstJob == 0) + { + nuint err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_cParameter.ZSTD_c_experimentalParam15, 0); + if (ERR_isError(err)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = err; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + } + + { + nuint initError = ZSTD_compressBegin_advanced_internal(cctx, job->prefix.start, job->prefix.size, ZSTD_dictContentType_e.ZSTD_dct_rawContent, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &jobParams, pledgedSrcSize); + if (ERR_isError(initError)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = initError; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + } + } + + ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); + if (job->firstJob == 0) + { + nuint hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + if (ERR_isError(hSize)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = hSize; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + ZSTD_invalidateRepCodes(cctx); + } + + { + const nuint chunkSize = 4 * (1 << 17); + int nbChunks = (int)((job->src.size + (chunkSize - 1)) / chunkSize); + byte* ip = (byte*)job->src.start; + byte* ostart = (byte*)dstBuff.start; + byte* op = ostart; + byte* oend = op + dstBuff.capacity; + int chunkNb; +#if DEBUG + if (sizeof(nuint) > sizeof(int)) + assert(job->src.size < 2147483647 * chunkSize); +#endif + assert(job->cSize == 0); + for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) + { + nuint cSize = 
ZSTD_compressContinue_public(cctx, op, (nuint)(oend - op), ip, chunkSize); + if (ERR_isError(cSize)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = cSize; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + ip += chunkSize; + op += cSize; + assert(op < oend); + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize += cSize; + job->consumed = chunkSize * (nuint)chunkNb; + SynchronizationWrapper.Pulse(&job->job_mutex); + SynchronizationWrapper.Exit(&job->job_mutex); + } + + assert(chunkSize > 0); + assert((chunkSize & chunkSize - 1) == 0); + if (((uint)(nbChunks > 0 ? 1 : 0) | job->lastJob) != 0) + { + nuint lastBlockSize1 = job->src.size & chunkSize - 1; + nuint lastBlockSize = lastBlockSize1 == 0 && job->src.size >= chunkSize ? chunkSize : lastBlockSize1; + nuint cSize = job->lastJob != 0 ? ZSTD_compressEnd_public(cctx, op, (nuint)(oend - op), ip, lastBlockSize) : ZSTD_compressContinue_public(cctx, op, (nuint)(oend - op), ip, lastBlockSize); + if (ERR_isError(cSize)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = cSize; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } + + lastCBlockSize = cSize; + } + } + +#if DEBUG + if (job->firstJob == 0) + { + assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); + } +#endif + + ZSTD_CCtx_trace(cctx, 0); + _endJob: + ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); + ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); + ZSTDMT_releaseCCtx(job->cctxPool, cctx); + SynchronizationWrapper.Enter(&job->job_mutex); + if (ERR_isError(job->cSize)) + assert(lastCBlockSize == 0); + job->cSize += lastCBlockSize; + job->consumed = job->src.size; + SynchronizationWrapper.Pulse(&job->job_mutex); + SynchronizationWrapper.Exit(&job->job_mutex); + } + + private static readonly RoundBuff_t kNullRoundBuff = new RoundBuff_t(buffer: null, capacity: 0, pos: 0); + private static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* 
jobTable, uint nbJobs, ZSTD_customMem cMem) + { + uint jobNb; + if (jobTable == null) + return; + for (jobNb = 0; jobNb < nbJobs; jobNb++) + { + SynchronizationWrapper.Free(&jobTable[jobNb].job_mutex); + } + + ZSTD_customFree(jobTable, cMem); + } + + /* ZSTDMT_allocJobsTable() + * allocate and init a job table. + * update *nbJobsPtr to next power of 2 value, as size of table */ + private static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(uint* nbJobsPtr, ZSTD_customMem cMem) + { + uint nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; + uint nbJobs = (uint)(1 << (int)nbJobsLog2); + uint jobNb; + ZSTDMT_jobDescription* jobTable = (ZSTDMT_jobDescription*)ZSTD_customCalloc(nbJobs * (uint)sizeof(ZSTDMT_jobDescription), cMem); + int initError = 0; + if (jobTable == null) + return null; + *nbJobsPtr = nbJobs; + for (jobNb = 0; jobNb < nbJobs; jobNb++) + { + SynchronizationWrapper.Init(&jobTable[jobNb].job_mutex); + initError |= 0; + initError |= 0; + } + + if (initError != 0) + { + ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); + return null; + } + + return jobTable; + } + + private static nuint ZSTDMT_expandJobsTable(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + { + uint nbJobs = nbWorkers + 2; + if (nbJobs > mtctx->jobIDMask + 1) + { + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); + mtctx->jobIDMask = 0; + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); + if (mtctx->jobs == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + assert(nbJobs != 0 && (nbJobs & nbJobs - 1) == 0); + mtctx->jobIDMask = nbJobs - 1; + } + + return 0; + } + + /* ZSTDMT_CCtxParam_setNbWorkers(): + * Internal use only */ + private static nuint ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params_s* @params, uint nbWorkers) + { + return ZSTD_CCtxParams_setParameter(@params, ZSTD_cParameter.ZSTD_c_nbWorkers, (int)nbWorkers); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTDMT_CCtx_s* 
ZSTDMT_createCCtx_advanced_internal(uint nbWorkers, ZSTD_customMem cMem, void* pool) + { + ZSTDMT_CCtx_s* mtctx; + uint nbJobs = nbWorkers + 2; + int initError; + if (nbWorkers < 1) + return null; + nbWorkers = nbWorkers < (uint)(sizeof(void*) == 4 ? 64 : 256) ? nbWorkers : (uint)(sizeof(void*) == 4 ? 64 : 256); + if (((cMem.customAlloc != null ? 1 : 0) ^ (cMem.customFree != null ? 1 : 0)) != 0) + return null; + mtctx = (ZSTDMT_CCtx_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtx_s), cMem); + if (mtctx == null) + return null; + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); + mtctx->cMem = cMem; + mtctx->allJobsCompleted = 1; + if (pool != null) + { + mtctx->factory = pool; + mtctx->providedFactory = 1; + } + else + { + mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); + mtctx->providedFactory = 0; + } + + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); + assert(nbJobs > 0); + assert((nbJobs & nbJobs - 1) == 0); + mtctx->jobIDMask = nbJobs - 1; + mtctx->bufPool = ZSTDMT_createBufferPool(2 * nbWorkers + 3, cMem); + mtctx->cctxPool = ZSTDMT_createCCtxPool((int)nbWorkers, cMem); + mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); + initError = ZSTDMT_serialState_init(&mtctx->serial); + mtctx->roundBuff = kNullRoundBuff; + if (((mtctx->factory == null || mtctx->jobs == null || mtctx->bufPool == null || mtctx->cctxPool == null || mtctx->seqPool == null ? 1 : 0) | initError) != 0) + { + ZSTDMT_freeCCtx(mtctx); + return null; + } + + return mtctx; + } + + /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ + private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced(uint nbWorkers, ZSTD_customMem cMem, void* pool) + { + return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); + } + + /* ZSTDMT_releaseAllJobResources() : + * note : ensure all workers are killed first ! 
*/ + private static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx_s* mtctx) + { + uint jobID; + for (jobID = 0; jobID <= mtctx->jobIDMask; jobID++) + { + /* Copy the mutex/cond out */ + void* mutex = mtctx->jobs[jobID].job_mutex; + void* cond = mtctx->jobs[jobID].job_cond; + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); + mtctx->jobs[jobID] = new ZSTDMT_jobDescription + { + job_mutex = mutex, + job_cond = cond + }; + } + + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->allJobsCompleted = 1; + } + + private static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx_s* mtctx) + { + while (mtctx->doneJobID < mtctx->nextJobID) + { + uint jobID = mtctx->doneJobID & mtctx->jobIDMask; + SynchronizationWrapper.Enter(&mtctx->jobs[jobID].job_mutex); + while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) + { + SynchronizationWrapper.Wait(&mtctx->jobs[jobID].job_mutex); + } + + SynchronizationWrapper.Exit(&mtctx->jobs[jobID].job_mutex); + mtctx->doneJobID++; + } + } + + private static nuint ZSTDMT_freeCCtx(ZSTDMT_CCtx_s* mtctx) + { + if (mtctx == null) + return 0; + if (mtctx->providedFactory == 0) + POOL_free(mtctx->factory); + ZSTDMT_releaseAllJobResources(mtctx); + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); + ZSTDMT_freeBufferPool(mtctx->bufPool); + ZSTDMT_freeCCtxPool(mtctx->cctxPool); + ZSTDMT_freeSeqPool(mtctx->seqPool); + ZSTDMT_serialState_free(&mtctx->serial); + ZSTD_freeCDict(mtctx->cdictLocal); + if (mtctx->roundBuff.buffer != null) + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + ZSTD_customFree(mtctx, mtctx->cMem); + return 0; + } + + private static nuint ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx_s* mtctx) + { + if (mtctx == null) + return 0; + return (nuint)sizeof(ZSTDMT_CCtx_s) + POOL_sizeof(mtctx->factory) + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + 
ZSTDMT_sizeof_seqPool(mtctx->seqPool) + ZSTD_sizeof_CDict(mtctx->cdictLocal) + mtctx->roundBuff.capacity; + } + + /* ZSTDMT_resize() : + * @return : error code if fails, 0 on success */ + private static nuint ZSTDMT_resize(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + { + if (POOL_resize(mtctx->factory, nbWorkers) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + { + nuint err_code = ZSTDMT_expandJobsTable(mtctx, nbWorkers); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, 2 * nbWorkers + 3); + if (mtctx->bufPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, (int)nbWorkers); + if (mtctx->cctxPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); + if (mtctx->seqPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); + return 0; + } + + /*! ZSTDMT_updateCParams_whileCompressing() : + * Updates a selected set of compression parameters, remaining compatible with currently active frame. + * New parameters will be applied to next compression job. 
*/ + private static void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx_s* mtctx, ZSTD_CCtx_params_s* cctxParams) + { + /* Do not modify windowLog while compressing */ + uint saved_wlog = mtctx->@params.cParams.windowLog; + int compressionLevel = cctxParams->compressionLevel; + mtctx->@params.compressionLevel = compressionLevel; + { + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + cParams.windowLog = saved_wlog; + mtctx->@params.cParams = cParams; + } + } + + /* ZSTDMT_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads. + * Note : mutex will be acquired during statistics collection inside workers. */ + private static ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx_s* mtctx) + { + ZSTD_frameProgression fps; + fps.ingested = mtctx->consumed + mtctx->inBuff.filled; + fps.consumed = mtctx->consumed; + fps.produced = fps.flushed = mtctx->produced; + fps.currentJobID = mtctx->nextJobID; + fps.nbActiveWorkers = 0; + { + uint jobNb; + uint lastJobNb = mtctx->nextJobID + (uint)mtctx->jobReady; + assert(mtctx->jobReady <= 1); + for (jobNb = mtctx->doneJobID; jobNb < lastJobNb; jobNb++) + { + uint wJobID = jobNb & mtctx->jobIDMask; + ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; + SynchronizationWrapper.Enter(&jobPtr->job_mutex); + { + nuint cResult = jobPtr->cSize; + nuint produced = ERR_isError(cResult) ? 0 : cResult; + nuint flushed = ERR_isError(cResult) ? 0 : jobPtr->dstFlushed; + assert(flushed <= produced); + fps.ingested += jobPtr->src.size; + fps.consumed += jobPtr->consumed; + fps.produced += produced; + fps.flushed += flushed; + fps.nbActiveWorkers += jobPtr->consumed < jobPtr->src.size ? 1U : 0U; + } + + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + } + } + + return fps; + } + + /*! 
ZSTDMT_toFlushNow() + * Tell how many bytes are ready to be flushed immediately. + * Probe the oldest active job (not yet entirely flushed) and check its output buffer. + * If return 0, it means there is no active job, + * or, it means oldest job is still active, but everything produced has been flushed so far, + * therefore flushing is limited by speed of oldest job. */ + private static nuint ZSTDMT_toFlushNow(ZSTDMT_CCtx_s* mtctx) + { + nuint toFlush; + uint jobID = mtctx->doneJobID; + assert(jobID <= mtctx->nextJobID); + if (jobID == mtctx->nextJobID) + return 0; + { + uint wJobID = jobID & mtctx->jobIDMask; + ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; + SynchronizationWrapper.Enter(&jobPtr->job_mutex); + { + nuint cResult = jobPtr->cSize; + nuint produced = ERR_isError(cResult) ? 0 : cResult; + nuint flushed = ERR_isError(cResult) ? 0 : jobPtr->dstFlushed; + assert(flushed <= produced); + assert(jobPtr->consumed <= jobPtr->src.size); + toFlush = produced - flushed; +#if DEBUG + if (toFlush == 0) + { + assert(jobPtr->consumed < jobPtr->src.size); + } +#endif + } + + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + } + + return toFlush; + } + + /* ------------------------------------------ */ + /* ===== Multi-threaded compression ===== */ + /* ------------------------------------------ */ + private static uint ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params_s* @params) + { + uint jobLog; + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + jobLog = 21 > ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3 ? 21 : ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3; + } + else + { + jobLog = 20 > @params->cParams.windowLog + 2 ? 20 : @params->cParams.windowLog + 2; + } + + return jobLog < (uint)(MEM_32bits ? 29 : 30) ? jobLog : (uint)(MEM_32bits ? 
29 : 30); + } + + private static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) + { + switch (strat) + { + case ZSTD_strategy.ZSTD_btultra2: + return 9; + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btopt: + return 8; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_lazy2: + return 7; + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_dfast: + case ZSTD_strategy.ZSTD_fast: + default: + break; + } + + return 6; + } + + private static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) + { + assert(0 <= ovlog && ovlog <= 9); + if (ovlog == 0) + return ZSTDMT_overlapLog_default(strat); + return ovlog; + } + + private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) + { + int overlapRLog = 9 - ZSTDMT_overlapLog(@params->overlapLog, @params->cParams.strategy); + int ovLog = (int)(overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog); + assert(0 <= overlapRLog && overlapRLog <= 8); + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ovLog = (int)((@params->cParams.windowLog < ZSTDMT_computeTargetJobLog(@params) - 2 ? @params->cParams.windowLog : ZSTDMT_computeTargetJobLog(@params) - 2) - (uint)overlapRLog); + } + + assert(0 <= ovLog && ovLog <= (sizeof(nuint) == 4 ? 30 : 31)); + return ovLog == 0 ? 
0 : (nuint)1 << ovLog; + } + + /* ====================================== */ + /* ======= Streaming API ======= */ + /* ====================================== */ + private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize) + { + assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); + assert(!(dict != null && cdict != null)); + if (@params.nbWorkers != mtctx->@params.nbWorkers) + { + /* init */ + nuint err_code = ZSTDMT_resize(mtctx, (uint)@params.nbWorkers); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (@params.jobSize != 0 && @params.jobSize < 512 * (1 << 10)) + @params.jobSize = 512 * (1 << 10); + if (@params.jobSize > (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20))) + @params.jobSize = (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20)); + if (mtctx->allJobsCompleted == 0) + { + ZSTDMT_waitForAllJobsCompleted(mtctx); + ZSTDMT_releaseAllJobResources(mtctx); + mtctx->allJobsCompleted = 1; + } + + mtctx->@params = @params; + mtctx->frameContentSize = pledgedSrcSize; + ZSTD_freeCDict(mtctx->cdictLocal); + if (dict != null) + { + mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, dictContentType, @params.cParams, mtctx->cMem); + mtctx->cdict = mtctx->cdictLocal; + if (mtctx->cdictLocal == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + else + { + mtctx->cdictLocal = null; + mtctx->cdict = cdict; + } + + mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&@params); + mtctx->targetSectionSize = @params.jobSize; + if (mtctx->targetSectionSize == 0) + { + mtctx->targetSectionSize = (nuint)(1UL << (int)ZSTDMT_computeTargetJobLog(&@params)); + } + + assert(mtctx->targetSectionSize <= (nuint)(MEM_32bits ? 
512 * (1 << 20) : 1024 * (1 << 20))); + if (@params.rsyncable != 0) + { + /* Aim for the targetsectionSize as the average job size. */ + uint jobSizeKB = (uint)(mtctx->targetSectionSize >> 10); + assert(jobSizeKB >= 1); + uint rsyncBits = ZSTD_highbit32(jobSizeKB) + 10; + assert(rsyncBits >= 17 + 2); + mtctx->rsync.hash = 0; + mtctx->rsync.hitMask = (1UL << (int)rsyncBits) - 1; + mtctx->rsync.primePower = ZSTD_rollingHash_primePower(32); + } + + if (mtctx->targetSectionSize < mtctx->targetPrefixSize) + mtctx->targetSectionSize = mtctx->targetPrefixSize; + ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); + { + /* If ldm is enabled we need windowSize space. */ + nuint windowSize = mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1U << (int)mtctx->@params.cParams.windowLog : 0; + /* Two buffers of slack, plus extra space for the overlap + * This is the minimum slack that LDM works with. One extra because + * flush might waste up to targetSectionSize-1 bytes. Another extra + * for the overlap (if > 0), then one to fill which doesn't overlap + * with the LDM window. + */ + nuint nbSlackBuffers = (nuint)(2 + (mtctx->targetPrefixSize > 0 ? 1 : 0)); + nuint slackSize = mtctx->targetSectionSize * nbSlackBuffers; + /* Compute the total size, and always have enough slack */ + nuint nbWorkers = (nuint)(mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1); + nuint sectionsSize = mtctx->targetSectionSize * nbWorkers; + nuint capacity = (windowSize > sectionsSize ? 
windowSize : sectionsSize) + slackSize; + if (mtctx->roundBuff.capacity < capacity) + { + if (mtctx->roundBuff.buffer != null) + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + mtctx->roundBuff.buffer = (byte*)ZSTD_customMalloc(capacity, mtctx->cMem); + if (mtctx->roundBuff.buffer == null) + { + mtctx->roundBuff.capacity = 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + mtctx->roundBuff.capacity = capacity; + } + } + + mtctx->roundBuff.pos = 0; + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->inBuff.prefix = kNullRange; + mtctx->doneJobID = 0; + mtctx->nextJobID = 0; + mtctx->frameEnded = 0; + mtctx->allJobsCompleted = 0; + mtctx->consumed = 0; + mtctx->produced = 0; + ZSTD_freeCDict(mtctx->cdictLocal); + mtctx->cdictLocal = null; + mtctx->cdict = null; + if (dict != null) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) + { + mtctx->inBuff.prefix.start = (byte*)dict; + mtctx->inBuff.prefix.size = dictSize; + } + else + { + mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType, @params.cParams, mtctx->cMem); + mtctx->cdict = mtctx->cdictLocal; + if (mtctx->cdictLocal == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + } + else + { + mtctx->cdict = cdict; + } + + if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, @params, mtctx->targetSectionSize, dict, dictSize, dictContentType) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return 0; + } + + /* ZSTDMT_writeLastEmptyBlock() + * Write a single empty block with an end-of-frame to finish a frame. + * Job must be created from streaming variant. + * This function is always successful if expected conditions are fulfilled. 
+ */ + private static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) + { + assert(job->lastJob == 1); + assert(job->src.size == 0); + assert(job->firstJob == 0); + assert(job->dstBuff.start == null); + job->dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (job->dstBuff.start == null) + { + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return; + } + + assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); + job->src = kNullRange; + job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); + assert(!ERR_isError(job->cSize)); + assert(job->consumed == 0); + } + + private static nuint ZSTDMT_createCompressionJob(ZSTDMT_CCtx_s* mtctx, nuint srcSize, ZSTD_EndDirective endOp) + { + uint jobID = mtctx->nextJobID & mtctx->jobIDMask; + int endFrame = endOp == ZSTD_EndDirective.ZSTD_e_end ? 1 : 0; + if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) + { + assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); + return 0; + } + + if (mtctx->jobReady == 0) + { + byte* src = (byte*)mtctx->inBuff.buffer.start; + mtctx->jobs[jobID].src.start = src; + mtctx->jobs[jobID].src.size = srcSize; + assert(mtctx->inBuff.filled >= srcSize); + mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; + mtctx->jobs[jobID].consumed = 0; + mtctx->jobs[jobID].cSize = 0; + mtctx->jobs[jobID].@params = mtctx->@params; + mtctx->jobs[jobID].cdict = mtctx->nextJobID == 0 ? mtctx->cdict : null; + mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; + mtctx->jobs[jobID].dstBuff = g_nullBuffer; + mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; + mtctx->jobs[jobID].bufPool = mtctx->bufPool; + mtctx->jobs[jobID].seqPool = mtctx->seqPool; + mtctx->jobs[jobID].serial = &mtctx->serial; + mtctx->jobs[jobID].jobID = mtctx->nextJobID; + mtctx->jobs[jobID].firstJob = mtctx->nextJobID == 0 ? 
1U : 0U; + mtctx->jobs[jobID].lastJob = (uint)endFrame; + mtctx->jobs[jobID].frameChecksumNeeded = mtctx->@params.fParams.checksumFlag != 0 && endFrame != 0 && mtctx->nextJobID > 0 ? 1U : 0U; + mtctx->jobs[jobID].dstFlushed = 0; + mtctx->roundBuff.pos += srcSize; + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + if (endFrame == 0) + { + nuint newPrefixSize = srcSize < mtctx->targetPrefixSize ? srcSize : mtctx->targetPrefixSize; + mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; + mtctx->inBuff.prefix.size = newPrefixSize; + } + else + { + mtctx->inBuff.prefix = kNullRange; + mtctx->frameEnded = (uint)endFrame; + if (mtctx->nextJobID == 0) + { + mtctx->@params.fParams.checksumFlag = 0; + } + } + + if (srcSize == 0 && mtctx->nextJobID > 0) + { + assert(endOp == ZSTD_EndDirective.ZSTD_e_end); + ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); + mtctx->nextJobID++; + return 0; + } + } + + if (POOL_tryAdd(mtctx->factory, (delegate* managed)(&ZSTDMT_compressionJob), &mtctx->jobs[jobID]) != 0) + { + mtctx->nextJobID++; + mtctx->jobReady = 0; + } + else + { + mtctx->jobReady = 1; + } + + return 0; + } + + /*! ZSTDMT_flushProduced() : + * flush whatever data has been produced but not yet flushed in current job. + * move to next job if current one is fully flushed. + * `output` : `pos` will be updated with amount of data flushed . + * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 
+ * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ + private static nuint ZSTDMT_flushProduced(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s* output, uint blockToFlush, ZSTD_EndDirective end) + { + uint wJobID = mtctx->doneJobID & mtctx->jobIDMask; + assert(output->size >= output->pos); + SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); + if (blockToFlush != 0 && mtctx->doneJobID < mtctx->nextJobID) + { + assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); + while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) + { + if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) + { + break; + } + + SynchronizationWrapper.Wait(&mtctx->jobs[wJobID].job_mutex); + } + } + + { + /* shared */ + nuint cSize = mtctx->jobs[wJobID].cSize; + /* shared */ + nuint srcConsumed = mtctx->jobs[wJobID].consumed; + /* read-only, could be done after mutex lock, but no-declaration-after-statement */ + nuint srcSize = mtctx->jobs[wJobID].src.size; + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + if (ERR_isError(cSize)) + { + ZSTDMT_waitForAllJobsCompleted(mtctx); + ZSTDMT_releaseAllJobResources(mtctx); + return cSize; + } + + assert(srcConsumed <= srcSize); + if (srcConsumed == srcSize && mtctx->jobs[wJobID].frameChecksumNeeded != 0) + { + uint checksum = (uint)ZSTD_XXH64_digest(&mtctx->serial.xxhState); + MEM_writeLE32((sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); + cSize += 4; + mtctx->jobs[wJobID].cSize += 4; + mtctx->jobs[wJobID].frameChecksumNeeded = 0; + } + + if (cSize > 0) + { + nuint toFlush = cSize - mtctx->jobs[wJobID].dstFlushed < output->size - output->pos ? 
cSize - mtctx->jobs[wJobID].dstFlushed : output->size - output->pos; + assert(mtctx->doneJobID < mtctx->nextJobID); + assert(cSize >= mtctx->jobs[wJobID].dstFlushed); + assert(mtctx->jobs[wJobID].dstBuff.start != null); + if (toFlush > 0) + { + memcpy((sbyte*)output->dst + output->pos, (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, (uint)toFlush); + } + + output->pos += toFlush; + mtctx->jobs[wJobID].dstFlushed += toFlush; + if (srcConsumed == srcSize && mtctx->jobs[wJobID].dstFlushed == cSize) + { + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); + mtctx->jobs[wJobID].dstBuff = g_nullBuffer; + mtctx->jobs[wJobID].cSize = 0; + mtctx->consumed += srcSize; + mtctx->produced += cSize; + mtctx->doneJobID++; + } + } + + if (cSize > mtctx->jobs[wJobID].dstFlushed) + return cSize - mtctx->jobs[wJobID].dstFlushed; + if (srcSize > srcConsumed) + return 1; + } + + if (mtctx->doneJobID < mtctx->nextJobID) + return 1; + if (mtctx->jobReady != 0) + return 1; + if (mtctx->inBuff.filled > 0) + return 1; + mtctx->allJobsCompleted = mtctx->frameEnded; + if (end == ZSTD_EndDirective.ZSTD_e_end) + return mtctx->frameEnded == 0 ? 1U : 0U; + return 0; + } + + /** + * Returns the range of data used by the earliest job that is not yet complete. + * If the data of the first job is broken up into two segments, we cover both + * sections. 
+ */ + private static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx_s* mtctx) + { + uint firstJobID = mtctx->doneJobID; + uint lastJobID = mtctx->nextJobID; + uint jobID; + /* no need to check during first round */ + nuint roundBuffCapacity = mtctx->roundBuff.capacity; + nuint nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; + if (lastJobID < nbJobs1stRoundMin) + return kNullRange; + for (jobID = firstJobID; jobID < lastJobID; ++jobID) + { + uint wJobID = jobID & mtctx->jobIDMask; + nuint consumed; + SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); + consumed = mtctx->jobs[wJobID].consumed; + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + if (consumed < mtctx->jobs[wJobID].src.size) + { + Range range = mtctx->jobs[wJobID].prefix; + if (range.size == 0) + { + range = mtctx->jobs[wJobID].src; + } + + assert(range.start <= mtctx->jobs[wJobID].src.start); + return range; + } + } + + return kNullRange; + } + + /** + * Returns non-zero iff buffer and range overlap. + */ + private static int ZSTDMT_isOverlapped(buffer_s buffer, Range range) + { + byte* bufferStart = (byte*)buffer.start; + byte* rangeStart = (byte*)range.start; + if (rangeStart == null || bufferStart == null) + return 0; + { + byte* bufferEnd = bufferStart + buffer.capacity; + byte* rangeEnd = rangeStart + range.size; + if (bufferStart == bufferEnd || rangeStart == rangeEnd) + return 0; + return bufferStart < rangeEnd && rangeStart < bufferEnd ? 1 : 0; + } + } + + private static int ZSTDMT_doesOverlapWindow(buffer_s buffer, ZSTD_window_t window) + { + Range extDict; + Range prefix; + extDict.start = window.dictBase + window.lowLimit; + extDict.size = window.dictLimit - window.lowLimit; + prefix.start = window.@base + window.dictLimit; + prefix.size = (nuint)(window.nextSrc - (window.@base + window.dictLimit)); + return ZSTDMT_isOverlapped(buffer, extDict) != 0 || ZSTDMT_isOverlapped(buffer, prefix) != 0 ? 
1 : 0; + } + + private static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx_s* mtctx, buffer_s buffer) + { + if (mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + void** mutex = &mtctx->serial.ldmWindowMutex; + SynchronizationWrapper.Enter(mutex); + while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow) != 0) + { + SynchronizationWrapper.Wait(mutex); + } + + SynchronizationWrapper.Exit(mutex); + } + } + + /** + * Attempts to set the inBuff to the next section to fill. + * If any part of the new section is still in use we give up. + * Returns non-zero if the buffer is filled. + */ + private static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx_s* mtctx) + { + Range inUse = ZSTDMT_getInputDataInUse(mtctx); + nuint spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; + nuint spaceNeeded = mtctx->targetSectionSize; + buffer_s buffer; + assert(mtctx->inBuff.buffer.start == null); + assert(mtctx->roundBuff.capacity >= spaceNeeded); + if (spaceLeft < spaceNeeded) + { + /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. + * Simply copy the prefix to the beginning in that case. 
+ */ + byte* start = mtctx->roundBuff.buffer; + nuint prefixSize = mtctx->inBuff.prefix.size; + buffer.start = start; + buffer.capacity = prefixSize; + if (ZSTDMT_isOverlapped(buffer, inUse) != 0) + { + return 0; + } + + ZSTDMT_waitForLdmComplete(mtctx, buffer); + memmove(start, mtctx->inBuff.prefix.start, prefixSize); + mtctx->inBuff.prefix.start = start; + mtctx->roundBuff.pos = prefixSize; + } + + buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; + buffer.capacity = spaceNeeded; + if (ZSTDMT_isOverlapped(buffer, inUse) != 0) + { + return 0; + } + + assert(ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix) == 0); + ZSTDMT_waitForLdmComplete(mtctx, buffer); + mtctx->inBuff.buffer = buffer; + mtctx->inBuff.filled = 0; + assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); + return 1; + } + + /** + * Searches through the input for a synchronization point. If one is found, we + * will instruct the caller to flush, and return the number of bytes to load. + * Otherwise, we will load as many bytes as possible and instruct the caller + * to continue as normal. + */ + private static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx_s* mtctx, ZSTD_inBuffer_s input) + { + byte* istart = (byte*)input.src + input.pos; + ulong primePower = mtctx->rsync.primePower; + ulong hitMask = mtctx->rsync.hitMask; + SyncPoint syncPoint; + ulong hash; + byte* prev; + nuint pos; + syncPoint.toLoad = input.size - input.pos < mtctx->targetSectionSize - mtctx->inBuff.filled ? 
input.size - input.pos : mtctx->targetSectionSize - mtctx->inBuff.filled; + syncPoint.flush = 0; + if (mtctx->@params.rsyncable == 0) + return syncPoint; + if (mtctx->inBuff.filled + input.size - input.pos < 1 << 17) + return syncPoint; + if (mtctx->inBuff.filled + syncPoint.toLoad < 32) + return syncPoint; + if (mtctx->inBuff.filled < 1 << 17) + { + pos = (1 << 17) - mtctx->inBuff.filled; + if (pos >= 32) + { + prev = istart + pos - 32; + hash = ZSTD_rollingHash_compute(prev, 32); + } + else + { + assert(mtctx->inBuff.filled >= 32); + prev = (byte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - 32; + hash = ZSTD_rollingHash_compute(prev + pos, 32 - pos); + hash = ZSTD_rollingHash_append(hash, istart, pos); + } + } + else + { + assert(mtctx->inBuff.filled >= 1 << 17); + assert(1 << 17 >= 32); + pos = 0; + prev = (byte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - 32; + hash = ZSTD_rollingHash_compute(prev, 32); + if ((hash & hitMask) == hitMask) + { + syncPoint.toLoad = 0; + syncPoint.flush = 1; + return syncPoint; + } + } + + assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); + for (; pos < syncPoint.toLoad; ++pos) + { + byte toRemove = pos < 32 ? 
prev[pos] : istart[pos - 32]; + hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + assert(mtctx->inBuff.filled + pos >= 1 << 17); + if ((hash & hitMask) == hitMask) + { + syncPoint.toLoad = pos + 1; + syncPoint.flush = 1; + ++pos; + break; + } + } + + assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); + return syncPoint; + } + + /* === Streaming functions === */ + private static nuint ZSTDMT_nextInputSizeHint(ZSTDMT_CCtx_s* mtctx) + { + nuint hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; + if (hintInSize == 0) + hintInSize = mtctx->targetSectionSize; + return hintInSize; + } + + /** ZSTDMT_compressStream_generic() : + * internal use only - exposed to be invoked from zstd_compress.c + * assumption : output and input are valid (pos <= size) + * @return : minimum amount of data remaining to flush, 0 if none */ + private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + { + uint forwardInputProgress = 0; + assert(output->pos <= output->size); + assert(input->pos <= input->size); + if (mtctx->frameEnded != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + if (mtctx->jobReady == 0 && input->size > input->pos) + { + if (mtctx->inBuff.buffer.start == null) + { + assert(mtctx->inBuff.filled == 0); + if (ZSTDMT_tryGetInputRange(mtctx) == 0) + { + assert(mtctx->doneJobID != mtctx->nextJobID); + } + } + + if (mtctx->inBuff.buffer.start != null) + { + SyncPoint syncPoint = findSynchronizationPoint(mtctx, *input); + if (syncPoint.flush != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) + { + endOp = ZSTD_EndDirective.ZSTD_e_flush; + } + + assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); + memcpy((sbyte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (sbyte*)input->src + input->pos, (uint)syncPoint.toLoad); + 
input->pos += syncPoint.toLoad; + mtctx->inBuff.filled += syncPoint.toLoad; + forwardInputProgress = syncPoint.toLoad > 0 ? 1U : 0U; + } + } + + if (input->pos < input->size && endOp == ZSTD_EndDirective.ZSTD_e_end) + { + assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->@params.rsyncable != 0); + endOp = ZSTD_EndDirective.ZSTD_e_flush; + } + + if (mtctx->jobReady != 0 || mtctx->inBuff.filled >= mtctx->targetSectionSize || endOp != ZSTD_EndDirective.ZSTD_e_continue && mtctx->inBuff.filled > 0 || endOp == ZSTD_EndDirective.ZSTD_e_end && mtctx->frameEnded == 0) + { + nuint jobSize = mtctx->inBuff.filled; + assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); + { + nuint err_code = ZSTDMT_createCompressionJob(mtctx, jobSize, endOp); + if (ERR_isError(err_code)) + { + return err_code; + } + } + } + + { + /* block if there was no forward input progress */ + nuint remainingToFlush = ZSTDMT_flushProduced(mtctx, output, forwardInputProgress == 0 ? 1U : 0U, endOp); + if (input->pos < input->size) + return remainingToFlush > 1 ? 
remainingToFlush : 1; + return remainingToFlush; + } + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs new file mode 100644 index 000000000..d13d9b12b --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs @@ -0,0 +1,15 @@ +using System.Runtime.InteropServices; + +namespace ZstdSharp.Unsafe +{ + [StructLayout(LayoutKind.Explicit)] + public unsafe struct _wksps_e__Union + { + [FieldOffset(0)] + public HUF_buildCTable_wksp_tables buildCTable_wksp; + [FieldOffset(0)] + public HUF_WriteCTableWksp writeCTable_wksp; + [FieldOffset(0)] + public fixed uint hist_wksp[1024]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs new file mode 100644 index 000000000..e54af2070 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + public struct algo_time_t + { + public uint tableTime; + public uint decode256Time; + public algo_time_t(uint tableTime, uint decode256Time) + { + this.tableTime = tableTime; + this.decode256Time = decode256Time; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs new file mode 100644 index 000000000..a77fea3bd --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public enum base_directive_e + { + base_0possible = 0, + base_1guaranteed = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs new file mode 100644 index 000000000..058ddffd2 --- 
/dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct blockProperties_t + { + public blockType_e blockType; + public uint lastBlock; + public uint origSize; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs new file mode 100644 index 000000000..baa27390a --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public enum blockType_e + { + bt_raw, + bt_rle, + bt_compressed, + bt_reserved + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs new file mode 100644 index 000000000..093a0b8d2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs @@ -0,0 +1,15 @@ +namespace ZstdSharp.Unsafe +{ + /* ===== Buffer Pool ===== */ + /* a single Buffer Pool can be invoked from multiple threads in parallel */ + public unsafe struct buffer_s + { + public void* start; + public nuint capacity; + public buffer_s(void* start, nuint capacity) + { + this.start = start; + this.capacity = capacity; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs new file mode 100644 index 000000000..a4b1abb30 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct dictItem + { + public uint pos; + public uint length; + public uint savings; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs new file mode 100644 index 000000000..8fc651685 --- /dev/null +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /* ------------------------------------------ */ + /* ===== Multi-threaded compression ===== */ + /* ------------------------------------------ */ + public struct InBuff_t + { + /* read-only non-owned prefix buffer */ + public Range prefix; + public buffer_s buffer; + public nuint filled; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs new file mode 100644 index 000000000..391fa98ec --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct ldmEntry_t + { + public uint offset; + public uint checksum; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs new file mode 100644 index 000000000..f3e41eddb --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs @@ -0,0 +1,10 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct ldmMatchCandidate_t + { + public byte* split; + public uint hash; + public uint checksum; + public ldmEntry_t* bucket; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs new file mode 100644 index 000000000..253e3bfee --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs @@ -0,0 +1,18 @@ +namespace ZstdSharp.Unsafe +{ + public struct ldmParams_t + { + /* ZSTD_ps_enable to enable LDM. 
ZSTD_ps_auto by default */ + public ZSTD_paramSwitch_e enableLdm; + /* Log size of hashTable */ + public uint hashLog; + /* Log bucket size for collision resolution, at most 8 */ + public uint bucketSizeLog; + /* Minimum match length */ + public uint minMatchLength; + /* Log number of entries to skip */ + public uint hashRateLog; + /* Window log for the LDM */ + public uint windowLog; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs new file mode 100644 index 000000000..810380075 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct ldmRollingHashState_t + { + public ulong rolling; + public ulong stopMask; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs new file mode 100644 index 000000000..a02441936 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs @@ -0,0 +1,169 @@ +using System.Runtime.CompilerServices; + +namespace ZstdSharp.Unsafe +{ + public unsafe struct ldmState_t + { + /* State for the window round buffer management */ + public ZSTD_window_t window; + public ldmEntry_t* hashTable; + public uint loadedDictEnd; + /* Next position in bucket to insert entry */ + public byte* bucketOffsets; + public _splitIndices_e__FixedBuffer splitIndices; + public _matchCandidates_e__FixedBuffer matchCandidates; +#if NET8_0_OR_GREATER + [InlineArray(64)] + public unsafe struct _splitIndices_e__FixedBuffer + { + public nuint e0; + } + +#else + public unsafe struct _splitIndices_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; 
+ public nuint e10; + public nuint e11; + public nuint e12; + public nuint e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public nuint e62; + public nuint e63; + } +#endif + +#if NET8_0_OR_GREATER + [InlineArray(64)] + public unsafe struct _matchCandidates_e__FixedBuffer + { + public ldmMatchCandidate_t e0; + } + +#else + public unsafe struct _matchCandidates_e__FixedBuffer + { + public ldmMatchCandidate_t e0; + public ldmMatchCandidate_t e1; + public ldmMatchCandidate_t e2; + public ldmMatchCandidate_t e3; + public ldmMatchCandidate_t e4; + public ldmMatchCandidate_t e5; + public ldmMatchCandidate_t e6; + public ldmMatchCandidate_t e7; + public ldmMatchCandidate_t e8; + public ldmMatchCandidate_t e9; + public ldmMatchCandidate_t e10; + public ldmMatchCandidate_t e11; + public ldmMatchCandidate_t e12; + public ldmMatchCandidate_t e13; + public ldmMatchCandidate_t e14; + public ldmMatchCandidate_t e15; + public ldmMatchCandidate_t e16; + public ldmMatchCandidate_t e17; + public ldmMatchCandidate_t e18; + public ldmMatchCandidate_t e19; + public ldmMatchCandidate_t 
e20; + public ldmMatchCandidate_t e21; + public ldmMatchCandidate_t e22; + public ldmMatchCandidate_t e23; + public ldmMatchCandidate_t e24; + public ldmMatchCandidate_t e25; + public ldmMatchCandidate_t e26; + public ldmMatchCandidate_t e27; + public ldmMatchCandidate_t e28; + public ldmMatchCandidate_t e29; + public ldmMatchCandidate_t e30; + public ldmMatchCandidate_t e31; + public ldmMatchCandidate_t e32; + public ldmMatchCandidate_t e33; + public ldmMatchCandidate_t e34; + public ldmMatchCandidate_t e35; + public ldmMatchCandidate_t e36; + public ldmMatchCandidate_t e37; + public ldmMatchCandidate_t e38; + public ldmMatchCandidate_t e39; + public ldmMatchCandidate_t e40; + public ldmMatchCandidate_t e41; + public ldmMatchCandidate_t e42; + public ldmMatchCandidate_t e43; + public ldmMatchCandidate_t e44; + public ldmMatchCandidate_t e45; + public ldmMatchCandidate_t e46; + public ldmMatchCandidate_t e47; + public ldmMatchCandidate_t e48; + public ldmMatchCandidate_t e49; + public ldmMatchCandidate_t e50; + public ldmMatchCandidate_t e51; + public ldmMatchCandidate_t e52; + public ldmMatchCandidate_t e53; + public ldmMatchCandidate_t e54; + public ldmMatchCandidate_t e55; + public ldmMatchCandidate_t e56; + public ldmMatchCandidate_t e57; + public ldmMatchCandidate_t e58; + public ldmMatchCandidate_t e59; + public ldmMatchCandidate_t e60; + public ldmMatchCandidate_t e61; + public ldmMatchCandidate_t e62; + public ldmMatchCandidate_t e63; + } +#endif + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs new file mode 100644 index 000000000..3e30dffc7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs @@ -0,0 +1,13 @@ +namespace ZstdSharp.Unsafe +{ + /* ************************************************************** + * Required declarations + ****************************************************************/ + public 
struct nodeElt_s + { + public uint count; + public ushort parent; + public byte @byte; + public byte nbBits; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs new file mode 100644 index 000000000..7c652776f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct offsetCount_t + { + public uint offset; + public uint count; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs new file mode 100644 index 000000000..5f08de1a0 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs @@ -0,0 +1,39 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct optState_t + { + /* table of literals statistics, of size 256 */ + public uint* litFreq; + /* table of litLength statistics, of size (MaxLL+1) */ + public uint* litLengthFreq; + /* table of matchLength statistics, of size (MaxML+1) */ + public uint* matchLengthFreq; + /* table of offCode statistics, of size (MaxOff+1) */ + public uint* offCodeFreq; + /* list of found matches, of size ZSTD_OPT_SIZE */ + public ZSTD_match_t* matchTable; + /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ + public ZSTD_optimal_t* priceTable; + /* nb of literals */ + public uint litSum; + /* nb of litLength codes */ + public uint litLengthSum; + /* nb of matchLength codes */ + public uint matchLengthSum; + /* nb of offset codes */ + public uint offCodeSum; + /* to compare to log2(litfreq) */ + public uint litSumBasePrice; + /* to compare to log2(llfreq) */ + public uint litLengthSumBasePrice; + /* to compare to log2(mlfreq) */ + public uint matchLengthSumBasePrice; + /* to compare to log2(offreq) */ + public uint offCodeSumBasePrice; + /* prices can be determined 
dynamically, or follow a pre-defined cost structure */ + public ZSTD_OptPrice_e priceType; + /* pre-calculated dictionary statistics */ + public ZSTD_entropyCTables_t* symbolCosts; + public ZSTD_paramSwitch_e literalCompressionMode; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs new file mode 100644 index 000000000..f78f027d2 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs @@ -0,0 +1,8 @@ +namespace ZstdSharp.Unsafe +{ + public struct rankPos + { + public ushort @base; + public ushort curr; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs new file mode 100644 index 000000000..d6c4fb5d7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs @@ -0,0 +1,7 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct rankValCol_t + { + public fixed uint Body[13]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs new file mode 100644 index 000000000..adefb2625 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs @@ -0,0 +1,12 @@ +namespace ZstdSharp.Unsafe +{ + public struct rawSeq + { + /* Offset of sequence */ + public uint offset; + /* Length of literals prior to match */ + public uint litLength; + /* Raw length of match */ + public uint matchLength; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs new file mode 100644 index 000000000..c63ef35b3 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs @@ -0,0 +1,7 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct repcodes_s + { + public 
fixed uint rep[3]; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs new file mode 100644 index 000000000..fb38e0f68 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public enum searchMethod_e + { + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs new file mode 100644 index 000000000..2743a146f --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs @@ -0,0 +1,17 @@ +namespace ZstdSharp.Unsafe +{ + public unsafe struct seqState_t + { + public BIT_DStream_t DStream; + public ZSTD_fseState stateLL; + public ZSTD_fseState stateOffb; + public ZSTD_fseState stateML; + public _prevOffset_e__FixedBuffer prevOffset; + public unsafe struct _prevOffset_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs new file mode 100644 index 000000000..b4b7a4d40 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs @@ -0,0 +1,11 @@ +namespace ZstdSharp.Unsafe +{ + /* Struct to keep track of where we are in our recursive calls. 
*/ + public unsafe struct seqStoreSplits + { + /* Array of split indices */ + public uint* splitLocations; + /* The current index within splitLocations being worked on */ + public nuint idx; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs new file mode 100644 index 000000000..d286898c4 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + public struct seq_t + { + public nuint litLength; + public nuint matchLength; + public nuint offset; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs new file mode 100644 index 000000000..facf809c8 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs @@ -0,0 +1,7 @@ +namespace ZstdSharp.Unsafe +{ + public struct sortedSymbol_t + { + public byte symbol; + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs new file mode 100644 index 000000000..fae3973f0 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs @@ -0,0 +1,9 @@ +namespace ZstdSharp.Unsafe +{ + /* Streaming state is used to inform allocation of the literal buffer */ + public enum streaming_operation + { + not_streaming = 0, + is_streaming = 1 + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs new file mode 100644 index 000000000..09aef626d --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs @@ -0,0 +1,106 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + 
+ +namespace ZstdSharp +{ + public static unsafe class UnsafeHelper + { + public static void* PoisonMemory(void* destination, ulong size) + { + memset(destination, 0xCC, (uint)size); + return destination; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void* malloc(ulong size) + { +#if NET6_0_OR_GREATER + var ptr = NativeMemory.Alloc((nuint) size); +#else + var ptr = (void*) Marshal.AllocHGlobal((nint) size); +#endif +#if DEBUG + return PoisonMemory(ptr, size); +#else + return ptr; +#endif + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void* calloc(ulong num, ulong size) + { +#if NET6_0_OR_GREATER + return NativeMemory.AllocZeroed((nuint) num, (nuint) size); +#else + var total = num * size; + assert(total <= uint.MaxValue); + var destination = (void*)Marshal.AllocHGlobal((nint)total); + memset(destination, 0, (uint)total); + return destination; +#endif + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memcpy(void* destination, void* source, uint size) + => System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memset(void* memPtr, byte val, uint size) + => System.Runtime.CompilerServices.Unsafe.InitBlockUnaligned(memPtr, val, size); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void free(void* ptr) + { +#if NET6_0_OR_GREATER + NativeMemory.Free(ptr); +#else + Marshal.FreeHGlobal((IntPtr) ptr); +#endif + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static T* GetArrayPointer(T[] array) where T : unmanaged + { + var size = (uint)(sizeof(T) * array.Length); +#if NET9_0_OR_GREATER + // This function is used to allocate memory for static data blocks. 
+ // We have to use AllocateTypeAssociatedMemory and link the memory's + // lifetime to this assembly, in order to prevent memory leaks when + // loading the assembly in an unloadable AssemblyLoadContext. + // While introduced in .NET 5, we call this only in .NET 9+, because + // it's not implemented in the Mono runtime until then. + var destination = (T*)RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); +#else + var destination = (T*)malloc(size); +#endif + fixed (void* source = &array[0]) + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); + + return destination; + } + + [Conditional("DEBUG")] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void assert(bool condition, string? message = null) + { + if (!condition) + throw new ArgumentException(message ?? "assert failed"); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memmove(void* destination, void* source, ulong size) + => Buffer.MemoryCopy(source, destination, size, size); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int memcmp(void* buf1, void* buf2, ulong size) + { + assert(size <= int.MaxValue); + var intSize = (int)size; + return new ReadOnlySpan(buf1, intSize) + .SequenceCompareTo(new ReadOnlySpan(buf2, intSize)); + } + } +} diff --git a/src/SharpCompress/SharpCompress.csproj b/src/SharpCompress/SharpCompress.csproj index 6af661644..31edb8325 100644 --- a/src/SharpCompress/SharpCompress.csproj +++ b/src/SharpCompress/SharpCompress.csproj @@ -36,7 +36,6 @@ - diff --git a/src/SharpCompress/packages.lock.json b/src/SharpCompress/packages.lock.json index 8230141df..82a2426c9 100644 --- a/src/SharpCompress/packages.lock.json +++ b/src/SharpCompress/packages.lock.json @@ -27,17 +27,6 @@ "resolved": "4.6.0", "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" }, - "ZstdSharp.Port": { - "type": "Direct", - "requested": 
"[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==", - "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "5.0.0", - "System.Memory": "4.5.5", - "System.Runtime.CompilerServices.Unsafe": "6.0.0" - } - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", @@ -52,44 +41,6 @@ "type": "Transitive", "resolved": "8.0.0", "contentHash": "dk9JPxTCIevS75HyEQ0E4OVAFhB2N+V9ShCXf8Q6FkUQZDkgLI12y679Nym1YqsiSysuQskT7Z+6nUf3yab6Vw==" - }, - "System.Numerics.Vectors": { - "type": "Transitive", - "resolved": "4.5.0", - "contentHash": "QQTlPTl06J/iiDbJCiepZ4H//BVraReU4O4EoRw1U02H5TLUIT7xn3GnDp9AXPSlJUDyFs4uWjWafNX6WrAojQ==" - }, - "System.Runtime.CompilerServices.Unsafe": { - "type": "Transitive", - "resolved": "6.0.0", - "contentHash": "/iUeP3tq1S0XdNNoMz5C9twLSrM/TH+qElHkXWaPvuNOt+99G75NrV0OS2EqHx5wMN7popYjpc8oTjC1y16DLg==" - }, - "System.Threading.Tasks.Extensions": { - "type": "Transitive", - "resolved": "4.5.4", - "contentHash": "zteT+G8xuGu6mS+mzDzYXbzS7rd3K6Fjb9RiZlYlJPam2/hU7JCBZBVEcywNuR+oZ1ncTvc/cq0faRr3P01OVg==", - "dependencies": { - "System.Runtime.CompilerServices.Unsafe": "4.5.3" - } - }, - "Microsoft.Bcl.AsyncInterfaces": { - "type": "CentralTransitive", - "requested": "[8.0.0, )", - "resolved": "5.0.0", - "contentHash": "W8DPQjkMScOMTtJbPwmPyj9c3zYSFGawDW3jwlBOOsnY+EzZFLgNQ/UMkK35JmkNOVPdCyPr2Tw7Vv9N+KA3ZQ==", - "dependencies": { - "System.Threading.Tasks.Extensions": "4.5.4" - } - }, - "System.Memory": { - "type": "CentralTransitive", - "requested": "[4.6.0, )", - "resolved": "4.5.5", - "contentHash": "XIWiDvKPXaTveaB7HVganDlOCRoj03l+jrwNvcge/t8vhGYKvqV+dMv6G4SAX2NoNmN0wZfVPTAlFwZcZvVOUw==", - "dependencies": { - "System.Buffers": "4.5.1", - "System.Numerics.Vectors": "4.5.0", - "System.Runtime.CompilerServices.Unsafe": "4.5.3" - } } }, ".NETFramework,Version=v4.8.1": { @@ -118,17 +69,6 @@ "resolved": "4.6.0", "contentHash": 
"lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" }, - "ZstdSharp.Port": { - "type": "Direct", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==", - "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "5.0.0", - "System.Memory": "4.5.5", - "System.Runtime.CompilerServices.Unsafe": "6.0.0" - } - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", @@ -143,44 +83,6 @@ "type": "Transitive", "resolved": "8.0.0", "contentHash": "dk9JPxTCIevS75HyEQ0E4OVAFhB2N+V9ShCXf8Q6FkUQZDkgLI12y679Nym1YqsiSysuQskT7Z+6nUf3yab6Vw==" - }, - "System.Numerics.Vectors": { - "type": "Transitive", - "resolved": "4.5.0", - "contentHash": "QQTlPTl06J/iiDbJCiepZ4H//BVraReU4O4EoRw1U02H5TLUIT7xn3GnDp9AXPSlJUDyFs4uWjWafNX6WrAojQ==" - }, - "System.Runtime.CompilerServices.Unsafe": { - "type": "Transitive", - "resolved": "6.0.0", - "contentHash": "/iUeP3tq1S0XdNNoMz5C9twLSrM/TH+qElHkXWaPvuNOt+99G75NrV0OS2EqHx5wMN7popYjpc8oTjC1y16DLg==" - }, - "System.Threading.Tasks.Extensions": { - "type": "Transitive", - "resolved": "4.5.4", - "contentHash": "zteT+G8xuGu6mS+mzDzYXbzS7rd3K6Fjb9RiZlYlJPam2/hU7JCBZBVEcywNuR+oZ1ncTvc/cq0faRr3P01OVg==", - "dependencies": { - "System.Runtime.CompilerServices.Unsafe": "4.5.3" - } - }, - "Microsoft.Bcl.AsyncInterfaces": { - "type": "CentralTransitive", - "requested": "[8.0.0, )", - "resolved": "5.0.0", - "contentHash": "W8DPQjkMScOMTtJbPwmPyj9c3zYSFGawDW3jwlBOOsnY+EzZFLgNQ/UMkK35JmkNOVPdCyPr2Tw7Vv9N+KA3ZQ==", - "dependencies": { - "System.Threading.Tasks.Extensions": "4.5.4" - } - }, - "System.Memory": { - "type": "CentralTransitive", - "requested": "[4.6.0, )", - "resolved": "4.5.5", - "contentHash": "XIWiDvKPXaTveaB7HVganDlOCRoj03l+jrwNvcge/t8vhGYKvqV+dMv6G4SAX2NoNmN0wZfVPTAlFwZcZvVOUw==", - "dependencies": { - "System.Buffers": "4.5.1", - "System.Numerics.Vectors": "4.5.0", - 
"System.Runtime.CompilerServices.Unsafe": "4.5.3" - } } }, ".NETStandard,Version=v2.0": { @@ -248,17 +150,6 @@ "System.Runtime.CompilerServices.Unsafe": "6.0.0" } }, - "ZstdSharp.Port": { - "type": "Direct", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==", - "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "5.0.0", - "System.Memory": "4.5.5", - "System.Runtime.CompilerServices.Unsafe": "6.0.0" - } - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", @@ -324,12 +215,6 @@ "resolved": "4.6.0", "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" }, - "ZstdSharp.Port": { - "type": "Direct", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==" - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", @@ -378,12 +263,6 @@ "resolved": "4.6.0", "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" }, - "ZstdSharp.Port": { - "type": "Direct", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==" - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", diff --git a/tests/SharpCompress.Test/packages.lock.json b/tests/SharpCompress.Test/packages.lock.json index 511e7921a..ae6cfd236 100644 --- a/tests/SharpCompress.Test/packages.lock.json +++ b/tests/SharpCompress.Test/packages.lock.json @@ -82,11 +82,6 @@ "resolved": "1.5.0", "contentHash": "EXKiDFsChZW0RjrZ4FYHu9aW6+P4MCgEDCklsVseRfhoO0F+dXeMSsMRAlVXIo06kGJ/zv+2w1a2uc2+kxxSaQ==" }, - "System.Numerics.Vectors": { - "type": "Transitive", - "resolved": "4.5.0", - "contentHash": 
"QQTlPTl06J/iiDbJCiepZ4H//BVraReU4O4EoRw1U02H5TLUIT7xn3GnDp9AXPSlJUDyFs4uWjWafNX6WrAojQ==" - }, "System.Reflection.Metadata": { "type": "Transitive", "resolved": "1.6.0", @@ -97,8 +92,8 @@ }, "System.Runtime.CompilerServices.Unsafe": { "type": "Transitive", - "resolved": "6.0.0", - "contentHash": "/iUeP3tq1S0XdNNoMz5C9twLSrM/TH+qElHkXWaPvuNOt+99G75NrV0OS2EqHx5wMN7popYjpc8oTjC1y16DLg==" + "resolved": "4.5.3", + "contentHash": "3TIsJhD1EiiT0w2CcDMN/iSSwnNnsrnbzeVHSKkaEgV85txMprmuO+Yq2AdSbeVGcg28pdNDTPK87tJhX7VFHw==" }, "System.Threading.Tasks.Extensions": { "type": "Transitive", @@ -156,17 +151,7 @@ "sharpcompress": { "type": "Project", "dependencies": { - "System.Buffers": "[4.6.0, )", - "ZstdSharp.Port": "[0.8.6, )" - } - }, - "Microsoft.Bcl.AsyncInterfaces": { - "type": "CentralTransitive", - "requested": "[8.0.0, )", - "resolved": "5.0.0", - "contentHash": "W8DPQjkMScOMTtJbPwmPyj9c3zYSFGawDW3jwlBOOsnY+EzZFLgNQ/UMkK35JmkNOVPdCyPr2Tw7Vv9N+KA3ZQ==", - "dependencies": { - "System.Threading.Tasks.Extensions": "4.5.4" + "System.Buffers": "[4.6.0, )" } }, "System.Buffers": { @@ -174,28 +159,6 @@ "requested": "[4.6.0, )", "resolved": "4.6.0", "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" - }, - "System.Memory": { - "type": "CentralTransitive", - "requested": "[4.6.0, )", - "resolved": "4.5.5", - "contentHash": "XIWiDvKPXaTveaB7HVganDlOCRoj03l+jrwNvcge/t8vhGYKvqV+dMv6G4SAX2NoNmN0wZfVPTAlFwZcZvVOUw==", - "dependencies": { - "System.Buffers": "4.5.1", - "System.Numerics.Vectors": "4.5.0", - "System.Runtime.CompilerServices.Unsafe": "4.5.3" - } - }, - "ZstdSharp.Port": { - "type": "CentralTransitive", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==", - "dependencies": { - "Microsoft.Bcl.AsyncInterfaces": "5.0.0", - "System.Memory": "4.5.5", - "System.Runtime.CompilerServices.Unsafe": "6.0.0" - } } }, 
"net8.0": { @@ -342,8 +305,7 @@ "sharpcompress": { "type": "Project", "dependencies": { - "System.Buffers": "[4.6.0, )", - "ZstdSharp.Port": "[0.8.6, )" + "System.Buffers": "[4.6.0, )" } }, "System.Buffers": { @@ -351,12 +313,6 @@ "requested": "[4.6.0, )", "resolved": "4.6.0", "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" - }, - "ZstdSharp.Port": { - "type": "CentralTransitive", - "requested": "[0.8.6, )", - "resolved": "0.8.6", - "contentHash": "iP4jVLQoQmUjMU88g1WObiNr6YKZGvh4aOXn3yOJsHqZsflwRsxZPcIBvNXgjXO3vQKSLctXGLTpcBPLnWPS8A==" } } } From 999af800afaa6850d2cf6b8d12894ab0572737e9 Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 16:21:05 +0100 Subject: [PATCH 2/6] add more non legacy stuff --- .../Compressors/ZStandard/JobThreadPool.cs | 146 ++++++++++++++++++ .../Compressors/ZStandard/Pool.cs | 117 ++++++++++++++ .../ZStandard/SynchronizationWrapper.cs | 23 +++ .../Compressors/ZStandard/UnmanagedObject.cs | 19 +++ .../Compressors/ZStandard/Unsafe/Bitstream.cs | 2 - .../ZStandard/Unsafe/HufDecompress.cs | 2 - .../ZStandard/Unsafe/ZstdCompress.cs | 10 +- .../Compressors/ZStandard/ZstdException.cs | 16 ++ 8 files changed, 326 insertions(+), 9 deletions(-) create mode 100644 src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Pool.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/ZstdException.cs diff --git a/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs new file mode 100644 index 000000000..a51c36798 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs @@ -0,0 +1,146 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; 
+using System.Threading; + +namespace ZstdSharp +{ + internal unsafe class JobThreadPool : IDisposable + { + private int numThreads; + private readonly List threads; + private readonly BlockingCollection queue; + + private struct Job + { + public void* function; + public void* opaque; + } + + private class JobThread + { + private Thread Thread { get; } + public CancellationTokenSource CancellationTokenSource { get; } + + public JobThread(Thread thread) + { + CancellationTokenSource = new CancellationTokenSource(); + Thread = thread; + } + + public void Start() + { + Thread.Start(this); + } + + public void Cancel() + { + CancellationTokenSource.Cancel(); + } + + public void Join() + { + Thread.Join(); + } + } + + private void Worker(object? obj) + { + if (obj is not JobThread poolThread) + return; + + var cancellationToken = poolThread.CancellationTokenSource.Token; + while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested) + { + try + { + if (queue.TryTake(out var job, -1, cancellationToken)) + ((delegate* managed)job.function)(job.opaque); + } + catch (InvalidOperationException) + { + } + catch (OperationCanceledException) + { + } + } + } + + public JobThreadPool(int num, int queueSize) + { + numThreads = num; + queue = new BlockingCollection(queueSize + 1); + threads = new List(num); + for (var i = 0; i < numThreads; i++) + CreateThread(); + } + + private void CreateThread() + { + var poolThread = new JobThread(new Thread(Worker)); + threads.Add(poolThread); + poolThread.Start(); + } + + public void Resize(int num) + { + lock (threads) + { + if (num < numThreads) + { + for (var i = numThreads - 1; i >= num; i--) + { + threads[i].Cancel(); + threads.RemoveAt(i); + } + } + else + { + for (var i = numThreads; i < num; i++) + CreateThread(); + } + } + + numThreads = num; + } + + public void Add(void* function, void* opaque) + { + queue.Add(new Job { function = function, opaque = opaque }); + } + + public bool TryAdd(void* function, void* opaque) + { + 
return queue.TryAdd(new Job { function = function, opaque = opaque }); + } + + public void Join(bool cancel = true) + { + queue.CompleteAdding(); + List jobThreads; + lock (threads) + jobThreads = new List(threads); + + if (cancel) + { + foreach (var thread in jobThreads) + thread.Cancel(); + } + + foreach (var thread in jobThreads) + thread.Join(); + } + + public void Dispose() + { + queue.Dispose(); + } + + public int Size() + { + // todo not implemented + // https://github.com/dotnet/runtime/issues/24200 + return 0; + } + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Pool.cs b/src/SharpCompress/Compressors/ZStandard/Pool.cs new file mode 100644 index 000000000..41c0ab579 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Pool.cs @@ -0,0 +1,117 @@ +using static ZstdSharp.UnsafeHelper; + +namespace ZstdSharp.Unsafe +{ + public static unsafe partial class Methods + { + private static JobThreadPool GetThreadPool(void* ctx) => UnmanagedObject.Unwrap(ctx); + + /* ZSTD_createThreadPool() : public access point */ + public static void* ZSTD_createThreadPool(nuint numThreads) + { + return POOL_create(numThreads, 0); + } + + /*! POOL_create() : + * Create a thread pool with at most `numThreads` threads. + * `numThreads` must be at least 1. + * The maximum number of queued jobs before blocking is `queueSize`. + * @return : POOL_ctx pointer on success, else NULL. + */ + private static void* POOL_create(nuint numThreads, nuint queueSize) + { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); + } + + private static void* POOL_create_advanced(nuint numThreads, nuint queueSize, ZSTD_customMem customMem) + { + var jobThreadPool = new JobThreadPool((int)numThreads, (int)queueSize); + return UnmanagedObject.Wrap(jobThreadPool); + } + + /*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. + */ + private static void POOL_join(void* ctx) + { + GetThreadPool(ctx).Join(); + } + + /*! 
POOL_free() : + * Free a thread pool returned by POOL_create(). + */ + private static void POOL_free(void* ctx) + { + if (ctx == null) + { + return; + } + + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Join(); + jobThreadPool.Dispose(); + UnmanagedObject.Free(ctx); + } + + /*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ + private static void POOL_joinJobs(void* ctx) + { + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Join(false); + } + + public static void ZSTD_freeThreadPool(void* pool) + { + POOL_free(pool); + } + + /*! POOL_sizeof() : + * @return threadpool memory usage + * note : compatible with NULL (returns 0 in this case) + */ + private static nuint POOL_sizeof(void* ctx) + { + if (ctx == null) + return 0; + var jobThreadPool = GetThreadPool(ctx); + return (nuint)jobThreadPool.Size(); + } + + /* @return : 0 on success, 1 on error */ + private static int POOL_resize(void* ctx, nuint numThreads) + { + if (ctx == null) + return 1; + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Resize((int)numThreads); + return 0; + } + + /*! POOL_add() : + * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. + * Possibly blocks until there is room in the queue. + * Note : The function may be executed asynchronously, + * therefore, `opaque` must live until function has been completed. + */ + private static void POOL_add(void* ctx, void* function, void* opaque) + { + assert(ctx != null); + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Add(function, opaque); + } + + /*! POOL_tryAdd() : + * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. + * Returns immediately even if not (does not block). + * @return : 1 if successful, 0 if not. + */ + private static int POOL_tryAdd(void* ctx, void* function, void* opaque) + { + assert(ctx != null); + var jobThreadPool = GetThreadPool(ctx); + return jobThreadPool.TryAdd(function, opaque) ? 
1 : 0; + } + } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs new file mode 100644 index 000000000..53c232e77 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs @@ -0,0 +1,23 @@ +using System.Threading; + +namespace ZstdSharp +{ + internal static unsafe class SynchronizationWrapper + { + private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap(*obj); + + public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object()); + + public static void Free(void** obj) => UnmanagedObject.Free(*obj); + + public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj)); + + public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj)); + + public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj)); + + public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj)); + + public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex)); + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs b/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs new file mode 100644 index 000000000..deae7c05e --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs @@ -0,0 +1,19 @@ +using System; +using System.Runtime.InteropServices; + +namespace ZstdSharp +{ + /* + * Wrap object to void* to make it unmanaged + */ + internal static unsafe class UnmanagedObject + { + public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj)); + + private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value); + + public static T Unwrap(void* value) => (T)UnwrapGcHandle(value).Target!; + + public static void Free(void* value) => UnwrapGcHandle(value).Free(); + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs index 068e9fb1a..23f59ac3e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs @@ -367,7 +367,6 @@ private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8)) { - const nuint zeroFilled = 0; bitD->ptr = (sbyte*)&static_zeroFilled[0]; return BIT_DStream_status.BIT_DStream_overflow; } @@ -557,7 +556,6 @@ private static BIT_DStream_status BIT_reloadDStream(ref nuint bitD_bitContainer, { if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8)) { - const nuint zeroFilled = 0; bitD_ptr = (sbyte*)&static_zeroFilled[0]; return BIT_DStream_status.BIT_DStream_overflow; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs index 4b131a8f3..fc9492c48 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs @@ -503,7 +503,6 @@ private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_Decom for (; ; ) { byte* olimit; - int stream; { assert(op0 <= op1); assert(ip0 >= ilowest); @@ -1444,7 +1443,6 @@ private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_Decom for (; ; ) { byte* olimit; - int stream; { assert(op0 <= oend0); assert(ip0 >= ilowest); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs index 3e2e36a07..cdbf378db 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -3122,7 +3122,7 @@ private static nuint ZSTD_entropyCompressSeqStore(SeqStore_t* seqStorePtr, ZSTD_ return ZSTD_entropyCompressSeqStore_wExtLitBuffer(dst, dstCapacity, seqStorePtr->litStart, 
(nuint)(seqStorePtr->lit - seqStorePtr->litStart), srcSize, seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2); } - private static readonly ZSTD_BlockCompressor_f[][] blockCompressor = new ZSTD_BlockCompressor_f[4][] + private static readonly ZSTD_BlockCompressor_f?[][] blockCompressor = new ZSTD_BlockCompressor_f?[4][] { new ZSTD_BlockCompressor_f[10] { @@ -3163,7 +3163,7 @@ private static nuint ZSTD_entropyCompressSeqStore(SeqStore_t* seqStorePtr, ZSTD_ ZSTD_compressBlock_btultra_dictMatchState, ZSTD_compressBlock_btultra_dictMatchState }, - new ZSTD_BlockCompressor_f[10] + new ZSTD_BlockCompressor_f?[10] { null, null, @@ -3209,7 +3209,7 @@ private static nuint ZSTD_entropyCompressSeqStore(SeqStore_t* seqStorePtr, ZSTD_ * assumption : strat is a valid strategy */ private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { - ZSTD_BlockCompressor_f selectedCompressor; + ZSTD_BlockCompressor_f? 
selectedCompressor; assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder) != 0) { @@ -3222,7 +3222,7 @@ private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy s } assert(selectedCompressor != null); - return selectedCompressor; + return selectedCompressor.NotNull(); } private static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, byte* anchor, nuint lastLLSize) @@ -8219,4 +8219,4 @@ public static void ZSTD_CCtxParams_registerSequenceProducer(ZSTD_CCtx_params_s* } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/ZstdException.cs b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs new file mode 100644 index 000000000..d16ce18b7 --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs @@ -0,0 +1,16 @@ +using System; +using SharpCompress.Common; +using ZstdSharp.Unsafe; + +namespace ZstdSharp +{ + + + public class ZstdException : SharpCompressException + { + public ZstdException(ZSTD_ErrorCode code, string message) : base(message) + => Code = code; + + public ZSTD_ErrorCode Code { get; } + } +} From b545973c55df4f5b2cc4e7a0a3892e035a486f82 Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 16:41:58 +0100 Subject: [PATCH 3/6] stuff compiles now --- Directory.Packages.props | 4 +- .../ZStandard/CompressionStream.cs | 19 ++- .../ZStandard/DecompressionStream.cs | 14 +- .../ZStandard/Unsafe/ZstdCompress.cs | 2 +- .../ZStandard/Unsafe/ZstdmtCompress.cs | 4 +- src/SharpCompress/SharpCompress.csproj | 5 +- src/SharpCompress/packages.lock.json | 129 +++++++++++------- tests/SharpCompress.Test/packages.lock.json | 51 ++++--- 8 files changed, 147 insertions(+), 81 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 439ee7971..fd4473f8d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -7,8 +7,8 @@ - - + + diff --git 
a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs index 2bf952fd3..b65cc7523 100644 --- a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs @@ -68,7 +68,7 @@ public void LoadDictionary(byte[] dict) #if !NETSTANDARD2_0 && !NETFRAMEWORK public override async ValueTask DisposeAsync() #else - public async ValueTask DisposeAsync() + public async Task DisposeAsync() #endif { if (compressor == null) @@ -160,8 +160,14 @@ private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directiv } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0); } - private async ValueTask WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_EndDirective directive, +#if !NETSTANDARD2_0 && !NETFRAMEWORK + private async ValueTask WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_EndDirective directive, CancellationToken cancellationToken = default) +#else + private async Task WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_EndDirective directive, + CancellationToken cancellationToken = default) +#endif + { EnsureNotDisposed(); @@ -178,15 +184,20 @@ private async ValueTask WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_En } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? 
input.pos < input.size : remaining > 0); } +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); -#if !NETSTANDARD2_0 && !NETFRAMEWORK public override async ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); #else - public async ValueTask WriteAsync(ReadOnlyMemory buffer, + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken); + + public async Task WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); #endif diff --git a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs index 28dc88447..3f15227f3 100644 --- a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs @@ -146,14 +146,18 @@ public int Read(Span buffer) } } + +#if !NETSTANDARD2_0 && !NETFRAMEWORK public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); -#if !NETSTANDARD2_0 && !NETFRAMEWORK public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) #else - public async ValueTask ReadAsync(Memory buffer, + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => ReadAsync(new Memory(buffer, offset, count), cancellationToken); + public 
async Task ReadAsync(Memory buffer, CancellationToken cancellationToken = default) #endif { @@ -241,16 +245,16 @@ private void EnsureNotDisposed() } #if NETSTANDARD2_0 || NETFRAMEWORK - public virtual ValueTask DisposeAsync() + public virtual Task DisposeAsync() { try { Dispose(); - return default; + return Task.CompletedTask; } catch (Exception exc) { - return new ValueTask(Task.FromException(exc)); + return Task.FromException(exc); } } #endif diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs index cdbf378db..e5eee65b4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -3335,7 +3335,7 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz uint curr = (uint)(istart - @base); #if DEBUG if (sizeof(nint) == 8) - assert(istart - @base < (nint)unchecked((uint)-1)); + assert(istart - @base < unchecked((nint)(uint)-1)); #endif if (curr > ms->nextToUpdate + 384) ms->nextToUpdate = curr - (192 < curr - ms->nextToUpdate - 384 ? 
192 : curr - ms->nextToUpdate - 384); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs index caef6e892..b379f7ebd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs @@ -576,7 +576,7 @@ private static void ZSTDMT_compressionJob(void* jobDescription) int chunkNb; #if DEBUG if (sizeof(nuint) > sizeof(int)) - assert(job->src.size < 2147483647 * chunkSize); + assert(job->src.size < unchecked(2147483647 * chunkSize)); #endif assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) @@ -1614,4 +1614,4 @@ private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_ou } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/SharpCompress.csproj b/src/SharpCompress/SharpCompress.csproj index 31edb8325..760d6032e 100644 --- a/src/SharpCompress/SharpCompress.csproj +++ b/src/SharpCompress/SharpCompress.csproj @@ -35,12 +35,11 @@ $(DefineConstants);DEBUG_STREAMS - - - + + diff --git a/src/SharpCompress/packages.lock.json b/src/SharpCompress/packages.lock.json index 82a2426c9..1fc221dec 100644 --- a/src/SharpCompress/packages.lock.json +++ b/src/SharpCompress/packages.lock.json @@ -23,9 +23,30 @@ }, "System.Buffers": { "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" + "requested": "[4.6.1, )", + "resolved": "4.6.1", + "contentHash": "N8GXpmiLMtljq7gwvyS+1QvKT/W2J8sNAvx+HVg4NGmsG/H+2k/y9QI23auLJRterrzCiDH+IWAw4V/GPwsMlw==" + }, + "System.Memory": { + "type": "Direct", + "requested": "[4.6.3, )", + "resolved": "4.6.3", + "contentHash": "qdcDOgnFZY40+Q9876JUHnlHu7bosOHX8XISRoH94fwk6hgaeQGSgfZd8srWRZNt5bV9ZW2TljcegDNxsf+96A==", + "dependencies": { + "System.Buffers": "4.6.1", + "System.Numerics.Vectors": "4.6.1", + 
"System.Runtime.CompilerServices.Unsafe": "6.1.2" + } + }, + "System.Text.Encoding.CodePages": { + "type": "Direct", + "requested": "[8.0.0, )", + "resolved": "8.0.0", + "contentHash": "OZIsVplFGaVY90G2SbpgU7EnCoOO5pw1t4ic21dBF3/1omrJFpAGoNAVpPyMVOC90/hvgkGG3VFqR13YgZMQfg==", + "dependencies": { + "System.Memory": "4.5.5", + "System.Runtime.CompilerServices.Unsafe": "6.0.0" + } }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", @@ -41,6 +62,16 @@ "type": "Transitive", "resolved": "8.0.0", "contentHash": "dk9JPxTCIevS75HyEQ0E4OVAFhB2N+V9ShCXf8Q6FkUQZDkgLI12y679Nym1YqsiSysuQskT7Z+6nUf3yab6Vw==" + }, + "System.Numerics.Vectors": { + "type": "Transitive", + "resolved": "4.6.1", + "contentHash": "sQxefTnhagrhoq2ReR0D/6K0zJcr9Hrd6kikeXsA1I8kOCboTavcUC4r7TSfpKFeE163uMuxZcyfO1mGO3EN8Q==" + }, + "System.Runtime.CompilerServices.Unsafe": { + "type": "Transitive", + "resolved": "6.1.2", + "contentHash": "2hBr6zdbIBTDE3EhK7NSVNdX58uTK6iHW/P/Axmm9sl1xoGSLqDvMtpecn226TNwHByFokYwJmt/aQQNlO5CRw==" } }, ".NETFramework,Version=v4.8.1": { @@ -65,9 +96,30 @@ }, "System.Buffers": { "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" + "requested": "[4.6.1, )", + "resolved": "4.6.1", + "contentHash": "N8GXpmiLMtljq7gwvyS+1QvKT/W2J8sNAvx+HVg4NGmsG/H+2k/y9QI23auLJRterrzCiDH+IWAw4V/GPwsMlw==" + }, + "System.Memory": { + "type": "Direct", + "requested": "[4.6.3, )", + "resolved": "4.6.3", + "contentHash": "qdcDOgnFZY40+Q9876JUHnlHu7bosOHX8XISRoH94fwk6hgaeQGSgfZd8srWRZNt5bV9ZW2TljcegDNxsf+96A==", + "dependencies": { + "System.Buffers": "4.6.1", + "System.Numerics.Vectors": "4.6.1", + "System.Runtime.CompilerServices.Unsafe": "6.1.2" + } + }, + "System.Text.Encoding.CodePages": { + "type": "Direct", + "requested": "[8.0.0, )", + "resolved": "8.0.0", + "contentHash": "OZIsVplFGaVY90G2SbpgU7EnCoOO5pw1t4ic21dBF3/1omrJFpAGoNAVpPyMVOC90/hvgkGG3VFqR13YgZMQfg==", + 
"dependencies": { + "System.Memory": "4.5.5", + "System.Runtime.CompilerServices.Unsafe": "6.0.0" + } }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", @@ -83,18 +135,19 @@ "type": "Transitive", "resolved": "8.0.0", "contentHash": "dk9JPxTCIevS75HyEQ0E4OVAFhB2N+V9ShCXf8Q6FkUQZDkgLI12y679Nym1YqsiSysuQskT7Z+6nUf3yab6Vw==" + }, + "System.Numerics.Vectors": { + "type": "Transitive", + "resolved": "4.6.1", + "contentHash": "sQxefTnhagrhoq2ReR0D/6K0zJcr9Hrd6kikeXsA1I8kOCboTavcUC4r7TSfpKFeE163uMuxZcyfO1mGO3EN8Q==" + }, + "System.Runtime.CompilerServices.Unsafe": { + "type": "Transitive", + "resolved": "6.1.2", + "contentHash": "2hBr6zdbIBTDE3EhK7NSVNdX58uTK6iHW/P/Axmm9sl1xoGSLqDvMtpecn226TNwHByFokYwJmt/aQQNlO5CRw==" } }, ".NETStandard,Version=v2.0": { - "Microsoft.Bcl.AsyncInterfaces": { - "type": "Direct", - "requested": "[8.0.0, )", - "resolved": "8.0.0", - "contentHash": "3WA9q9yVqJp222P3x1wYIGDAkpjAku0TMUaaQV22g6L67AI0LdOIrVS7Ht2vJfLHGSPVuqN94vIr15qn+HEkHw==", - "dependencies": { - "System.Threading.Tasks.Extensions": "4.5.4" - } - }, "Microsoft.NETFramework.ReferenceAssemblies": { "type": "Direct", "requested": "[1.0.3, )", @@ -125,19 +178,19 @@ }, "System.Buffers": { "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" + "requested": "[4.6.1, )", + "resolved": "4.6.1", + "contentHash": "N8GXpmiLMtljq7gwvyS+1QvKT/W2J8sNAvx+HVg4NGmsG/H+2k/y9QI23auLJRterrzCiDH+IWAw4V/GPwsMlw==" }, "System.Memory": { "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "OEkbBQoklHngJ8UD8ez2AERSk2g+/qpAaSWWCBFbpH727HxDq5ydVkuncBaKcKfwRqXGWx64dS6G1SUScMsitg==", + "requested": "[4.6.3, )", + "resolved": "4.6.3", + "contentHash": "qdcDOgnFZY40+Q9876JUHnlHu7bosOHX8XISRoH94fwk6hgaeQGSgfZd8srWRZNt5bV9ZW2TljcegDNxsf+96A==", "dependencies": { - "System.Buffers": "4.6.0", - "System.Numerics.Vectors": "4.6.0", - 
"System.Runtime.CompilerServices.Unsafe": "6.1.0" + "System.Buffers": "4.6.1", + "System.Numerics.Vectors": "4.6.1", + "System.Runtime.CompilerServices.Unsafe": "6.1.2" } }, "System.Text.Encoding.CodePages": { @@ -172,21 +225,13 @@ }, "System.Numerics.Vectors": { "type": "Transitive", - "resolved": "4.6.0", - "contentHash": "t+SoieZsRuEyiw/J+qXUbolyO219tKQQI0+2/YI+Qv7YdGValA6WiuokrNKqjrTNsy5ABWU11bdKOzUdheteXg==" + "resolved": "4.6.1", + "contentHash": "sQxefTnhagrhoq2ReR0D/6K0zJcr9Hrd6kikeXsA1I8kOCboTavcUC4r7TSfpKFeE163uMuxZcyfO1mGO3EN8Q==" }, "System.Runtime.CompilerServices.Unsafe": { "type": "Transitive", - "resolved": "6.1.0", - "contentHash": "5o/HZxx6RVqYlhKSq8/zronDkALJZUT2Vz0hx43f0gwe8mwlM0y2nYlqdBwLMzr262Bwvpikeb/yEwkAa5PADg==" - }, - "System.Threading.Tasks.Extensions": { - "type": "Transitive", - "resolved": "4.5.4", - "contentHash": "zteT+G8xuGu6mS+mzDzYXbzS7rd3K6Fjb9RiZlYlJPam2/hU7JCBZBVEcywNuR+oZ1ncTvc/cq0faRr3P01OVg==", - "dependencies": { - "System.Runtime.CompilerServices.Unsafe": "4.5.3" - } + "resolved": "6.1.2", + "contentHash": "2hBr6zdbIBTDE3EhK7NSVNdX58uTK6iHW/P/Axmm9sl1xoGSLqDvMtpecn226TNwHByFokYwJmt/aQQNlO5CRw==" } }, "net6.0": { @@ -209,12 +254,6 @@ "Microsoft.SourceLink.Common": "8.0.0" } }, - "System.Buffers": { - "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", @@ -257,12 +296,6 @@ "Microsoft.SourceLink.Common": "8.0.0" } }, - "System.Buffers": { - "type": "Direct", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" - }, "Microsoft.Build.Tasks.Git": { "type": "Transitive", "resolved": "8.0.0", diff --git a/tests/SharpCompress.Test/packages.lock.json b/tests/SharpCompress.Test/packages.lock.json index ae6cfd236..c6e076f28 
100644 --- a/tests/SharpCompress.Test/packages.lock.json +++ b/tests/SharpCompress.Test/packages.lock.json @@ -82,6 +82,11 @@ "resolved": "1.5.0", "contentHash": "EXKiDFsChZW0RjrZ4FYHu9aW6+P4MCgEDCklsVseRfhoO0F+dXeMSsMRAlVXIo06kGJ/zv+2w1a2uc2+kxxSaQ==" }, + "System.Numerics.Vectors": { + "type": "Transitive", + "resolved": "4.6.1", + "contentHash": "sQxefTnhagrhoq2ReR0D/6K0zJcr9Hrd6kikeXsA1I8kOCboTavcUC4r7TSfpKFeE163uMuxZcyfO1mGO3EN8Q==" + }, "System.Reflection.Metadata": { "type": "Transitive", "resolved": "1.6.0", @@ -92,8 +97,8 @@ }, "System.Runtime.CompilerServices.Unsafe": { "type": "Transitive", - "resolved": "4.5.3", - "contentHash": "3TIsJhD1EiiT0w2CcDMN/iSSwnNnsrnbzeVHSKkaEgV85txMprmuO+Yq2AdSbeVGcg28pdNDTPK87tJhX7VFHw==" + "resolved": "6.1.2", + "contentHash": "2hBr6zdbIBTDE3EhK7NSVNdX58uTK6iHW/P/Axmm9sl1xoGSLqDvMtpecn226TNwHByFokYwJmt/aQQNlO5CRw==" }, "System.Threading.Tasks.Extensions": { "type": "Transitive", @@ -151,14 +156,37 @@ "sharpcompress": { "type": "Project", "dependencies": { - "System.Buffers": "[4.6.0, )" + "System.Buffers": "[4.6.1, )", + "System.Memory": "[4.6.3, )", + "System.Text.Encoding.CodePages": "[8.0.0, )" } }, "System.Buffers": { "type": "CentralTransitive", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" + "requested": "[4.6.1, )", + "resolved": "4.6.1", + "contentHash": "N8GXpmiLMtljq7gwvyS+1QvKT/W2J8sNAvx+HVg4NGmsG/H+2k/y9QI23auLJRterrzCiDH+IWAw4V/GPwsMlw==" + }, + "System.Memory": { + "type": "CentralTransitive", + "requested": "[4.6.3, )", + "resolved": "4.6.3", + "contentHash": "qdcDOgnFZY40+Q9876JUHnlHu7bosOHX8XISRoH94fwk6hgaeQGSgfZd8srWRZNt5bV9ZW2TljcegDNxsf+96A==", + "dependencies": { + "System.Buffers": "4.6.1", + "System.Numerics.Vectors": "4.6.1", + "System.Runtime.CompilerServices.Unsafe": "6.1.2" + } + }, + "System.Text.Encoding.CodePages": { + "type": "CentralTransitive", + "requested": "[8.0.0, )", + 
"resolved": "8.0.0", + "contentHash": "OZIsVplFGaVY90G2SbpgU7EnCoOO5pw1t4ic21dBF3/1omrJFpAGoNAVpPyMVOC90/hvgkGG3VFqR13YgZMQfg==", + "dependencies": { + "System.Memory": "4.5.5", + "System.Runtime.CompilerServices.Unsafe": "6.0.0" + } } }, "net8.0": { @@ -303,16 +331,7 @@ } }, "sharpcompress": { - "type": "Project", - "dependencies": { - "System.Buffers": "[4.6.0, )" - } - }, - "System.Buffers": { - "type": "CentralTransitive", - "requested": "[4.6.0, )", - "resolved": "4.6.0", - "contentHash": "lN6tZi7Q46zFzAbRYXTIvfXcyvQQgxnY7Xm6C6xQ9784dEL1amjM6S6Iw4ZpsvesAKnRVsM4scrDQaDqSClkjA==" + "type": "Project" } } } From 0a7ffd003b160759d8f4f96fbf45763aaf2c06c2 Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 16:42:22 +0100 Subject: [PATCH 4/6] ran formatting --- .../Compressors/ZStandard/BitOperations.cs | 124 +- .../ZStandard/CompressionStream.cs | 167 +- .../Compressors/ZStandard/Compressor.cs | 95 +- .../Compressors/ZStandard/Constants.cs | 12 +- .../ZStandard/DecompressionStream.cs | 95 +- .../Compressors/ZStandard/Decompressor.cs | 90 +- .../Compressors/ZStandard/JobThreadPool.cs | 8 +- .../Compressors/ZStandard/Pool.cs | 11 +- .../Compressors/ZStandard/SafeHandles.cs | 26 +- .../Compressors/ZStandard/ThrowHelper.cs | 12 +- .../ZStandard/Unsafe/Allocations.cs | 17 +- .../ZStandard/Unsafe/BIT_CStream_t.cs | 2 +- .../ZStandard/Unsafe/BIT_DStream_status.cs | 7 +- .../ZStandard/Unsafe/BIT_DStream_t.cs | 2 +- .../Compressors/ZStandard/Unsafe/Bits.cs | 12 +- .../Compressors/ZStandard/Unsafe/Bitstream.cs | 297 +- .../ZStandard/Unsafe/BlockSummary.cs | 2 +- .../ZStandard/Unsafe/COVER_best_s.cs | 2 +- .../ZStandard/Unsafe/COVER_ctx_t.cs | 2 +- .../ZStandard/Unsafe/COVER_dictSelection.cs | 2 +- .../ZStandard/Unsafe/COVER_epoch_info_t.cs | 2 +- .../ZStandard/Unsafe/COVER_map_pair_t_s.cs | 2 +- .../ZStandard/Unsafe/COVER_map_s.cs | 2 +- .../ZStandard/Unsafe/COVER_segment_t.cs | 2 +- .../Unsafe/COVER_tryParameters_data_s.cs | 2 +- 
.../Compressors/ZStandard/Unsafe/Clevels.cs | 951 +++- .../Compressors/ZStandard/Unsafe/Compiler.cs | 2 +- .../Compressors/ZStandard/Unsafe/Cover.cs | 119 +- .../ZStandard/Unsafe/DTableDesc.cs | 2 +- .../ZStandard/Unsafe/EStats_ress_t.cs | 4 +- .../ZStandard/Unsafe/EntropyCommon.cs | 177 +- .../ZStandard/Unsafe/ErrorPrivate.cs | 2 +- .../ZStandard/Unsafe/EstimatedBlockSize.cs | 2 +- .../ZStandard/Unsafe/FASTCOVER_accel_t.cs | 4 +- .../ZStandard/Unsafe/FASTCOVER_ctx_t.cs | 2 +- .../Unsafe/FASTCOVER_tryParameters_data_s.cs | 2 +- .../Compressors/ZStandard/Unsafe/FPStats.cs | 2 +- .../ZStandard/Unsafe/FSE_CState_t.cs | 2 +- .../ZStandard/Unsafe/FSE_DState_t.cs | 3 +- .../ZStandard/Unsafe/FSE_DTableHeader.cs | 2 +- .../ZStandard/Unsafe/FSE_DecompressWksp.cs | 2 +- .../ZStandard/Unsafe/FSE_decode_t.cs | 2 +- .../ZStandard/Unsafe/FSE_repeat.cs | 6 +- .../Unsafe/FSE_symbolCompressionTransform.cs | 2 +- .../Compressors/ZStandard/Unsafe/Fastcover.cs | 251 +- .../ZStandard/Unsafe/Fingerprint.cs | 2 +- .../Compressors/ZStandard/Unsafe/Fse.cs | 83 +- .../ZStandard/Unsafe/FseCompress.cs | 246 +- .../ZStandard/Unsafe/FseDecompress.cs | 291 +- .../ZStandard/Unsafe/HIST_checkInput_e.cs | 4 +- .../ZStandard/Unsafe/HUF_CStream_t.cs | 3 +- .../ZStandard/Unsafe/HUF_CTableHeader.cs | 2 +- .../Unsafe/HUF_CompressWeightsWksp.cs | 2 +- .../ZStandard/Unsafe/HUF_DEltX1.cs | 2 +- .../ZStandard/Unsafe/HUF_DEltX2.cs | 2 +- .../Unsafe/HUF_DecompressFastArgs.cs | 3 +- .../Unsafe/HUF_ReadDTableX1_Workspace.cs | 2 +- .../Unsafe/HUF_ReadDTableX2_Workspace.cs | 3 +- .../ZStandard/Unsafe/HUF_WriteCTableWksp.cs | 3 +- .../Unsafe/HUF_buildCTable_wksp_tables.cs | 3 +- .../ZStandard/Unsafe/HUF_compress_tables_t.cs | 3 +- .../ZStandard/Unsafe/HUF_flags_e.cs | 9 +- .../ZStandard/Unsafe/HUF_nbStreams_e.cs | 4 +- .../ZStandard/Unsafe/HUF_repeat.cs | 6 +- .../Compressors/ZStandard/Unsafe/Hist.cs | 99 +- .../ZStandard/Unsafe/HufCompress.cs | 772 ++- .../ZStandard/Unsafe/HufDecompress.cs | 770 ++- 
.../Compressors/ZStandard/Unsafe/Mem.cs | 10 +- .../ZStandard/Unsafe/RSyncState_t.cs | 2 +- .../Compressors/ZStandard/Unsafe/Range.cs | 3 +- .../ZStandard/Unsafe/RawSeqStore_t.cs | 15 +- .../ZStandard/Unsafe/RoundBuff_t.cs | 5 +- .../ZStandard/Unsafe/SeqCollector.cs | 2 +- .../Compressors/ZStandard/Unsafe/SeqDef_s.cs | 3 +- .../ZStandard/Unsafe/SeqStore_t.cs | 6 +- .../ZStandard/Unsafe/SerialState.cs | 5 +- .../ZStandard/Unsafe/SymbolEncodingType_e.cs | 4 +- .../Compressors/ZStandard/Unsafe/SyncPoint.cs | 3 +- .../ZStandard/Unsafe/XXH32_canonical_t.cs | 2 +- .../ZStandard/Unsafe/XXH32_state_s.cs | 7 +- .../ZStandard/Unsafe/XXH64_canonical_t.cs | 2 +- .../ZStandard/Unsafe/XXH64_state_s.cs | 7 +- .../ZStandard/Unsafe/XXH_alignment.cs | 5 +- .../ZStandard/Unsafe/XXH_errorcode.cs | 5 +- .../Compressors/ZStandard/Unsafe/Xxhash.cs | 98 +- .../ZStandard/Unsafe/ZDICT_cover_params_t.cs | 8 +- .../Unsafe/ZDICT_fastCover_params_t.cs | 10 +- .../ZStandard/Unsafe/ZDICT_legacy_params_t.cs | 2 +- .../ZStandard/Unsafe/ZDICT_params_t.cs | 4 +- .../ZStandard/Unsafe/ZSTDMT_CCtxPool.cs | 2 +- .../ZStandard/Unsafe/ZSTDMT_CCtx_s.cs | 3 +- .../ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs | 2 +- .../ZStandard/Unsafe/ZSTDMT_jobDescription.cs | 20 +- .../Unsafe/ZSTD_BlockCompressor_f.cs | 10 +- .../ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs | 2 +- .../ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_CCtx_params_s.cs | 19 +- .../ZStandard/Unsafe/ZSTD_CCtx_s.cs | 20 +- .../ZStandard/Unsafe/ZSTD_CDict_s.cs | 6 +- .../ZStandard/Unsafe/ZSTD_CParamMode_e.cs | 7 +- .../ZStandard/Unsafe/ZSTD_DCtx_s.cs | 18 +- .../ZStandard/Unsafe/ZSTD_DDictHashSet.cs | 2 +- .../ZStandard/Unsafe/ZSTD_DDict_s.cs | 2 +- .../ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_EndDirective.cs | 6 +- .../ZStandard/Unsafe/ZSTD_ErrorCode.cs | 6 +- .../ZStandard/Unsafe/ZSTD_MatchState_t.cs | 15 +- .../ZStandard/Unsafe/ZSTD_OffsetInfo.cs | 2 +- .../ZStandard/Unsafe/ZSTD_OptPrice_e.cs | 
4 +- .../ZStandard/Unsafe/ZSTD_ResetDirective.cs | 4 +- .../ZStandard/Unsafe/ZSTD_Sequence.cs | 5 +- .../ZStandard/Unsafe/ZSTD_SequenceLength.cs | 2 +- .../ZStandard/Unsafe/ZSTD_SequencePosition.cs | 4 +- .../ZStandard/Unsafe/ZSTD_blockSplitCtx.cs | 2 +- .../ZStandard/Unsafe/ZSTD_blockState_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_bounds.cs | 2 +- .../ZStandard/Unsafe/ZSTD_bufferMode_e.cs | 5 +- .../Unsafe/ZSTD_buffered_policy_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_cParameter.cs | 25 +- .../ZStandard/Unsafe/ZSTD_cStreamStage.cs | 4 +- .../Unsafe/ZSTD_compResetPolicy_e.cs | 4 +- .../Unsafe/ZSTD_compressedBlockState_t.cs | 2 +- .../Unsafe/ZSTD_compressionParameters.cs | 19 +- .../Unsafe/ZSTD_compressionStage_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_customMem.cs | 3 +- .../ZStandard/Unsafe/ZSTD_cwksp.cs | 2 +- .../Unsafe/ZSTD_cwksp_alloc_phase_e.cs | 4 +- .../Unsafe/ZSTD_cwksp_static_alloc_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_dParameter.cs | 5 +- .../ZStandard/Unsafe/ZSTD_dStage.cs | 4 +- .../ZStandard/Unsafe/ZSTD_dStreamStage.cs | 4 +- .../ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs | 7 +- .../Unsafe/ZSTD_dictContentType_e.cs | 6 +- .../ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs | 5 +- .../ZStandard/Unsafe/ZSTD_dictMode_e.cs | 4 +- .../Unsafe/ZSTD_dictTableLoadMethod_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_dictUses_e.cs | 6 +- .../Unsafe/ZSTD_entropyCTablesMetadata_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_entropyCTables_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_entropyDTables_t.cs | 6 +- .../Unsafe/ZSTD_forceIgnoreChecksum_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_format_e.cs | 5 +- .../ZStandard/Unsafe/ZSTD_frameHeader.cs | 5 +- .../ZStandard/Unsafe/ZSTD_frameParameters.cs | 4 +- .../ZStandard/Unsafe/ZSTD_frameProgression.cs | 7 +- .../ZStandard/Unsafe/ZSTD_frameSizeInfo.cs | 2 +- .../ZStandard/Unsafe/ZSTD_frameType_e.cs | 4 +- .../Unsafe/ZSTD_fseCTablesMetadata_t.cs | 3 +- .../ZStandard/Unsafe/ZSTD_fseCTables_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_fseState.cs | 2 +- 
.../ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs | 13 +- .../Unsafe/ZSTD_hufCTablesMetadata_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_hufCTables_t.cs | 3 +- .../ZStandard/Unsafe/ZSTD_inBuffer_s.cs | 4 +- .../Unsafe/ZSTD_indexResetPolicy_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_litLocation_e.cs | 6 +- .../Unsafe/ZSTD_literalCompressionMode_e.cs | 6 +- .../ZStandard/Unsafe/ZSTD_localDict.cs | 2 +- .../ZStandard/Unsafe/ZSTD_longLengthType_e.cs | 6 +- .../ZStandard/Unsafe/ZSTD_longOffset_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_match_t.cs | 3 +- .../ZStandard/Unsafe/ZSTD_nextInputType_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_optLdm_t.cs | 5 +- .../ZStandard/Unsafe/ZSTD_optimal_t.cs | 6 +- .../ZStandard/Unsafe/ZSTD_outBuffer_s.cs | 4 +- .../ZStandard/Unsafe/ZSTD_overlap_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_paramSwitch_e.cs | 6 +- .../ZStandard/Unsafe/ZSTD_parameters.cs | 2 +- .../ZStandard/Unsafe/ZSTD_prefixDict_s.cs | 2 +- .../Unsafe/ZSTD_refMultipleDDicts_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_resetTarget_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_seqSymbol.cs | 3 +- .../ZStandard/Unsafe/ZSTD_seqSymbol_header.cs | 2 +- .../ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs | 5 +- .../ZStandard/Unsafe/ZSTD_strategy.cs | 4 +- .../Unsafe/ZSTD_symbolEncodingTypeStats_t.cs | 3 +- .../Unsafe/ZSTD_tableFillPurpose_e.cs | 4 +- .../ZStandard/Unsafe/ZSTD_window_t.cs | 7 +- .../Compressors/ZStandard/Unsafe/Zdict.cs | 226 +- .../Compressors/ZStandard/Unsafe/Zstd.cs | 8 +- .../ZStandard/Unsafe/ZstdCommon.cs | 2 +- .../ZStandard/Unsafe/ZstdCompress.cs | 5039 ++++++++++++++--- .../ZStandard/Unsafe/ZstdCompressInternal.cs | 838 ++- .../ZStandard/Unsafe/ZstdCompressLiterals.cs | 155 +- .../ZStandard/Unsafe/ZstdCompressSequences.cs | 1232 ++-- .../Unsafe/ZstdCompressSuperblock.cs | 532 +- .../Compressors/ZStandard/Unsafe/ZstdCwksp.cs | 111 +- .../Compressors/ZStandard/Unsafe/ZstdDdict.cs | 108 +- .../ZStandard/Unsafe/ZstdDecompress.cs | 1204 +++- .../ZStandard/Unsafe/ZstdDecompressBlock.cs | 1843 +++++- 
.../Unsafe/ZstdDecompressInternal.cs | 542 +- .../ZStandard/Unsafe/ZstdDoubleFast.cs | 506 +- .../Compressors/ZStandard/Unsafe/ZstdFast.cs | 453 +- .../ZStandard/Unsafe/ZstdInternal.cs | 752 ++- .../Compressors/ZStandard/Unsafe/ZstdLazy.cs | 3074 ++++++++-- .../Compressors/ZStandard/Unsafe/ZstdLdm.cs | 287 +- .../ZStandard/Unsafe/ZstdLdmGeartab.cs | 791 ++- .../Compressors/ZStandard/Unsafe/ZstdOpt.cs | 1332 ++++- .../ZStandard/Unsafe/ZstdPresplit.cs | 103 +- .../ZStandard/Unsafe/ZstdmtCompress.cs | 490 +- .../ZStandard/Unsafe/_wksps_e__Union.cs | 4 +- .../ZStandard/Unsafe/algo_time_t.cs | 3 +- .../ZStandard/Unsafe/base_directive_e.cs | 4 +- .../ZStandard/Unsafe/blockProperties_t.cs | 2 +- .../ZStandard/Unsafe/blockType_e.cs | 4 +- .../Compressors/ZStandard/Unsafe/buffer_s.cs | 3 +- .../Compressors/ZStandard/Unsafe/dictItem.cs | 2 +- .../Compressors/ZStandard/Unsafe/inBuff_t.cs | 2 +- .../ZStandard/Unsafe/ldmEntry_t.cs | 2 +- .../ZStandard/Unsafe/ldmMatchCandidate_t.cs | 2 +- .../ZStandard/Unsafe/ldmParams_t.cs | 7 +- .../ZStandard/Unsafe/ldmRollingHashState_t.cs | 2 +- .../ZStandard/Unsafe/ldmState_t.cs | 4 +- .../Compressors/ZStandard/Unsafe/nodeElt_s.cs | 2 +- .../ZStandard/Unsafe/offsetCount_t.cs | 2 +- .../ZStandard/Unsafe/optState_t.cs | 17 +- .../Compressors/ZStandard/Unsafe/rankPos.cs | 2 +- .../ZStandard/Unsafe/rankValCol_t.cs | 2 +- .../Compressors/ZStandard/Unsafe/rawSeq.cs | 4 +- .../ZStandard/Unsafe/repcodes_s.cs | 2 +- .../ZStandard/Unsafe/searchMethod_e.cs | 4 +- .../ZStandard/Unsafe/seqState_t.cs | 3 +- .../ZStandard/Unsafe/seqStoreSplits.cs | 3 +- .../Compressors/ZStandard/Unsafe/seq_t.cs | 2 +- .../ZStandard/Unsafe/sortedSymbol_t.cs | 2 +- .../ZStandard/Unsafe/streaming_operation.cs | 4 +- .../Compressors/ZStandard/UnsafeHelper.cs | 38 +- .../Compressors/ZStandard/ZstdException.cs | 6 +- 228 files changed, 20370 insertions(+), 4918 deletions(-) diff --git a/src/SharpCompress/Compressors/ZStandard/BitOperations.cs 
b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs index d05634307..d35133b20 100644 --- a/src/SharpCompress/Compressors/ZStandard/BitOperations.cs +++ b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs @@ -19,22 +19,82 @@ namespace System.Numerics public static unsafe class BitOperations { // hack: should be public because of inline - public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer(new byte[] - { - 00, 01, 28, 02, 29, 14, 24, 03, - 30, 22, 20, 15, 25, 17, 04, 08, - 31, 27, 13, 23, 21, 19, 16, 07, - 26, 12, 18, 06, 11, 05, 10, 09 - }); + public static readonly byte* TrailingZeroCountDeBruijn = GetArrayPointer( + new byte[] + { + 00, + 01, + 28, + 02, + 29, + 14, + 24, + 03, + 30, + 22, + 20, + 15, + 25, + 17, + 04, + 08, + 31, + 27, + 13, + 23, + 21, + 19, + 16, + 07, + 26, + 12, + 18, + 06, + 11, + 05, + 10, + 09, + } + ); // hack: should be public because of inline - public static readonly byte* Log2DeBruijn = GetArrayPointer(new byte[] - { - 00, 09, 01, 10, 13, 21, 02, 29, - 11, 14, 16, 18, 22, 25, 03, 30, - 08, 12, 20, 28, 15, 17, 24, 07, - 19, 27, 23, 06, 26, 05, 04, 31 - }); + public static readonly byte* Log2DeBruijn = GetArrayPointer( + new byte[] + { + 00, + 09, + 01, + 10, + 13, + 21, + 02, + 29, + 11, + 14, + 16, + 18, + 22, + 25, + 03, + 30, + 08, + 12, + 20, + 28, + 15, + 17, + 24, + 07, + 19, + 27, + 23, + 06, + 26, + 05, + 04, + 31, + } + ); /// /// Returns the integer (floor) log of the specified value, base 2. @@ -69,7 +129,8 @@ public static int Log2(uint value) // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check return Log2DeBruijn[ // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u - (int)((value * 0x07C4ACDDu) >> 27)]; + (int)((value * 0x07C4ACDDu) >> 27) + ]; } /// @@ -98,8 +159,7 @@ public static int Log2(ulong value) /// /// The value. 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int TrailingZeroCount(int value) - => TrailingZeroCount((uint)value); + public static int TrailingZeroCount(int value) => TrailingZeroCount((uint)value); /// /// Count the number of trailing zero bits in an integer value. @@ -118,7 +178,8 @@ public static int TrailingZeroCount(uint value) // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check return TrailingZeroCountDeBruijn[ // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u - (int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)]; // Multi-cast mitigates redundant conv.u8 + (int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27) + ]; // Multi-cast mitigates redundant conv.u8 } /// @@ -127,8 +188,7 @@ public static int TrailingZeroCount(uint value) /// /// The value. [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int TrailingZeroCount(long value) - => TrailingZeroCount((ulong)value); + public static int TrailingZeroCount(long value) => TrailingZeroCount((ulong)value); /// /// Count the number of trailing zero bits in a mask. @@ -157,8 +217,8 @@ public static int TrailingZeroCount(ulong value) /// Any value outside the range [0..31] is treated as congruent mod 32. /// The rotated value. [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static uint RotateLeft(uint value, int offset) - => (value << offset) | (value >> (32 - offset)); + public static uint RotateLeft(uint value, int offset) => + (value << offset) | (value >> (32 - offset)); /// /// Rotates the specified value left by the specified number of bits. @@ -169,8 +229,8 @@ public static uint RotateLeft(uint value, int offset) /// Any value outside the range [0..63] is treated as congruent mod 64. /// The rotated value. 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ulong RotateLeft(ulong value, int offset) - => (value << offset) | (value >> (64 - offset)); + public static ulong RotateLeft(ulong value, int offset) => + (value << offset) | (value >> (64 - offset)); /// /// Rotates the specified value right by the specified number of bits. @@ -181,8 +241,8 @@ public static ulong RotateLeft(ulong value, int offset) /// Any value outside the range [0..31] is treated as congruent mod 32. /// The rotated value. [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static uint RotateRight(uint value, int offset) - => (value >> offset) | (value << (32 - offset)); + public static uint RotateRight(uint value, int offset) => + (value >> offset) | (value << (32 - offset)); /// /// Rotates the specified value right by the specified number of bits. @@ -193,8 +253,8 @@ public static uint RotateRight(uint value, int offset) /// Any value outside the range [0..63] is treated as congruent mod 64. /// The rotated value. [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static ulong RotateRight(ulong value, int offset) - => (value >> offset) | (value << (64 - offset)); + public static ulong RotateRight(ulong value, int offset) => + (value >> offset) | (value << (64 - offset)); /// /// Count the number of leading zero bits in a mask. 
@@ -221,9 +281,11 @@ public static int LeadingZeroCount(uint value) value |= value >> 16; // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check - return 31 ^ Log2DeBruijn[ - // uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here - (int)((value * 0x07C4ACDDu) >> 27)]; + return 31 + ^ Log2DeBruijn[ + // uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here + (int)((value * 0x07C4ACDDu) >> 27) + ]; } /// diff --git a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs index b65cc7523..c8546afc0 100644 --- a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs @@ -16,14 +16,21 @@ public class CompressionStream : Stream private Compressor? compressor; private ZSTD_outBuffer_s output; - public CompressionStream(Stream stream, int level = Compressor.DefaultCompressionLevel, - int bufferSize = 0, bool leaveOpen = true) - : this(stream, new Compressor(level), bufferSize, false, leaveOpen) - { - } - - public CompressionStream(Stream stream, Compressor compressor, int bufferSize = 0, - bool preserveCompressor = true, bool leaveOpen = true) + public CompressionStream( + Stream stream, + int level = Compressor.DefaultCompressionLevel, + int bufferSize = 0, + bool leaveOpen = true + ) + : this(stream, new Compressor(level), bufferSize, false, leaveOpen) { } + + public CompressionStream( + Stream stream, + Compressor compressor, + int bufferSize = 0, + bool preserveCompressor = true, + bool leaveOpen = true + ) { if (stream == null) throw new ArgumentNullException(nameof(stream)); @@ -40,9 +47,11 @@ public CompressionStream(Stream stream, Compressor compressor, int bufferSize = this.leaveOpen = leaveOpen; var outputBufferSize = - bufferSize > 0 ? 
bufferSize : (int) Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess(); + bufferSize > 0 + ? bufferSize + : (int)Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess(); outputBuffer = ArrayPool.Shared.Rent(outputBufferSize); - output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) outputBufferSize}; + output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize }; } public void SetParameter(ZSTD_cParameter parameter, int value) @@ -120,97 +129,147 @@ private void ReleaseUnmanagedResources() } } - public override void Flush() - => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); + public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); - public override async Task FlushAsync(CancellationToken cancellationToken) - => await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken).ConfigureAwait(false); + public override async Task FlushAsync(CancellationToken cancellationToken) => + await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken) + .ConfigureAwait(false); private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive); - private async Task FlushInternalAsync(ZSTD_EndDirective directive, - CancellationToken cancellationToken = default) => - await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false); + private async Task FlushInternalAsync( + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false); - public override void Write(byte[] buffer, int offset, int count) - => Write(new ReadOnlySpan(buffer, offset, count)); + public override void Write(byte[] buffer, int offset, int count) => + Write(new ReadOnlySpan(buffer, offset, count)); #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override void Write(ReadOnlySpan buffer) - => WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); + public override void Write(ReadOnlySpan buffer) => + 
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #else - public void Write(ReadOnlySpan buffer) - => WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); + public void Write(ReadOnlySpan buffer) => + WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #endif private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directive) { EnsureNotDisposed(); - var input = new ZSTD_inBuffer_s {pos = 0, size = buffer != null ? (nuint) buffer.Length : 0}; + var input = new ZSTD_inBuffer_s + { + pos = 0, + size = buffer != null ? (nuint)buffer.Length : 0, + }; nuint remaining; do { output.pos = 0; remaining = CompressStream(ref input, buffer, directive); - var written = (int) output.pos; + var written = (int)output.pos; if (written > 0) innerStream.Write(outputBuffer, 0, written); - } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0); + } while ( + directive == ZSTD_EndDirective.ZSTD_e_continue + ? input.pos < input.size + : remaining > 0 + ); } #if !NETSTANDARD2_0 && !NETFRAMEWORK - private async ValueTask WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_EndDirective directive, - CancellationToken cancellationToken = default) + private async ValueTask WriteInternalAsync( + ReadOnlyMemory? buffer, + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) #else - private async Task WriteInternalAsync(ReadOnlyMemory? buffer, ZSTD_EndDirective directive, - CancellationToken cancellationToken = default) + private async Task WriteInternalAsync( + ReadOnlyMemory? buffer, + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) #endif { EnsureNotDisposed(); - var input = new ZSTD_inBuffer_s { pos = 0, size = buffer.HasValue ? (nuint)buffer.Value.Length : 0 }; + var input = new ZSTD_inBuffer_s + { + pos = 0, + size = buffer.HasValue ? (nuint)buffer.Value.Length : 0, + }; nuint remaining; do { output.pos = 0; - remaining = CompressStream(ref input, buffer.HasValue ? 
buffer.Value.Span : null, directive); + remaining = CompressStream( + ref input, + buffer.HasValue ? buffer.Value.Span : null, + directive + ); - var written = (int) output.pos; + var written = (int)output.pos; if (written > 0) - await innerStream.WriteAsync(outputBuffer, 0, written, cancellationToken).ConfigureAwait(false); - } while (directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0); + await innerStream + .WriteAsync(outputBuffer, 0, written, cancellationToken) + .ConfigureAwait(false); + } while ( + directive == ZSTD_EndDirective.ZSTD_e_continue + ? input.pos < input.size + : remaining > 0 + ); } #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); - - public override async ValueTask WriteAsync(ReadOnlyMemory buffer, - CancellationToken cancellationToken = default) - => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); + public override Task WriteAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => + WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); + + public override async ValueTask WriteAsync( + ReadOnlyMemory buffer, + CancellationToken cancellationToken = default + ) => + await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) + .ConfigureAwait(false); #else - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken); - - public async Task WriteAsync(ReadOnlyMemory buffer, - CancellationToken cancellationToken = default) - => await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken).ConfigureAwait(false); + public override 
Task WriteAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken); + + public async Task WriteAsync( + ReadOnlyMemory buffer, + CancellationToken cancellationToken = default + ) => + await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) + .ConfigureAwait(false); #endif - internal unsafe nuint CompressStream(ref ZSTD_inBuffer_s input, ReadOnlySpan inputBuffer, - ZSTD_EndDirective directive) + internal unsafe nuint CompressStream( + ref ZSTD_inBuffer_s input, + ReadOnlySpan inputBuffer, + ZSTD_EndDirective directive + ) { fixed (byte* inputBufferPtr = inputBuffer) fixed (byte* outputBufferPtr = outputBuffer) { input.src = inputBufferPtr; output.dst = outputBufferPtr; - return compressor.NotNull().CompressStream(ref input, ref output, directive).EnsureZstdSuccess(); + return compressor + .NotNull() + .CompressStream(ref input, ref output, directive) + .EnsureZstdSuccess(); } } @@ -226,9 +285,13 @@ public override long Position set => throw new NotSupportedException(); } - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); - public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override int Read(byte[] buffer, int offset, int count) => + throw new NotSupportedException(); private void EnsureNotDisposed() { diff --git a/src/SharpCompress/Compressors/ZStandard/Compressor.cs b/src/SharpCompress/Compressors/ZStandard/Compressor.cs index 71cd6c39e..470cd5636 100644 --- a/src/SharpCompress/Compressors/ZStandard/Compressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Compressor.cs @@ -62,7 +62,9 @@ public void LoadDictionary(ReadOnlySpan dict) { 
using var cctx = handle.Acquire(); fixed (byte* dictPtr = dict) - Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length).EnsureZstdSuccess(); + Methods + .ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); } public Compressor(int level = DefaultCompressionLevel) @@ -71,11 +73,11 @@ public Compressor(int level = DefaultCompressionLevel) Level = level; } - public static int GetCompressBound(int length) - => (int)Methods.ZSTD_compressBound((nuint)length); + public static int GetCompressBound(int length) => + (int)Methods.ZSTD_compressBound((nuint)length); - public static ulong GetCompressBoundLong(ulong length) - => Methods.ZSTD_compressBound((nuint)length); + public static ulong GetCompressBoundLong(ulong length) => + Methods.ZSTD_compressBound((nuint)length); public Span Wrap(ReadOnlySpan src) { @@ -84,8 +86,8 @@ public Span Wrap(ReadOnlySpan src) return new Span(dest, 0, length); } - public int Wrap(byte[] src, byte[] dest, int offset) - => Wrap(src, new Span(dest, offset, dest.Length - offset)); + public int Wrap(byte[] src, byte[] dest, int offset) => + Wrap(src, new Span(dest, offset, dest.Length - offset)); public int Wrap(ReadOnlySpan src, Span dest) { @@ -93,19 +95,37 @@ public int Wrap(ReadOnlySpan src, Span dest) fixed (byte* destPtr = dest) { using var cctx = handle.Acquire(); - return (int)Methods.ZSTD_compress2(cctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length) - .EnsureZstdSuccess(); + return (int) + Methods + .ZSTD_compress2( + cctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } } - public int Wrap(ArraySegment src, ArraySegment dest) - => Wrap((ReadOnlySpan)src, dest); - - public int Wrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength) - => Wrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength)); - - public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) - => 
TryWrap(src, new Span(dest, offset, dest.Length - offset), out written); + public int Wrap(ArraySegment src, ArraySegment dest) => + Wrap((ReadOnlySpan)src, dest); + + public int Wrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength + ) => + Wrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength) + ); + + public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) => + TryWrap(src, new Span(dest, offset, dest.Length - offset), out written); public bool TryWrap(ReadOnlySpan src, Span dest, out int written) { @@ -115,8 +135,13 @@ public bool TryWrap(ReadOnlySpan src, Span dest, out int written) nuint returnValue; using (var cctx = handle.Acquire()) { - returnValue = - Methods.ZSTD_compress2(cctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length); + returnValue = Methods.ZSTD_compress2( + cctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ); } if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) @@ -131,11 +156,23 @@ public bool TryWrap(ReadOnlySpan src, Span dest, out int written) } } - public bool TryWrap(ArraySegment src, ArraySegment dest, out int written) - => TryWrap((ReadOnlySpan)src, dest, out written); - - public bool TryWrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written) - => TryWrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength), out written); + public bool TryWrap(ArraySegment src, ArraySegment dest, out int written) => + TryWrap((ReadOnlySpan)src, dest, out written); + + public bool TryWrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength, + out int written + ) => + TryWrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength), + out written + ); public void Dispose() { @@ -143,13 +180,19 @@ public void Dispose() GC.SuppressFinalize(this); 
} - internal nuint CompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output, ZSTD_EndDirective directive) + internal nuint CompressStream( + ref ZSTD_inBuffer_s input, + ref ZSTD_outBuffer_s output, + ZSTD_EndDirective directive + ) { fixed (ZSTD_inBuffer_s* inputPtr = &input) fixed (ZSTD_outBuffer_s* outputPtr = &output) { using var cctx = handle.Acquire(); - return Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive).EnsureZstdSuccess(); + return Methods + .ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive) + .EnsureZstdSuccess(); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Constants.cs b/src/SharpCompress/Compressors/ZStandard/Constants.cs index c7af12313..844946f7e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Constants.cs +++ b/src/SharpCompress/Compressors/ZStandard/Constants.cs @@ -1,9 +1,9 @@ namespace ZstdSharp { - internal class Constants - { - //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks - //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 - public const ulong MaxByteArrayLength = 0x7FFFFFC7; - } + internal class Constants + { + //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks + //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 + public const ulong MaxByteArrayLength = 0x7FFFFFC7; + } } diff --git a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs index 3f15227f3..69c449359 100644 --- a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs @@ -20,13 +20,22 @@ public class DecompressionStream : Stream private nuint 
lastDecompressResult = 0; private bool contextDrained = true; - public DecompressionStream(Stream stream, int bufferSize = 0, bool checkEndOfStream = true, bool leaveOpen = true) - : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) - { - } - - public DecompressionStream(Stream stream, Decompressor decompressor, int bufferSize = 0, - bool checkEndOfStream = true, bool preserveDecompressor = true, bool leaveOpen = true) + public DecompressionStream( + Stream stream, + int bufferSize = 0, + bool checkEndOfStream = true, + bool leaveOpen = true + ) + : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { } + + public DecompressionStream( + Stream stream, + Decompressor decompressor, + int bufferSize = 0, + bool checkEndOfStream = true, + bool preserveDecompressor = true, + bool leaveOpen = true + ) { if (stream == null) throw new ArgumentNullException(nameof(stream)); @@ -43,9 +52,14 @@ public DecompressionStream(Stream stream, Decompressor decompressor, int bufferS this.leaveOpen = leaveOpen; this.checkEndOfStream = checkEndOfStream; - inputBufferSize = bufferSize > 0 ? bufferSize : (int) Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); + inputBufferSize = + bufferSize > 0 ? 
bufferSize : (int)Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); inputBuffer = ArrayPool.Shared.Rent(inputBufferSize); - input = new ZSTD_inBuffer_s {pos = (nuint) inputBufferSize, size = (nuint) inputBufferSize}; + input = new ZSTD_inBuffer_s + { + pos = (nuint)inputBufferSize, + size = (nuint)inputBufferSize, + }; } public void SetParameter(ZSTD_dParameter parameter, int value) @@ -90,8 +104,8 @@ protected override void Dispose(bool disposing) } } - public override int Read(byte[] buffer, int offset, int count) - => Read(new Span(buffer, offset, count)); + public override int Read(byte[] buffer, int offset, int count) => + Read(new Span(buffer, offset, count)); #if !NETSTANDARD2_0 && !NETFRAMEWORK public override int Read(Span buffer) @@ -107,7 +121,7 @@ public int Read(Span buffer) return 0; } - var output = new ZSTD_outBuffer_s {pos = 0, size = (nuint) buffer.Length}; + var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; while (true) { // If there is still input available, or there might be data buffered in the decompressor context, flush that out @@ -125,7 +139,7 @@ public int Read(Span buffer) // If we have data to return, return it immediately, so we won't stall on Read if (output.pos > 0) { - return (int) output.pos; + return (int)output.pos; } } @@ -141,24 +155,36 @@ public int Read(Span buffer) return 0; } - input.size = (nuint) bytesRead; + input.size = (nuint)bytesRead; input.pos = 0; } } - #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); - - public override async ValueTask ReadAsync(Memory buffer, - CancellationToken cancellationToken = default) + public override Task ReadAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + + public 
override async ValueTask ReadAsync( + Memory buffer, + CancellationToken cancellationToken = default + ) #else - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => ReadAsync(new Memory(buffer, offset, count), cancellationToken); - public async Task ReadAsync(Memory buffer, - CancellationToken cancellationToken = default) + public override Task ReadAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken); + + public async Task ReadAsync( + Memory buffer, + CancellationToken cancellationToken = default + ) #endif { EnsureNotDisposed(); @@ -169,7 +195,7 @@ public async Task ReadAsync(Memory buffer, return 0; } - var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length}; + var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; while (true) { // If there is still input available, or there might be data buffered in the decompressor context, flush that out @@ -193,8 +219,13 @@ public async Task ReadAsync(Memory buffer, // Otherwise, read some more input int bytesRead; - if ((bytesRead = await innerStream.ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) - .ConfigureAwait(false)) == 0) + if ( + ( + bytesRead = await innerStream + .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) + .ConfigureAwait(false) + ) == 0 + ) { if (checkEndOfStream && lastDecompressResult != 0) { @@ -204,7 +235,7 @@ public async Task ReadAsync(Memory buffer, return 0; } - input.size = (nuint) bytesRead; + input.size = (nuint)bytesRead; input.pos = 0; } } @@ -234,9 +265,13 @@ public override long Position public override void Flush() => throw new NotSupportedException(); - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotSupportedException(); + public 
override void SetLength(long value) => throw new NotSupportedException(); - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => + throw new NotSupportedException(); private void EnsureNotDisposed() { diff --git a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs index 85b99e778..2e388bb27 100644 --- a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs @@ -36,38 +36,46 @@ public void LoadDictionary(ReadOnlySpan dict) { using var dctx = handle.Acquire(); fixed (byte* dictPtr = dict) - Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length).EnsureZstdSuccess(); + Methods + .ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); } public static ulong GetDecompressedSize(ReadOnlySpan src) { fixed (byte* srcPtr = src) - return Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length).EnsureContentSizeOk(); + return Methods + .ZSTD_decompressBound(srcPtr, (nuint)src.Length) + .EnsureContentSizeOk(); } - public static ulong GetDecompressedSize(ArraySegment src) - => GetDecompressedSize((ReadOnlySpan)src); + public static ulong GetDecompressedSize(ArraySegment src) => + GetDecompressedSize((ReadOnlySpan)src); - public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) - => GetDecompressedSize(new ReadOnlySpan(src, srcOffset, srcLength)); + public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) => + GetDecompressedSize(new ReadOnlySpan(src, srcOffset, srcLength)); public Span Unwrap(ReadOnlySpan src, int maxDecompressedSize = int.MaxValue) { var expectedDstSize = GetDecompressedSize(src); if (expectedDstSize > (ulong)maxDecompressedSize) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, - $"Decompressed content size 
{expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}" + ); if (expectedDstSize > Constants.MaxByteArrayLength) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, - $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}" + ); var dest = new byte[expectedDstSize]; var length = Unwrap(src, dest); return new Span(dest, 0, length); } - public int Unwrap(byte[] src, byte[] dest, int offset) - => Unwrap(src, new Span(dest, offset, dest.Length - offset)); + public int Unwrap(byte[] src, byte[] dest, int offset) => + Unwrap(src, new Span(dest, offset, dest.Length - offset)); public int Unwrap(ReadOnlySpan src, Span dest) { @@ -75,17 +83,34 @@ public int Unwrap(ReadOnlySpan src, Span dest) fixed (byte* destPtr = dest) { using var dctx = handle.Acquire(); - return (int)Methods - .ZSTD_decompressDCtx(dctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length) - .EnsureZstdSuccess(); + return (int) + Methods + .ZSTD_decompressDCtx( + dctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } } - public int Unwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength) - => Unwrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength)); - - public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) - => TryUnwrap(src, new Span(dest, offset, dest.Length - offset), out written); + public int Unwrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int 
dstOffset, + int dstLength + ) => + Unwrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength) + ); + + public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) => + TryUnwrap(src, new Span(dest, offset, dest.Length - offset), out written); public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) { @@ -95,8 +120,13 @@ public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) nuint returnValue; using (var dctx = handle.Acquire()) { - returnValue = - Methods.ZSTD_decompressDCtx(dctx, destPtr, (nuint)dest.Length, srcPtr, (nuint)src.Length); + returnValue = Methods.ZSTD_decompressDCtx( + dctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ); } if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) @@ -111,8 +141,20 @@ public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) } } - public bool TryUnwrap(byte[] src, int srcOffset, int srcLength, byte[] dst, int dstOffset, int dstLength, out int written) - => TryUnwrap(new ReadOnlySpan(src, srcOffset, srcLength), new Span(dst, dstOffset, dstLength), out written); + public bool TryUnwrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength, + out int written + ) => + TryUnwrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength), + out written + ); public void Dispose() { diff --git a/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs index a51c36798..c9bd20f79 100644 --- a/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs +++ b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs @@ -57,12 +57,8 @@ private void Worker(object? 
obj) if (queue.TryTake(out var job, -1, cancellationToken)) ((delegate* managed)job.function)(job.opaque); } - catch (InvalidOperationException) - { - } - catch (OperationCanceledException) - { - } + catch (InvalidOperationException) { } + catch (OperationCanceledException) { } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Pool.cs b/src/SharpCompress/Compressors/ZStandard/Pool.cs index 41c0ab579..0d681cbc2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Pool.cs +++ b/src/SharpCompress/Compressors/ZStandard/Pool.cs @@ -4,7 +4,8 @@ namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static JobThreadPool GetThreadPool(void* ctx) => UnmanagedObject.Unwrap(ctx); + private static JobThreadPool GetThreadPool(void* ctx) => + UnmanagedObject.Unwrap(ctx); /* ZSTD_createThreadPool() : public access point */ public static void* ZSTD_createThreadPool(nuint numThreads) @@ -23,7 +24,11 @@ public static unsafe partial class Methods return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } - private static void* POOL_create_advanced(nuint numThreads, nuint queueSize, ZSTD_customMem customMem) + private static void* POOL_create_advanced( + nuint numThreads, + nuint queueSize, + ZSTD_customMem customMem + ) { var jobThreadPool = new JobThreadPool((int)numThreads, (int)queueSize); return UnmanagedObject.Wrap(jobThreadPool); @@ -114,4 +119,4 @@ private static int POOL_tryAdd(void* ctx, void* function, void* opaque) return jobThreadPool.TryAdd(function, opaque) ? 1 : 0; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs index b470a4347..7d46add31 100644 --- a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs +++ b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs @@ -20,9 +20,8 @@ internal abstract unsafe class SafeZstdHandle : SafeHandle /// Parameterless constructor is hidden. 
Use the static Create factory /// method to create a new safe handle instance. /// - protected SafeZstdHandle() : base(IntPtr.Zero, true) - { - } + protected SafeZstdHandle() + : base(IntPtr.Zero, true) { } public sealed override bool IsInvalid => handle == IntPtr.Zero; } @@ -33,9 +32,7 @@ protected SafeZstdHandle() : base(IntPtr.Zero, true) internal sealed unsafe class SafeCctxHandle : SafeZstdHandle { /// - private SafeCctxHandle() - { - } + private SafeCctxHandle() { } /// /// Creates a new instance of . @@ -50,7 +47,10 @@ public static SafeCctxHandle Create() { var cctx = Methods.ZSTD_createCCtx(); if (cctx == null) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Failed to create cctx" + ); safeHandle.SetHandle((IntPtr)cctx); success = true; } @@ -85,9 +85,7 @@ protected override bool ReleaseHandle() internal sealed unsafe class SafeDctxHandle : SafeZstdHandle { /// - private SafeDctxHandle() - { - } + private SafeDctxHandle() { } /// /// Creates a new instance of . @@ -102,7 +100,10 @@ public static SafeDctxHandle Create() { var dctx = Methods.ZSTD_createDCtx(); if (dctx == null) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Failed to create dctx" + ); safeHandle.SetHandle((IntPtr)dctx); success = true; } @@ -140,7 +141,8 @@ protected override bool ReleaseHandle() /// Safe handle holders can be d to decrement the safe handle's /// reference count, and can be implicitly converted to pointers to . 
/// - internal unsafe ref struct SafeHandleHolder where T : unmanaged + internal unsafe ref struct SafeHandleHolder + where T : unmanaged { private readonly SafeHandle _handle; diff --git a/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs index 380e6acaf..bf0ac0cbd 100644 --- a/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs +++ b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs @@ -26,10 +26,16 @@ public static nuint EnsureZdictSuccess(this nuint returnValue) public static ulong EnsureContentSizeOk(this ulong returnValue) { if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size is not specified"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Decompressed content size is not specified" + ); if (returnValue == ZSTD_CONTENTSIZE_ERROR) - throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)"); + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Decompressed content size cannot be determined (e.g. 
invalid magic number, srcSize too small)" + ); return returnValue; } @@ -37,7 +43,7 @@ public static ulong EnsureContentSizeOk(this ulong returnValue) private static void ThrowException(nuint returnValue, string message) { var code = 0 - returnValue; - throw new ZstdException((ZSTD_ErrorCode) code, message); + throw new ZstdException((ZSTD_ErrorCode)code, message); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs index ea59312ff..46e132e13 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs @@ -10,7 +10,10 @@ public static unsafe partial class Methods private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem) { if (customMem.customAlloc != null) - return ((delegate* managed)customMem.customAlloc)(customMem.opaque, size); + return ((delegate* managed)customMem.customAlloc)( + customMem.opaque, + size + ); return malloc(size); } @@ -21,7 +24,10 @@ public static unsafe partial class Methods { /* calloc implemented as malloc+memset; * not as efficient as calloc, but next best guess for custom malloc */ - void* ptr = ((delegate* managed)customMem.customAlloc)(customMem.opaque, size); + void* ptr = ((delegate* managed)customMem.customAlloc)( + customMem.opaque, + size + ); memset(ptr, 0, (uint)size); return ptr; } @@ -35,10 +41,13 @@ private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) if (ptr != null) { if (customMem.customFree != null) - ((delegate* managed)customMem.customFree)(customMem.opaque, ptr); + ((delegate* managed)customMem.customFree)( + customMem.opaque, + ptr + ); else free(ptr); } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs index ee8d40ba7..b3be314f0 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs @@ -12,4 +12,4 @@ public unsafe struct BIT_CStream_t public sbyte* ptr; public sbyte* endPtr; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs index 3fbf57137..aa2966ca3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs @@ -4,11 +4,14 @@ public enum BIT_DStream_status { /* fully refilled */ BIT_DStream_unfinished = 0, + /* still some bits left in bitstream */ BIT_DStream_endOfBuffer = 1, + /* bitstream entirely consumed, bit-exact */ BIT_DStream_completed = 2, + /* user requested more bits than present in bitstream */ - BIT_DStream_overflow = 3 + BIT_DStream_overflow = 3, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs index f47bc40b2..76cc23611 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs @@ -11,4 +11,4 @@ public unsafe struct BIT_DStream_t public sbyte* start; public sbyte* limitPtr; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs index 94b6010c0..14fdc46ec 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs @@ -1,7 +1,7 @@ -using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; using System; using System.Numerics; +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -41,10 +41,14 @@ private 
static uint ZSTD_NbCommonBytes(nuint val) assert(val != 0); if (BitConverter.IsLittleEndian) { - return MEM_64bits ? (uint)BitOperations.TrailingZeroCount(val) >> 3 : (uint)BitOperations.TrailingZeroCount((uint)val) >> 3; + return MEM_64bits + ? (uint)BitOperations.TrailingZeroCount(val) >> 3 + : (uint)BitOperations.TrailingZeroCount((uint)val) >> 3; } - return MEM_64bits ? (uint)BitOperations.LeadingZeroCount(val) >> 3 : (uint)BitOperations.LeadingZeroCount((uint)val) >> 3; + return MEM_64bits + ? (uint)BitOperations.LeadingZeroCount(val) >> 3 + : (uint)BitOperations.LeadingZeroCount((uint)val) >> 3; } [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs index 23f59ac3e..41a5ea77f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs @@ -1,7 +1,7 @@ -using static ZstdSharp.UnsafeHelper; using System; -using System.Runtime.InteropServices; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics.X86; #endif @@ -11,45 +11,86 @@ namespace ZstdSharp.Unsafe public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_BIT_mask => new uint[32] - { - 0, - 1, - 3, - 7, - 0xF, - 0x1F, - 0x3F, - 0x7F, - 0xFF, - 0x1FF, - 0x3FF, - 0x7FF, - 0xFFF, - 0x1FFF, - 0x3FFF, - 0x7FFF, - 0xFFFF, - 0x1FFFF, - 0x3FFFF, - 0x7FFFF, - 0xFFFFF, - 0x1FFFFF, - 0x3FFFFF, - 0x7FFFFF, - 0xFFFFFF, - 0x1FFFFFF, - 0x3FFFFFF, - 0x7FFFFFF, - 0xFFFFFFF, - 0x1FFFFFFF, - 0x3FFFFFFF, - 0x7FFFFFFF - }; - private static uint* BIT_mask => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_BIT_mask)); + private static ReadOnlySpan Span_BIT_mask => + new uint[32] + { + 0, + 1, + 3, + 7, + 0xF, + 
0x1F, + 0x3F, + 0x7F, + 0xFF, + 0x1FF, + 0x3FF, + 0x7FF, + 0xFFF, + 0x1FFF, + 0x3FFF, + 0x7FFF, + 0xFFFF, + 0x1FFFF, + 0x3FFFF, + 0x7FFFF, + 0xFFFFF, + 0x1FFFFF, + 0x3FFFFF, + 0x7FFFFF, + 0xFFFFFF, + 0x1FFFFFF, + 0x3FFFFFF, + 0x7FFFFFF, + 0xFFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + }; + private static uint* BIT_mask => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_BIT_mask) + ); #else - private static readonly uint* BIT_mask = GetArrayPointer(new uint[32] { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF }); + private static readonly uint* BIT_mask = GetArrayPointer( + new uint[32] + { + 0, + 1, + 3, + 7, + 0xF, + 0x1F, + 0x3F, + 0x7F, + 0xFF, + 0x1FF, + 0x3FF, + 0x7FF, + 0xFFF, + 0x1FFF, + 0x3FFF, + 0x7FFF, + 0xFFFF, + 0x1FFFF, + 0x3FFFF, + 0x7FFFF, + 0xFFFFF, + 0x1FFFFF, + 0x3FFFFF, + 0x7FFFFF, + 0xFFFFFF, + 0x1FFFFFF, + 0x3FFFFFF, + 0x7FFFFFF, + 0xFFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + } + ); #endif /*-************************************************************** * bitStream encoding @@ -59,7 +100,11 @@ public static unsafe partial class Methods * @return : 0 if success, * otherwise an error code (can be tested using ERR_isError()) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity) + private static nuint BIT_initCStream( + ref BIT_CStream_t bitC, + void* startPtr, + nuint dstCapacity + ) { bitC.bitContainer = 0; bitC.bitPos = 0; @@ -94,7 +139,12 @@ private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_addBits(ref nuint bitC_bitContainer, ref uint bitC_bitPos, nuint value, uint nbBits) + private static void BIT_addBits( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + nuint value, + uint nbBits + ) { assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); @@ -106,7 +156,12 @@ private static void BIT_addBits(ref nuint bitC_bitContainer, ref uint bitC_bitPo * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_addBitsFast(ref nuint bitC_bitContainer, ref uint bitC_bitPos, nuint value, uint nbBits) + private static void BIT_addBitsFast( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + nuint value, + uint nbBits + ) { assert(value >> (int)nbBits == 0); assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); @@ -118,7 +173,12 @@ private static void BIT_addBitsFast(ref nuint bitC_bitContainer, ref uint bitC_b * assumption : bitContainer has not overflowed * unsafe version; does not check buffer overflow */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_flushBitsFast(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr) + private static void BIT_flushBitsFast( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr + ) { nuint nbBytes = bitC_bitPos >> 3; assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); @@ -135,7 +195,12 @@ private static void BIT_flushBitsFast(ref nuint bitC_bitContainer, ref uint bitC * note : does not signal buffer overflow. 
* overflow will be revealed later on using BIT_closeCStream() */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_flushBits(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr) + private static void BIT_flushBits( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr + ) { nuint nbBytes = bitC_bitPos >> 3; assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); @@ -152,7 +217,13 @@ private static void BIT_flushBits(ref nuint bitC_bitContainer, ref uint bitC_bit * @return : size of CStream, in bytes, * or 0 if it could not fit into dstBuffer */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_closeCStream(ref nuint bitC_bitContainer, ref uint bitC_bitPos, sbyte* bitC_ptr, sbyte* bitC_endPtr, sbyte* bitC_startPtr) + private static nuint BIT_closeCStream( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + sbyte* bitC_ptr, + sbyte* bitC_endPtr, + sbyte* bitC_startPtr + ) { BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1); BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); @@ -199,13 +270,16 @@ private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint switch (srcSize) { case 7: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; goto case 6; case 6: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; goto case 5; case 5: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; goto case 4; case 4: bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; @@ -224,7 +298,9 @@ private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, 
nuint byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; @@ -268,7 +344,11 @@ private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBi [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits) { - return BIT_getMiddleBits(bitD->bitContainer, (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits, nbBits); + return BIT_getMiddleBits( + bitD->bitContainer, + (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits, + nbBits + ); } /*! BIT_lookBitsFast() : @@ -278,7 +358,9 @@ private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits) { uint regMask = (uint)(sizeof(nuint) * 8 - 1); assert(nbBits >= 1); - return bitD->bitContainer << (int)(bitD->bitsConsumed & regMask) >> (int)(regMask + 1 - nbBits & regMask); + return bitD->bitContainer + << (int)(bitD->bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -341,21 +423,18 @@ private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_static_zeroFilled => new byte[] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }; - private static nuint* static_zeroFilled => (nuint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_static_zeroFilled)); + private static ReadOnlySpan Span_static_zeroFilled => + new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }; + private static nuint* static_zeroFilled => + (nuint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_static_zeroFilled) + ); #else - private static 
readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }); + private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer( + new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 } + ); #endif /*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . @@ -406,7 +485,10 @@ private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint BIT_endOfDStream(BIT_DStream_t* DStream) { - return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) ? 1U : 0U; + return + DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) + ? 1U + : 0U; } /*-******************************************************** @@ -472,7 +554,9 @@ private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nu byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; @@ -488,19 +572,33 @@ private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nu * On 64-bits, maxNbBits==56. * @return : value extracted */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits) + private static nuint BIT_lookBits( + nuint bitD_bitContainer, + uint bitD_bitsConsumed, + uint nbBits + ) { - return BIT_getMiddleBits(bitD_bitContainer, (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits, nbBits); + return BIT_getMiddleBits( + bitD_bitContainer, + (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits, + nbBits + ); } /*! 
BIT_lookBitsFast() : * unsafe version; only works if nbBits >= 1 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBitsFast(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits) + private static nuint BIT_lookBitsFast( + nuint bitD_bitContainer, + uint bitD_bitsConsumed, + uint nbBits + ) { uint regMask = (uint)(sizeof(nuint) * 8 - 1); assert(nbBits >= 1); - return bitD_bitContainer << (int)(bitD_bitsConsumed & regMask) >> (int)(regMask + 1 - nbBits & regMask); + return bitD_bitContainer + << (int)(bitD_bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -514,7 +612,11 @@ private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits) * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBits(nuint bitD_bitContainer, ref uint bitD_bitsConsumed, uint nbBits) + private static nuint BIT_readBits( + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + uint nbBits + ) { nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits); BIT_skipBits(ref bitD_bitsConsumed, nbBits); @@ -524,7 +626,11 @@ private static nuint BIT_readBits(nuint bitD_bitContainer, ref uint bitD_bitsCon /*! BIT_readBitsFast() : * unsafe version; only works if nbBits >= 1 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBitsFast(nuint bitD_bitContainer, ref uint bitD_bitsConsumed, uint nbBits) + private static nuint BIT_readBitsFast( + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + uint nbBits + ) { nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits); assert(nbBits >= 1); @@ -539,11 +645,22 @@ private static nuint BIT_readBitsFast(nuint bitD_bitContainer, ref uint bitD_bit * point you must use BIT_reloadDStream() to reload. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStreamFast(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start, sbyte* bitD_limitPtr) + private static BIT_DStream_status BIT_reloadDStreamFast( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start, + sbyte* bitD_limitPtr + ) { if (bitD_ptr < bitD_limitPtr) return BIT_DStream_status.BIT_DStream_overflow; - return BIT_reloadDStream_internal(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start); + return BIT_reloadDStream_internal( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start + ); } /*! BIT_reloadDStream() : @@ -552,7 +669,13 @@ private static BIT_DStream_status BIT_reloadDStreamFast(ref nuint bitD_bitContai * @return : status of `BIT_DStream_t` internal register. * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start, sbyte* bitD_limitPtr) + private static BIT_DStream_status BIT_reloadDStream( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start, + sbyte* bitD_limitPtr + ) { if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8)) { @@ -563,7 +686,12 @@ private static BIT_DStream_status BIT_reloadDStream(ref nuint bitD_bitContainer, assert(bitD_ptr >= bitD_start); if (bitD_ptr >= bitD_limitPtr) { - return BIT_reloadDStream_internal(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start); + return BIT_reloadDStream_internal( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start + ); } if (bitD_ptr == bitD_start) @@ -595,7 +723,12 @@ private static BIT_DStream_status BIT_reloadDStream(ref nuint 
bitD_bitContainer, * 2. look window is valid after shifted down : bitD->ptr >= bitD->start */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream_internal(ref nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ref sbyte* bitD_ptr, sbyte* bitD_start) + private static BIT_DStream_status BIT_reloadDStream_internal( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start + ) { assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8)); bitD_ptr -= bitD_bitsConsumed >> 3; @@ -609,9 +742,15 @@ private static BIT_DStream_status BIT_reloadDStream_internal(ref nuint bitD_bitC * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint BIT_endOfDStream(uint DStream_bitsConsumed, sbyte* DStream_ptr, sbyte* DStream_start) + private static uint BIT_endOfDStream( + uint DStream_bitsConsumed, + sbyte* DStream_ptr, + sbyte* DStream_start + ) { - return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8) ? 1U : 0U; + return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8) + ? 
1U + : 0U; } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs index 4a105a854..5905c452d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs @@ -6,4 +6,4 @@ public struct BlockSummary public nuint blockSize; public nuint litSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs index 629ee4092..5c26ba0a2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs @@ -18,4 +18,4 @@ public unsafe struct COVER_best_s public ZDICT_cover_params_t parameters; public nuint compressedSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs index 3e5fb2d61..cddd0f7b7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs @@ -17,4 +17,4 @@ public unsafe struct COVER_ctx_t public uint* dmerAt; public uint d; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs index 21a03e745..c077af0c0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs @@ -9,4 +9,4 @@ public unsafe struct COVER_dictSelection public nuint dictSize; public nuint totalCompressedSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs index 50b791c36..3d4210616 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs @@ -8,4 +8,4 @@ public struct COVER_epoch_info_t public uint num; public uint size; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs index 473be3765..121a8dde7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs @@ -5,4 +5,4 @@ public struct COVER_map_pair_t_s public uint key; public uint value; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs index 88188beca..1f872b013 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs @@ -7,4 +7,4 @@ public unsafe struct COVER_map_s public uint size; public uint sizeMask; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs index c217ce39a..c83a9f613 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs @@ -9,4 +9,4 @@ public struct COVER_segment_t public uint end; public uint score; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs index 43b10726b..91208d00c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs @@ -10,4 +10,4 @@ public unsafe struct COVER_tryParameters_data_s public nuint dictBufferCapacity; public ZDICT_cover_params_t parameters; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs index 8b5664c3c..35ca9cb85 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs @@ -2,112 +2,849 @@ namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters = new ZSTD_compressionParameters[4][] - { - new ZSTD_compressionParameters[23] + private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters = + new ZSTD_compressionParameters[4][] { - new ZSTD_compressionParameters(windowLog: 19, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 19, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 7, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 20, chainLog: 15, hashLog: 16, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 16, hashLog: 17, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 18, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 4, strategy: 
ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 21, chainLog: 19, hashLog: 20, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 20, hashLog: 21, searchLog: 4, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 21, hashLog: 22, searchLog: 6, minMatch: 5, targetLength: 16, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 4, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 23, searchLog: 5, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 23, hashLog: 23, searchLog: 6, minMatch: 5, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 22, chainLog: 22, hashLog: 22, searchLog: 5, minMatch: 5, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 23, chainLog: 23, hashLog: 22, searchLog: 5, minMatch: 4, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 23, chainLog: 23, hashLog: 22, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 23, chainLog: 24, hashLog: 22, 
searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 25, chainLog: 25, hashLog: 23, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 26, chainLog: 26, hashLog: 24, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 27, chainLog: 27, hashLog: 25, searchLog: 9, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) - }, - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters(windowLog: 18, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 13, hashLog: 14, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 14, hashLog: 14, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 16, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 17, hashLog: 18, searchLog: 5, minMatch: 5, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 3, minMatch: 5, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 8, strategy: 
ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 5, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 4, targetLength: 16, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 18, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, 
hashLog: 19, searchLog: 12, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 18, chainLog: 19, hashLog: 19, searchLog: 13, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) - }, - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 12, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 6, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 13, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 15, hashLog: 16, searchLog: 2, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 16, hashLog: 17, searchLog: 6, minMatch: 4, targetLength: 8, strategy: 
ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 17, hashLog: 17, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 4, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 10, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 5, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 7, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 17, chainLog: 18, hashLog: 17, searchLog: 11, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) - }, - new ZSTD_compressionParameters[23] - { - new 
ZSTD_compressionParameters(windowLog: 14, chainLog: 12, hashLog: 13, searchLog: 1, minMatch: 5, targetLength: 1, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 5, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 1, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_fast), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 15, searchLog: 2, minMatch: 4, targetLength: 0, strategy: ZSTD_strategy.ZSTD_dfast), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 2, strategy: ZSTD_strategy.ZSTD_greedy), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 4, strategy: ZSTD_strategy.ZSTD_lazy), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 4, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 6, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 14, hashLog: 14, searchLog: 8, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_lazy2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 9, minMatch: 4, targetLength: 8, strategy: ZSTD_strategy.ZSTD_btlazy2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 3, minMatch: 4, targetLength: 12, strategy: ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 4, minMatch: 3, targetLength: 24, strategy: 
ZSTD_strategy.ZSTD_btopt), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 14, searchLog: 5, minMatch: 3, targetLength: 32, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 64, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 5, minMatch: 3, targetLength: 48, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 6, minMatch: 3, targetLength: 128, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 7, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 256, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 8, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 9, minMatch: 3, targetLength: 512, strategy: ZSTD_strategy.ZSTD_btultra2), - new ZSTD_compressionParameters(windowLog: 14, chainLog: 15, hashLog: 15, searchLog: 10, minMatch: 3, targetLength: 999, strategy: ZSTD_strategy.ZSTD_btultra2) - } - }; + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 19, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 6, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 19, + chainLog: 13, + hashLog: 14, + searchLog: 1, + minMatch: 7, + targetLength: 0, + strategy: 
ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 20, + chainLog: 15, + hashLog: 16, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 16, + hashLog: 17, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 18, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 19, + hashLog: 20, + searchLog: 4, + minMatch: 5, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 19, + hashLog: 20, + searchLog: 4, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 20, + hashLog: 21, + searchLog: 4, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 21, + hashLog: 22, + searchLog: 5, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 21, + hashLog: 22, + searchLog: 6, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 23, + searchLog: 6, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + 
chainLog: 22, + hashLog: 22, + searchLog: 4, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 23, + searchLog: 5, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 23, + hashLog: 23, + searchLog: 6, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 22, + searchLog: 5, + minMatch: 5, + targetLength: 48, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 23, + hashLog: 22, + searchLog: 5, + minMatch: 4, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 23, + hashLog: 22, + searchLog: 6, + minMatch: 3, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 24, + hashLog: 22, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 25, + chainLog: 25, + hashLog: 23, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 26, + chainLog: 26, + hashLog: 24, + searchLog: 7, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 27, + chainLog: 27, + hashLog: 25, + searchLog: 9, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 5, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 18, + 
chainLog: 13, + hashLog: 14, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 14, + hashLog: 14, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 16, + hashLog: 16, + searchLog: 1, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 17, + hashLog: 18, + searchLog: 5, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 5, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 7, + minMatch: 4, + targetLength: 12, + 
strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 10, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 12, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 13, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 12, + hashLog: 12, + searchLog: 1, + minMatch: 5, + targetLength: 1, 
+ strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 13, + hashLog: 15, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 15, + hashLog: 16, + searchLog: 2, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 17, + hashLog: 17, + searchLog: 2, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 17, + hashLog: 17, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + 
windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 7, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 4, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 6, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 10, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 5, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 7, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 9, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 11, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( 
+ windowLog: 14, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 5, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 1, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 2, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 4, + minMatch: 4, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 3, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 8, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 9, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 3, + minMatch: 4, + 
targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 4, + minMatch: 3, + targetLength: 24, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 5, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 6, + minMatch: 3, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 5, + minMatch: 3, + targetLength: 48, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 8, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 9, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 10, + minMatch: 3, + targetLength: 999, + strategy: 
ZSTD_strategy.ZSTD_btultra2 + ), + }, + }; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs index 27ab2cdf2..ab5def3d6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs @@ -59,4 +59,4 @@ private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs) return add > 0 ? ptr + add : ptr; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs index ca1d7aa86..e90943ac6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs @@ -5,6 +5,7 @@ namespace ZstdSharp.Unsafe public static unsafe partial class Methods { private static int g_displayLevel = 0; + /** * Returns the sum of the sample sizes. */ @@ -23,7 +24,11 @@ private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples) /** * Warns the user when their corpus is too small. */ - private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel) + private static void COVER_warnOnSmallCorpus( + nuint maxDictSize, + nuint nbDmers, + int displayLevel + ) { double ratio = nbDmers / (double)maxDictSize; if (ratio >= 10) @@ -45,7 +50,12 @@ private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, in * @param passes The target number of passes over the dmer corpus. * More passes means a better dictionary. 
*/ - private static COVER_epoch_info_t COVER_computeEpochs(uint maxDictSize, uint nbDmers, uint k, uint passes) + private static COVER_epoch_info_t COVER_computeEpochs( + uint maxDictSize, + uint nbDmers, + uint k, + uint passes + ) { uint minEpochSize = k * 10; COVER_epoch_info_t epochs; @@ -66,7 +76,16 @@ private static COVER_epoch_info_t COVER_computeEpochs(uint maxDictSize, uint nbD /** * Checks total compressed size of a dictionary */ - private static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t parameters, nuint* samplesSizes, byte* samples, nuint* offsets, nuint nbTrainSamples, nuint nbSamples, byte* dict, nuint dictBufferCapacity) + private static nuint COVER_checkTotalCompressedSize( + ZDICT_cover_params_t parameters, + nuint* samplesSizes, + byte* samples, + nuint* offsets, + nuint nbTrainSamples, + nuint nbSamples, + byte* dict, + nuint dictBufferCapacity + ) { nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); /* Pointers */ @@ -81,7 +100,8 @@ private static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t paramet i = parameters.splitPoint < 1 ? nbTrainSamples : 0; for (; i < nbSamples; ++i) { - maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; + maxSampleSize = + samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; } dstCapacity = ZSTD_compressBound(maxSampleSize); @@ -99,7 +119,14 @@ private static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t paramet i = parameters.splitPoint < 1 ? 
nbTrainSamples : 0; for (; i < nbSamples; ++i) { - nuint size = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, samples + offsets[i], samplesSizes[i], cdict); + nuint size = ZSTD_compress_usingCDict( + cctx, + dst, + dstCapacity, + samples + offsets[i], + samplesSizes[i], + cdict + ); if (ERR_isError(size)) { totalCompressedSize = size; @@ -109,7 +136,7 @@ private static nuint COVER_checkTotalCompressedSize(ZDICT_cover_params_t paramet totalCompressedSize += size; } - _compressCleanup: + _compressCleanup: ZSTD_freeCCtx(cctx); ZSTD_freeCDict(cdict); if (dst != null) @@ -194,7 +221,11 @@ private static void COVER_best_start(COVER_best_s* best) * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ - private static void COVER_best_finish(COVER_best_s* best, ZDICT_cover_params_t parameters, COVER_dictSelection selection) + private static void COVER_best_finish( + COVER_best_s* best, + ZDICT_cover_params_t parameters, + COVER_dictSelection selection + ) { void* dict = selection.dictContent; nuint compressedSize = selection.totalCompressedSize; @@ -221,7 +252,9 @@ private static void COVER_best_finish(COVER_best_s* best, ZDICT_cover_params_t p best->dict = malloc(dictSize); if (best->dict == null) { - best->compressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + best->compressedSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC) + ); best->dictSize = 0; SynchronizationWrapper.Pulse(&best->mutex); SynchronizationWrapper.Exit(&best->mutex); @@ -271,7 +304,9 @@ private static COVER_dictSelection COVER_dictSelectionError(nuint error) */ private static uint COVER_dictSelectionIsError(COVER_dictSelection selection) { - return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null ? 1U : 0U; + return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null + ? 
1U + : 0U; } /** @@ -289,7 +324,19 @@ private static void COVER_dictSelectionFree(COVER_dictSelection selection) * smallest dictionary within a specified regression of the compressed size * from the largest dictionary. */ - private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nuint dictBufferCapacity, nuint dictContentSize, byte* samplesBuffer, nuint* samplesSizes, uint nbFinalizeSamples, nuint nbCheckSamples, nuint nbSamples, ZDICT_cover_params_t @params, nuint* offsets, nuint totalCompressedSize) + private static COVER_dictSelection COVER_selectDict( + byte* customDictContent, + nuint dictBufferCapacity, + nuint dictContentSize, + byte* samplesBuffer, + nuint* samplesSizes, + uint nbFinalizeSamples, + nuint nbCheckSamples, + nuint nbSamples, + ZDICT_cover_params_t @params, + nuint* offsets, + nuint totalCompressedSize + ) { nuint largestDict = 0; nuint largestCompressed = 0; @@ -305,7 +352,16 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui } memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize); - dictContentSize = ZDICT_finalizeDictionary(largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams); + dictContentSize = ZDICT_finalizeDictionary( + largestDictbuffer, + dictBufferCapacity, + customDictContent, + dictContentSize, + samplesBuffer, + samplesSizes, + nbFinalizeSamples, + @params.zParams + ); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); @@ -313,7 +369,16 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui return COVER_dictSelectionError(dictContentSize); } - totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, largestDictbuffer, dictContentSize); + totalCompressedSize = COVER_checkTotalCompressedSize( + @params, + samplesSizes, + samplesBuffer, + offsets, + nbCheckSamples, + 
nbSamples, + largestDictbuffer, + dictContentSize + ); if (ERR_isError(totalCompressedSize)) { free(largestDictbuffer); @@ -333,7 +398,16 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui while (dictContentSize < largestDict) { memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict); - dictContentSize = ZDICT_finalizeDictionary(candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize, samplesBuffer, samplesSizes, nbFinalizeSamples, @params.zParams); + dictContentSize = ZDICT_finalizeDictionary( + candidateDictBuffer, + dictBufferCapacity, + customDictContentEnd - dictContentSize, + dictContentSize, + samplesBuffer, + samplesSizes, + nbFinalizeSamples, + @params.zParams + ); if (ZDICT_isError(dictContentSize)) { free(largestDictbuffer); @@ -341,7 +415,16 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui return COVER_dictSelectionError(dictContentSize); } - totalCompressedSize = COVER_checkTotalCompressedSize(@params, samplesSizes, samplesBuffer, offsets, nbCheckSamples, nbSamples, candidateDictBuffer, dictContentSize); + totalCompressedSize = COVER_checkTotalCompressedSize( + @params, + samplesSizes, + samplesBuffer, + offsets, + nbCheckSamples, + nbSamples, + candidateDictBuffer, + dictContentSize + ); if (ERR_isError(totalCompressedSize)) { free(largestDictbuffer); @@ -352,7 +435,11 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui if (totalCompressedSize <= largestCompressed * regressionTolerance) { free(largestDictbuffer); - return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize); + return setDictSelection( + candidateDictBuffer, + dictContentSize, + totalCompressedSize + ); } dictContentSize *= 2; @@ -364,4 +451,4 @@ private static COVER_dictSelection COVER_selectDict(byte* customDictContent, nui return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } } -} \ 
No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs index e3fca967e..a0baad8d2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs @@ -10,4 +10,4 @@ public struct DTableDesc public byte tableLog; public byte reserved; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs index ecf8dc2cc..f55e6f7fc 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs @@ -4,9 +4,11 @@ public unsafe struct EStats_ress_t { /* dictionary */ public ZSTD_CDict_s* dict; + /* working context */ public ZSTD_CCtx_s* zc; + /* must be ZSTD_BLOCKSIZE_MAX allocated */ public void* workPlace; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs index 12c39f6bd..38f1729f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs @@ -37,7 +37,13 @@ private static string HUF_getErrorName(nuint code) * FSE NCount encoding-decoding ****************************************************************/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_readNCount_body(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + private static nuint FSE_readNCount_body( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) { byte* istart = (byte*)headerBuffer; byte* iend = istart + hbSize; @@ -57,11 +63,19 @@ private static nuint FSE_readNCount_body(short* 
normalizedCounter, uint* maxSVPt memset(buffer, 0, sizeof(sbyte) * 8); memcpy(buffer, headerBuffer, (uint)hbSize); { - nuint countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, buffer, sizeof(sbyte) * 8); + nuint countSize = FSE_readNCount( + normalizedCounter, + maxSVPtr, + tableLogPtr, + buffer, + sizeof(sbyte) * 8 + ); if (FSE_isError(countSize)) return countSize; if (countSize > hbSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); return countSize; } } @@ -198,17 +212,42 @@ private static nuint FSE_readNCount_body(short* normalizedCounter, uint* maxSVPt } /* Avoids the FORCE_INLINE of the _body() function. */ - private static nuint FSE_readNCount_body_default(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + private static nuint FSE_readNCount_body_default( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) { - return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); + return FSE_readNCount_body( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize + ); } /*! FSE_readNCount_bmi2(): * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. */ - private static nuint FSE_readNCount_bmi2(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize, int bmi2) + private static nuint FSE_readNCount_bmi2( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize, + int bmi2 + ) { - return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); + return FSE_readNCount_body_default( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize + ); } /*! 
FSE_readNCount(): @@ -216,9 +255,22 @@ Read compactly saved 'normalizedCounter' from 'rBuffer'. @return : size read from 'rBuffer', or an errorCode, which can be tested using FSE_isError(). maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ - private static nuint FSE_readNCount(short* normalizedCounter, uint* maxSVPtr, uint* tableLogPtr, void* headerBuffer, nuint hbSize) + private static nuint FSE_readNCount( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) { - return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, 0); + return FSE_readNCount_bmi2( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize, + 0 + ); } /*! HUF_readStats() : @@ -228,14 +280,44 @@ private static nuint FSE_readNCount(short* normalizedCounter, uint* maxSVPtr, ui @return : size read from `src` , or an error Code . Note : Needed by HUF_readCTable() and HUF_readDTableX?() . 
*/ - private static nuint HUF_readStats(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize) + private static nuint HUF_readStats( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize + ) { uint* wksp = stackalloc uint[219]; - return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(uint) * 219, 0); + return HUF_readStats_wksp( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + tableLogPtr, + src, + srcSize, + wksp, + sizeof(uint) * 219, + 0 + ); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int bmi2) + private static nuint HUF_readStats_body( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int bmi2 + ) { uint weightTotal; byte* ip = (byte*)src; @@ -266,7 +348,16 @@ private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* ra { if (iSize + 1 > srcSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize - 1, ip + 1, iSize, 6, workSpace, wkspSize, bmi2); + oSize = FSE_decompress_wksp_bmi2( + huffWeight, + hwSize - 1, + ip + 1, + iSize, + 6, + workSpace, + wkspSize, + bmi2 + ); if (FSE_isError(oSize)) return oSize; } @@ -278,7 +369,9 @@ private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* ra for (n = 0; n < oSize; n++) { if (huffWeight[n] > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); rankStats[huffWeight[n]]++; weightTotal 
+= (uint)(1 << huffWeight[n] >> 1); } @@ -297,7 +390,9 @@ private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* ra uint verif = (uint)(1 << (int)ZSTD_highbit32(rest)); uint lastWeight = ZSTD_highbit32(rest) + 1; if (verif != rest) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); huffWeight[oSize] = (byte)lastWeight; rankStats[lastWeight]++; } @@ -310,14 +405,56 @@ private static nuint HUF_readStats_body(byte* huffWeight, nuint hwSize, uint* ra } /* Avoids the FORCE_INLINE of the _body() function. */ - private static nuint HUF_readStats_body_default(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize) + private static nuint HUF_readStats_body_default( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize + ) { - return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0); + return HUF_readStats_body( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + tableLogPtr, + src, + srcSize, + workSpace, + wkspSize, + 0 + ); } - private static nuint HUF_readStats_wksp(byte* huffWeight, nuint hwSize, uint* rankStats, uint* nbSymbolsPtr, uint* tableLogPtr, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_readStats_wksp( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { - return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); + return HUF_readStats_body_default( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + 
tableLogPtr, + src, + srcSize, + workSpace, + wkspSize + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs index a04be98f8..7c4956a6e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs @@ -108,4 +108,4 @@ private static string ERR_getErrorString(ZSTD_ErrorCode code) } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs index 5e77ffd5d..6ea7f508d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs @@ -5,4 +5,4 @@ public struct EstimatedBlockSize public nuint estLitSize; public nuint estBlockSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs index 73a307fbd..773e913ff 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs @@ -7,12 +7,14 @@ public struct FASTCOVER_accel_t { /* Percentage of training samples used for ZDICT_finalizeDictionary */ public uint finalize; + /* Number of dmer skipped between each dmer counted in computeFrequency */ public uint skip; + public FASTCOVER_accel_t(uint finalize, uint skip) { this.finalize = finalize; this.skip = skip; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs index 718132baf..d5fa4f4ef 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs @@ -17,4 +17,4 @@ public unsafe struct FASTCOVER_ctx_t public uint f; public FASTCOVER_accel_t accelParams; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs index c80a4625e..45d67a0a8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs @@ -10,4 +10,4 @@ public unsafe struct FASTCOVER_tryParameters_data_s public nuint dictBufferCapacity; public ZDICT_cover_params_t parameters; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs index d97e84828..70ed5f8c2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs @@ -5,4 +5,4 @@ public struct FPStats public Fingerprint pastEvents; public Fingerprint newEvents; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs index a45fabd17..b6bf1e42e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs @@ -14,4 +14,4 @@ public unsafe struct FSE_CState_t public void* symbolTT; public uint stateLog; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs index 14d967ab1..0ad9873c7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs @@ -6,7 +6,8 @@ namespace 
ZstdSharp.Unsafe public unsafe struct FSE_DState_t { public nuint state; + /* precise table may vary, depending on U16 */ public void* table; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs index f4c638962..9fa98926b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs @@ -6,4 +6,4 @@ public struct FSE_DTableHeader public ushort tableLog; public ushort fastMode; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs index 1a3e02289..09e7c440e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs @@ -4,4 +4,4 @@ public unsafe struct FSE_DecompressWksp { public fixed short ncount[256]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs index 49f2ab6d8..49f07ea3d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs @@ -6,4 +6,4 @@ public struct FSE_decode_t public byte symbol; public byte nbBits; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs index 157431722..52bc103ce 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs @@ -4,9 +4,11 @@ public enum FSE_repeat { /**< Cannot use the previous table */ FSE_repeat_none, + /**< Can use the previous table but it must be checked */ 
FSE_repeat_check, + /**< Can use the previous table and it is assumed to be valid */ - FSE_repeat_valid + FSE_repeat_valid, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs index 5172daaaf..55dd48349 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs @@ -8,4 +8,4 @@ public struct FSE_symbolCompressionTransform public int deltaFindState; public uint deltaNbBits; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs index 5d7753448..03e4179be 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs @@ -20,7 +20,24 @@ private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) return ZSTD_hash8Ptr(p, f); } - private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer(new FASTCOVER_accel_t[11] { new FASTCOVER_accel_t(finalize: 100, skip: 0), new FASTCOVER_accel_t(finalize: 100, skip: 0), new FASTCOVER_accel_t(finalize: 50, skip: 1), new FASTCOVER_accel_t(finalize: 34, skip: 2), new FASTCOVER_accel_t(finalize: 25, skip: 3), new FASTCOVER_accel_t(finalize: 20, skip: 4), new FASTCOVER_accel_t(finalize: 17, skip: 5), new FASTCOVER_accel_t(finalize: 14, skip: 6), new FASTCOVER_accel_t(finalize: 13, skip: 7), new FASTCOVER_accel_t(finalize: 11, skip: 8), new FASTCOVER_accel_t(finalize: 10, skip: 9) }); + private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = + GetArrayPointer( + new FASTCOVER_accel_t[11] + { + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new 
FASTCOVER_accel_t(finalize: 50, skip: 1), + new FASTCOVER_accel_t(finalize: 34, skip: 2), + new FASTCOVER_accel_t(finalize: 25, skip: 3), + new FASTCOVER_accel_t(finalize: 20, skip: 4), + new FASTCOVER_accel_t(finalize: 17, skip: 5), + new FASTCOVER_accel_t(finalize: 14, skip: 6), + new FASTCOVER_accel_t(finalize: 13, skip: 7), + new FASTCOVER_accel_t(finalize: 11, skip: 8), + new FASTCOVER_accel_t(finalize: 10, skip: 9), + } + ); + /*-************************************* * Helper functions ***************************************/ @@ -35,7 +52,14 @@ private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) * * Once the dmer with hash value d is in the dictionary we set F(d) = 0. */ - private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uint* freqs, uint begin, uint end, ZDICT_cover_params_t parameters, ushort* segmentFreqs) + private static COVER_segment_t FASTCOVER_selectSegment( + FASTCOVER_ctx_t* ctx, + uint* freqs, + uint begin, + uint end, + ZDICT_cover_params_t parameters, + ushort* segmentFreqs + ) { /* Constants */ uint k = parameters.k; @@ -47,7 +71,7 @@ private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uin { begin = 0, end = 0, - score = 0 + score = 0, }; COVER_segment_t activeSegment; activeSegment.begin = begin; @@ -67,7 +91,11 @@ private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uin if (activeSegment.end - activeSegment.begin == dmersInK + 1) { /* Get hash value of the dmer to be eliminated from active segment */ - nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + nuint delIndex = FASTCOVER_hashPtrToIndex( + ctx->samples + activeSegment.begin, + f, + d + ); segmentFreqs[delIndex] -= 1; if (segmentFreqs[delIndex] == 0) { @@ -103,7 +131,12 @@ private static COVER_segment_t FASTCOVER_selectSegment(FASTCOVER_ctx_t* ctx, uin return bestSegment; } - private static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, nuint 
maxDictSize, uint f, uint accel) + private static int FASTCOVER_checkParameters( + ZDICT_cover_params_t parameters, + nuint maxDictSize, + uint f, + uint accel + ) { if (parameters.d == 0 || parameters.k == 0) { @@ -189,16 +222,32 @@ private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx * Returns 0 on success or error code on error. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. */ - private static nuint FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, uint d, double splitPoint, uint f, FASTCOVER_accel_t accelParams) + private static nuint FASTCOVER_ctx_init( + FASTCOVER_ctx_t* ctx, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + uint d, + double splitPoint, + uint f, + FASTCOVER_accel_t accelParams + ) { byte* samples = (byte*)samplesBuffer; nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Split samples into testing and training sets */ uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples; uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples; - nuint trainingSamplesSize = splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; - nuint testSamplesSize = splitPoint < 1 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; - if (totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong)) || totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1 * (1U << 30))) + nuint trainingSamplesSize = + splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; + nuint testSamplesSize = + splitPoint < 1 + ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) + : totalSamplesSize; + if ( + totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong)) + || totalSamplesSize >= (sizeof(nuint) == 8 ? 
unchecked((uint)-1) : 1 * (1U << 30)) + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } @@ -224,7 +273,7 @@ private static nuint FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, void* samplesBuffe d = d, f = f, accelParams = accelParams, - offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)) + offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)), }; if (ctx->offsets == null) { @@ -256,12 +305,24 @@ private static nuint FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, void* samplesBuffe /** * Given the prepared context build the dictionary. */ - private static nuint FASTCOVER_buildDictionary(FASTCOVER_ctx_t* ctx, uint* freqs, void* dictBuffer, nuint dictBufferCapacity, ZDICT_cover_params_t parameters, ushort* segmentFreqs) + private static nuint FASTCOVER_buildDictionary( + FASTCOVER_ctx_t* ctx, + uint* freqs, + void* dictBuffer, + nuint dictBufferCapacity, + ZDICT_cover_params_t parameters, + ushort* segmentFreqs + ) { byte* dict = (byte*)dictBuffer; nuint tail = dictBufferCapacity; /* Divide the data into epochs. We will select one segment from each epoch. 
*/ - COVER_epoch_info_t epochs = COVER_computeEpochs((uint)dictBufferCapacity, (uint)ctx->nbDmers, parameters.k, 1); + COVER_epoch_info_t epochs = COVER_computeEpochs( + (uint)dictBufferCapacity, + (uint)ctx->nbDmers, + parameters.k, + 1 + ); const nuint maxZeroScoreRun = 10; nuint zeroScoreRun = 0; nuint epoch; @@ -271,7 +332,14 @@ private static nuint FASTCOVER_buildDictionary(FASTCOVER_ctx_t* ctx, uint* freqs uint epochEnd = epochBegin + epochs.size; nuint segmentSize; /* Select a segment */ - COVER_segment_t segment = FASTCOVER_selectSegment(ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs); + COVER_segment_t segment = FASTCOVER_selectSegment( + ctx, + freqs, + epochBegin, + epochEnd, + parameters, + segmentFreqs + ); if (segment.score == 0) { if (++zeroScoreRun >= maxZeroScoreRun) @@ -283,7 +351,10 @@ private static nuint FASTCOVER_buildDictionary(FASTCOVER_ctx_t* ctx, uint* freqs } zeroScoreRun = 0; - segmentSize = segment.end - segment.begin + parameters.d - 1 < tail ? segment.end - segment.begin + parameters.d - 1 : tail; + segmentSize = + segment.end - segment.begin + parameters.d - 1 < tail + ? 
segment.end - segment.begin + parameters.d - 1 + : tail; if (segmentSize < parameters.d) { break; @@ -313,7 +384,9 @@ private static void FASTCOVER_tryParameters(void* opaque) ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort)); /* Allocate space for hash table, dict, and freqs */ byte* dict = (byte*)malloc(dictBufferCapacity); - COVER_dictSelection selection = COVER_dictSelectionError(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC))); + COVER_dictSelection selection = COVER_dictSelectionError( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)) + ); uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint)); if (segmentFreqs == null || dict == null || freqs == null) { @@ -322,16 +395,37 @@ private static void FASTCOVER_tryParameters(void* opaque) memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint))); { - nuint tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, parameters, segmentFreqs); - uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); - selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, totalCompressedSize); + nuint tail = FASTCOVER_buildDictionary( + ctx, + freqs, + dict, + dictBufferCapacity, + parameters, + segmentFreqs + ); + uint nbFinalizeSamples = (uint)( + ctx->nbTrainSamples * ctx->accelParams.finalize / 100 + ); + selection = COVER_selectDict( + dict + tail, + dictBufferCapacity, + dictBufferCapacity - tail, + ctx->samples, + ctx->samplesSizes, + nbFinalizeSamples, + ctx->nbTrainSamples, + ctx->nbSamples, + parameters, + ctx->offsets, + totalCompressedSize + ); if (COVER_dictSelectionIsError(selection) != 0) { goto _cleanup; } } - _cleanup: + _cleanup: free(dict); COVER_best_finish(data->best, parameters, selection); free(data); @@ -340,7 +434,10 @@ private static 
void FASTCOVER_tryParameters(void* opaque) free(freqs); } - private static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, ZDICT_cover_params_t* coverParams) + private static void FASTCOVER_convertToCoverParams( + ZDICT_fastCover_params_t fastCoverParams, + ZDICT_cover_params_t* coverParams + ) { coverParams->k = fastCoverParams.k; coverParams->d = fastCoverParams.d; @@ -351,7 +448,12 @@ private static void FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fast coverParams->shrinkDict = fastCoverParams.shrinkDict; } - private static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, ZDICT_fastCover_params_t* fastCoverParams, uint f, uint accel) + private static void FASTCOVER_convertToFastCoverParams( + ZDICT_cover_params_t coverParams, + ZDICT_fastCover_params_t* fastCoverParams, + uint f, + uint accel + ) { fastCoverParams->k = coverParams.k; fastCoverParams->d = coverParams.d; @@ -380,7 +482,14 @@ private static void FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t cove * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ - public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t parameters) + public static nuint ZDICT_trainFromBuffer_fastCover( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_fastCover_params_t parameters + ) { byte* dict = (byte*)dictBuffer; FASTCOVER_ctx_t ctx; @@ -392,7 +501,14 @@ public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dict parameters.accel = parameters.accel == 0 ? 
1 : parameters.accel; coverParams = new ZDICT_cover_params_t(); FASTCOVER_convertToCoverParams(parameters, &coverParams); - if (FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, parameters.accel) == 0) + if ( + FASTCOVER_checkParameters( + coverParams, + dictBufferCapacity, + parameters.f, + parameters.accel + ) == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } @@ -409,7 +525,16 @@ public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dict accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; { - nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, coverParams.d, parameters.splitPoint, parameters.f, accelParams); + nuint initVal = FASTCOVER_ctx_init( + &ctx, + samplesBuffer, + samplesSizes, + nbSamples, + coverParams.d, + parameters.splitPoint, + parameters.f, + accelParams + ); if (ERR_isError(initVal)) { return initVal; @@ -419,13 +544,32 @@ public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dict COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); { /* Initialize array to keep track of frequency of dmer within activeSegment */ - ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort)); - nuint tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, dictBufferCapacity, coverParams, segmentFreqs); - uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); - nuint dictionarySize = ZDICT_finalizeDictionary(dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); - if (!ERR_isError(dictionarySize)) - { - } + ushort* segmentFreqs = (ushort*)calloc( + (ulong)1 << (int)parameters.f, + sizeof(ushort) + ); + nuint tail = FASTCOVER_buildDictionary( + &ctx, + ctx.freqs, + dictBuffer, + dictBufferCapacity, + coverParams, + segmentFreqs + ); + uint 
nbFinalizeSamples = (uint)( + ctx.nbTrainSamples * ctx.accelParams.finalize / 100 + ); + nuint dictionarySize = ZDICT_finalizeDictionary( + dict, + dictBufferCapacity, + dict + tail, + dictBufferCapacity - tail, + samplesBuffer, + samplesSizes, + nbFinalizeSamples, + coverParams.zParams + ); + if (!ERR_isError(dictionarySize)) { } FASTCOVER_ctx_destroy(&ctx); free(segmentFreqs); @@ -451,7 +595,14 @@ public static nuint ZDICT_trainFromBuffer_fastCover(void* dictBuffer, nuint dict * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ - public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_fastCover_params_t* parameters) + public static nuint ZDICT_optimizeTrainFromBuffer_fastCover( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_fastCover_params_t* parameters + ) { ZDICT_cover_params_t coverParams; FASTCOVER_accel_t accelParams; @@ -520,7 +671,16 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nu /* Initialize the context for this value of d */ FASTCOVER_ctx_t ctx; { - nuint initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); + nuint initVal = FASTCOVER_ctx_init( + &ctx, + samplesBuffer, + samplesSizes, + nbSamples, + d, + splitPoint, + f, + accelParams + ); if (ERR_isError(initVal)) { COVER_best_destroy(&best); @@ -538,13 +698,17 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nu for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ - FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc((ulong)sizeof(FASTCOVER_tryParameters_data_s)); + FASTCOVER_tryParameters_data_s* data = 
(FASTCOVER_tryParameters_data_s*)malloc( + (ulong)sizeof(FASTCOVER_tryParameters_data_s) + ); if (data == null) { COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } data->ctx = &ctx; @@ -557,7 +721,14 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nu data->parameters.steps = kSteps; data->parameters.shrinkDict = shrinkDict; data->parameters.zParams.notificationLevel = (uint)g_displayLevel; - if (FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, data->ctx->f, accel) == 0) + if ( + FASTCOVER_checkParameters( + data->parameters, + dictBufferCapacity, + data->ctx->f, + accel + ) == 0 + ) { free(data); continue; @@ -566,7 +737,11 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nu COVER_best_start(&best); if (pool != null) { - POOL_add(pool, (delegate* managed)(&FASTCOVER_tryParameters), data); + POOL_add( + pool, + (delegate* managed)(&FASTCOVER_tryParameters), + data + ); } else { @@ -598,4 +773,4 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, nu } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs index 6ef7760f7..90559d8fa 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs @@ -5,4 +5,4 @@ public unsafe struct Fingerprint public fixed uint events[1024]; public nuint nbEvents; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs index dc2174b90..f42b45a14 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs @@ -25,28 +25,52 @@ private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint sy { FSE_initCState(ref statePtr, ct); { - FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)statePtr.symbolTT)[symbol]; + FSE_symbolCompressionTransform symbolTT = ( + (FSE_symbolCompressionTransform*)statePtr.symbolTT + )[symbol]; ushort* stateTable = (ushort*)statePtr.stateTable; uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16; statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits); - statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState]; + statePtr.value = stateTable[ + (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState + ]; } } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_encodeSymbol(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref FSE_CState_t statePtr, uint symbol) + private static void FSE_encodeSymbol( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref FSE_CState_t statePtr, + uint symbol + ) { - FSE_symbolCompressionTransform symbolTT = ((FSE_symbolCompressionTransform*)statePtr.symbolTT)[symbol]; + FSE_symbolCompressionTransform symbolTT = ( + (FSE_symbolCompressionTransform*)statePtr.symbolTT + )[symbol]; ushort* stateTable = (ushort*)statePtr.stateTable; uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16; BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut); - statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState]; + statePtr.value = stateTable[ + (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState + ]; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_flushCState(ref nuint bitC_bitContainer, ref uint bitC_bitPos, ref sbyte* bitC_ptr, sbyte* bitC_endPtr, ref FSE_CState_t statePtr) + private static void FSE_flushCState( + ref nuint 
bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr, + ref FSE_CState_t statePtr + ) { - BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, statePtr.stateLog); + BIT_addBits( + ref bitC_bitContainer, + ref bitC_bitPos, + (nuint)statePtr.value, + statePtr.stateLog + ); BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); } @@ -67,7 +91,12 @@ private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint FSE_bitCost(void* symbolTTPtr, uint tableLog, uint symbolValue, uint accuracyLog) + private static uint FSE_bitCost( + void* symbolTTPtr, + uint tableLog, + uint symbolValue, + uint accuracyLog + ) { FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr; uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; @@ -76,9 +105,11 @@ private static uint FSE_bitCost(void* symbolTTPtr, uint tableLog, uint symbolVal assert(accuracyLog < 31 - tableLog); { uint tableSize = (uint)(1 << (int)tableLog); - uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); + uint deltaFromThreshold = + threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); /* linear interpolation (very approximate) */ - uint normalizedDeltaFromThreshold = deltaFromThreshold << (int)accuracyLog >> (int)tableLog; + uint normalizedDeltaFromThreshold = + deltaFromThreshold << (int)accuracyLog >> (int)tableLog; uint bitMultiplier = (uint)(1 << (int)accuracyLog); assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); assert(normalizedDeltaFromThreshold <= bitMultiplier); @@ -87,12 +118,26 @@ private static uint FSE_bitCost(void* symbolTTPtr, uint tableLog, uint symbolVal } [MethodImpl(MethodImplOptions.AggressiveInlining)] - 
private static void FSE_initDState(ref FSE_DState_t DStatePtr, ref BIT_DStream_t bitD, uint* dt) + private static void FSE_initDState( + ref FSE_DState_t DStatePtr, + ref BIT_DStream_t bitD, + uint* dt + ) { void* ptr = dt; FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; - DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog); - BIT_reloadDStream(ref bitD.bitContainer, ref bitD.bitsConsumed, ref bitD.ptr, bitD.start, bitD.limitPtr); + DStatePtr.state = BIT_readBits( + bitD.bitContainer, + ref bitD.bitsConsumed, + DTableH->tableLog + ); + BIT_reloadDStream( + ref bitD.bitContainer, + ref bitD.bitsConsumed, + ref bitD.ptr, + bitD.start, + bitD.limitPtr + ); DStatePtr.table = dt + 1; } @@ -113,7 +158,11 @@ private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte FSE_decodeSymbol(ref FSE_DState_t DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed) + private static byte FSE_decodeSymbol( + ref FSE_DState_t DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed + ) { FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; uint nbBits = DInfo.nbBits; @@ -126,7 +175,11 @@ private static byte FSE_decodeSymbol(ref FSE_DState_t DStatePtr, nuint bitD_bitC /*! 
FSE_decodeSymbolFast() : unsafe, only works if no symbol has a probability > 50% */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte FSE_decodeSymbolFast(ref FSE_DState_t DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed) + private static byte FSE_decodeSymbolFast( + ref FSE_DState_t DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed + ) { FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; uint nbBits = DInfo.nbBits; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs index a218439f1..2a7de3aba 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs @@ -1,6 +1,6 @@ -using static ZstdSharp.UnsafeHelper; using System; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -11,7 +11,14 @@ public static unsafe partial class Methods * wkspSize should be sized to handle worst case situation, which is `1< wkspSize) + if ( + sizeof(uint) + * ( + (maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + + sizeof(ulong) / sizeof(uint) + ) + > wkspSize + ) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); tableU16[-2] = (ushort)tableLog; tableU16[-1] = (ushort)maxSymbolValue; @@ -137,7 +151,8 @@ private static nuint FSE_buildCTable_wksp(uint* ct, short* normalizedCounter, ui switch (normalizedCounter[s]) { case 0: - symbolTT[s].deltaNbBits = (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); + symbolTT[s].deltaNbBits = + (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); break; case -1: case 1: @@ -148,11 +163,15 @@ private static nuint FSE_buildCTable_wksp(uint* ct, short* normalizedCounter, ui break; default: assert(normalizedCounter[s] > 1); + { - uint maxBitsOut = tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1); + uint maxBitsOut = + tableLog - 
ZSTD_highbit32((uint)normalizedCounter[s] - 1); uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut; symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; - symbolTT[s].deltaFindState = (int)(total - (uint)normalizedCounter[s]); + symbolTT[s].deltaFindState = (int)( + total - (uint)normalizedCounter[s] + ); total += (uint)normalizedCounter[s]; } @@ -173,7 +192,14 @@ private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog) return maxSymbolValue != 0 ? maxHeaderSize : 512; } - private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog, uint writeIsSafe) + private static nuint FSE_writeNCount_generic( + void* header, + nuint headerBufferSize, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + uint writeIsSafe + ) { byte* ostart = (byte*)header; byte* @out = ostart; @@ -206,7 +232,9 @@ private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSiz start += 24; bitStream += 0xFFFFU << bitCount; if (writeIsSafe == 0 && @out > oend - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); @out[0] = (byte)bitStream; @out[1] = (byte)(bitStream >> 8); @out += 2; @@ -225,7 +253,9 @@ private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSiz if (bitCount > 16) { if (writeIsSafe == 0 && @out > oend - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); @out[0] = (byte)bitStream; @out[1] = (byte)(bitStream >> 8); @out += 2; @@ -282,15 +312,35 @@ private static nuint FSE_writeNCount_generic(void* header, nuint headerBufferSiz Compactly save 'normalizedCounter' into 'buffer'. @return : size of the compressed table, or an errorCode, which can be tested using FSE_isError(). 
*/ - private static nuint FSE_writeNCount(void* buffer, nuint bufferSize, short* normalizedCounter, uint maxSymbolValue, uint tableLog) + private static nuint FSE_writeNCount( + void* buffer, + nuint bufferSize, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog + ) { if (tableLog > 14 - 2) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); if (tableLog < 5) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) - return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); - return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); + return FSE_writeNCount_generic( + buffer, + bufferSize, + normalizedCounter, + maxSymbolValue, + tableLog, + 0 + ); + return FSE_writeNCount_generic( + buffer, + bufferSize, + normalizedCounter, + maxSymbolValue, + tableLog, + 1 + ); } /* provides the minimum logSize to safely represent a distribution */ @@ -306,7 +356,12 @@ private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue) /* ***************************************** * FSE advanced API ***************************************** */ - private static uint FSE_optimalTableLog_internal(uint maxTableLog, nuint srcSize, uint maxSymbolValue, uint minus) + private static uint FSE_optimalTableLog_internal( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue, + uint minus + ) { uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus; uint tableLog = maxTableLog; @@ -329,14 +384,25 @@ private static uint FSE_optimalTableLog_internal(uint maxTableLog, nuint srcSize dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. 
@return : recommended tableLog (necessarily <= 'maxTableLog') */ - private static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue) + private static uint FSE_optimalTableLog( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue + ) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); } /* Secondary normalization method. To be used when primary method fails. */ - private static nuint FSE_normalizeM2(short* norm, uint tableLog, uint* count, nuint total, uint maxSymbolValue, short lowProbCount) + private static nuint FSE_normalizeM2( + short* norm, + uint tableLog, + uint* count, + nuint total, + uint maxSymbolValue, + short lowProbCount + ) { const short NOT_YET_ASSIGNED = -2; uint s; @@ -397,7 +463,8 @@ private static nuint FSE_normalizeM2(short* norm, uint tableLog, uint* count, nu /* all values are pretty poor; probably incompressible data (should have already been detected); find max, then give all remaining points to max */ - uint maxV = 0, maxC = 0; + uint maxV = 0, + maxC = 0; for (s = 0; s <= maxSymbolValue; s++) if (count[s] > maxC) { @@ -447,21 +514,18 @@ private static nuint FSE_normalizeM2(short* norm, uint tableLog, uint* count, nu } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_rtbTable => new uint[8] - { - 0, - 473195, - 504333, - 520860, - 550000, - 700000, - 750000, - 830000 - }; - private static uint* rtbTable => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_rtbTable)); + private static ReadOnlySpan Span_rtbTable => + new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + private static uint* rtbTable => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_rtbTable) + ); #else - private static readonly uint* rtbTable = GetArrayPointer(new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }); + private static readonly uint* rtbTable = 
GetArrayPointer( + new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 } + ); #endif /*! FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) @@ -474,7 +538,14 @@ faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCo Otherwise, useLowProbCount=1 is a good default, since the speed difference is small. @return : tableLog, or an errorCode, which can be tested using FSE_isError() */ - private static nuint FSE_normalizeCount(short* normalizedCounter, uint tableLog, uint* count, nuint total, uint maxSymbolValue, uint useLowProbCount) + private static nuint FSE_normalizeCount( + short* normalizedCounter, + uint tableLog, + uint* count, + nuint total, + uint maxSymbolValue, + uint useLowProbCount + ) { if (tableLog == 0) tableLog = 13 - 2; @@ -516,7 +587,9 @@ private static nuint FSE_normalizeCount(short* normalizedCounter, uint tableLog, if (proba < 8) { ulong restToBeat = vStep * rtbTable[proba]; - proba += (short)(count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 1 : 0); + proba += (short)( + count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 
1 : 0 + ); } if (proba > largestP) @@ -533,7 +606,14 @@ private static nuint FSE_normalizeCount(short* normalizedCounter, uint tableLog, if (-stillToDistribute >= normalizedCounter[largest] >> 1) { /* corner case, need another normalization method */ - nuint errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount); + nuint errorCode = FSE_normalizeM2( + normalizedCounter, + tableLog, + count, + total, + maxSymbolValue, + lowProbCount + ); if (ERR_isError(errorCode)) return errorCode; } @@ -560,14 +640,22 @@ private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue) return 0; } - private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct, uint fast) + private static nuint FSE_compress_usingCTable_generic( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* ct, + uint fast + ) { byte* istart = (byte*)src; byte* iend = istart + srcSize; byte* ip = iend; BIT_CStream_t bitC; System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); - FSE_CState_t CState1, CState2; + FSE_CState_t CState1, + CState2; System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1); System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2); if (srcSize <= 2) @@ -588,9 +676,19 @@ private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, FSE_initCState2(ref CState2, ct, *--ip); FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); if (fast != 0) - BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); } else { @@ -604,9 +702,19 @@ private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, 
FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); if (fast != 0) - BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); } while (ip > istart) @@ -614,9 +722,19 @@ private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7) if (fast != 0) - BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7) { @@ -625,14 +743,42 @@ private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, } if (fast != 0) - BIT_flushBitsFast(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); } - FSE_flushCState(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr, ref CState2); - FSE_flushCState(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr, ref CState1); - return 
BIT_closeCStream(ref bitC_bitContainer, ref bitC_bitPos, bitC_ptr, bitC_endPtr, bitC.startPtr); + FSE_flushCState( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr, + ref CState2 + ); + FSE_flushCState( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr, + ref CState1 + ); + return BIT_closeCStream( + ref bitC_bitContainer, + ref bitC_bitPos, + bitC_ptr, + bitC_endPtr, + bitC.startPtr + ); } /*! FSE_compress_usingCTable(): @@ -640,7 +786,13 @@ private static nuint FSE_compress_usingCTable_generic(void* dst, nuint dstSize, @return : size of compressed data (<= `dstCapacity`), or 0 if compressed data could not fit into `dst`, or an errorCode, which can be tested using FSE_isError() */ - private static nuint FSE_compress_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, uint* ct) + private static nuint FSE_compress_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* ct + ) { uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 
1U : 0U; if (fast != 0) @@ -657,4 +809,4 @@ private static nuint FSE_compressBound(nuint size) return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint)); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs index 91ff90d62..9dee183ab 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs @@ -1,11 +1,18 @@ -using static ZstdSharp.UnsafeHelper; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static nuint FSE_buildDTable_internal(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize) + private static nuint FSE_buildDTable_internal( + uint* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + void* workSpace, + nuint wkspSize + ) { /* because *dt is unsigned, 32-bits aligned on 32-bits */ void* tdPtr = dt + 1; @@ -94,7 +101,8 @@ private static nuint FSE_buildDTable_internal(uint* dt, short* normalizedCounter { uint tableMask = tableSize - 1; uint step = (tableSize >> 1) + (tableSize >> 3) + 3; - uint s, position = 0; + uint s, + position = 0; for (s = 0; s < maxSV1; s++) { int i; @@ -118,23 +126,46 @@ private static nuint FSE_buildDTable_internal(uint* dt, short* normalizedCounter byte symbol = tableDecode[u].symbol; uint nextState = symbolNext[symbol]++; tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); - tableDecode[u].newState = (ushort)((nextState << tableDecode[u].nbBits) - tableSize); + tableDecode[u].newState = (ushort)( + (nextState << tableDecode[u].nbBits) - tableSize + ); } } return 0; } - private static nuint FSE_buildDTable_wksp(uint* dt, short* normalizedCounter, uint maxSymbolValue, uint tableLog, void* workSpace, nuint wkspSize) + 
private static nuint FSE_buildDTable_wksp( + uint* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + void* workSpace, + nuint wkspSize + ) { - return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize); + return FSE_buildDTable_internal( + dt, + normalizedCounter, + maxSymbolValue, + tableLog, + workSpace, + wkspSize + ); } /*-******************************************************* * Decompression (Byte symbols) *********************************************************/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_decompress_usingDTable_generic(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* dt, uint fast) + private static nuint FSE_decompress_usingDTable_generic( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* dt, + uint fast + ) { byte* ostart = (byte*)dst; byte* op = ostart; @@ -160,49 +191,144 @@ private static nuint FSE_decompress_usingDTable_generic(void* dst, nuint maxDstS sbyte* bitD_ptr = bitD.ptr; sbyte* bitD_start = bitD.start; sbyte* bitD_limitPtr = bitD.limitPtr; - if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_overflow + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - for (; BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_unfinished && op < olimit; op += 4) + for ( + ; + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_unfinished + && op < olimit; + op += 4 + ) { - op[0] = fast != 0 ? 
FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + op[0] = + fast != 0 + ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) - BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr); - op[1] = fast != 0 ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ); + op[1] = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8) { - if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) > BIT_DStream_status.BIT_DStream_unfinished) + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) > BIT_DStream_status.BIT_DStream_unfinished + ) { op += 2; break; } } - op[2] = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + op[2] = + fast != 0 + ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) - BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr); - op[3] = fast != 0 ? 
FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ); + op[3] = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); } while (true) { if (op > omax - 2) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); - if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + *op++ = + fast != 0 + ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_overflow + ) { - *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + *op++ = + fast != 0 + ? FSE_decodeSymbolFast( + ref state2, + bitD_bitContainer, + ref bitD_bitsConsumed + ) + : FSE_decodeSymbol( + ref state2, + bitD_bitContainer, + ref bitD_bitsConsumed + ); break; } if (op > omax - 2) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - *op++ = fast != 0 ? 
FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); - if (BIT_reloadDStream(ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr) == BIT_DStream_status.BIT_DStream_overflow) + *op++ = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_overflow + ) { - *op++ = fast != 0 ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + *op++ = + fast != 0 + ? FSE_decodeSymbolFast( + ref state1, + bitD_bitContainer, + ref bitD_bitsConsumed + ) + : FSE_decodeSymbol( + ref state1, + bitD_bitContainer, + ref bitD_bitsConsumed + ); break; } } @@ -212,7 +338,16 @@ private static nuint FSE_decompress_usingDTable_generic(void* dst, nuint maxDstS } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2) + private static nuint FSE_decompress_wksp_body( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize, + int bmi2 + ) { byte* istart = (byte*)cSrc; byte* ip = istart; @@ -224,7 +359,14 @@ private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void if (wkspSize < (nuint)sizeof(FSE_DecompressWksp)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); { - nuint NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2); + nuint NCountLength = FSE_readNCount_bmi2( + wksp->ncount, + &maxSymbolValue, + 
&tableLog, + istart, + cSrcSize, + bmi2 + ); if (ERR_isError(NCountLength)) return NCountLength; if (tableLog > maxLog) @@ -234,13 +376,42 @@ private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void cSrcSize -= NCountLength; } - if (((ulong)(1 + (1 << (int)tableLog) + 1) + (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 + sizeof(uint) - 1) / sizeof(uint) + (255 + 1) / 2 + 1) * sizeof(uint) > wkspSize) + if ( + ( + (ulong)(1 + (1 << (int)tableLog) + 1) + + ( + sizeof(short) * (maxSymbolValue + 1) + + (1UL << (int)tableLog) + + 8 + + sizeof(uint) + - 1 + ) / sizeof(uint) + + (255 + 1) / 2 + + 1 + ) * sizeof(uint) + > wkspSize + ) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - assert((nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) <= wkspSize); - workSpace = (byte*)workSpace + sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint); - wkspSize -= (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)); + assert( + (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) + <= wkspSize + ); + workSpace = + (byte*)workSpace + + sizeof(FSE_DecompressWksp) + + (1 + (1 << (int)tableLog)) * sizeof(uint); + wkspSize -= (nuint)( + sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint) + ); { - nuint _var_err__ = FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize); + nuint _var_err__ = FSE_buildDTable_internal( + dtable, + wksp->ncount, + maxSymbolValue, + tableLog, + workSpace, + wkspSize + ); if (ERR_isError(_var_err__)) return _var_err__; } @@ -250,20 +421,68 @@ private static nuint FSE_decompress_wksp_body(void* dst, nuint dstCapacity, void FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; uint fastMode = DTableH->fastMode; if (fastMode != 0) - return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1); - return 
FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); + return FSE_decompress_usingDTable_generic( + dst, + dstCapacity, + ip, + cSrcSize, + dtable, + 1 + ); + return FSE_decompress_usingDTable_generic( + dst, + dstCapacity, + ip, + cSrcSize, + dtable, + 0 + ); } } /* Avoids the FORCE_INLINE of the _body() function. */ - private static nuint FSE_decompress_wksp_body_default(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize) + private static nuint FSE_decompress_wksp_body_default( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize + ) { - return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0); + return FSE_decompress_wksp_body( + dst, + dstCapacity, + cSrc, + cSrcSize, + maxLog, + workSpace, + wkspSize, + 0 + ); } - private static nuint FSE_decompress_wksp_bmi2(void* dst, nuint dstCapacity, void* cSrc, nuint cSrcSize, uint maxLog, void* workSpace, nuint wkspSize, int bmi2) + private static nuint FSE_decompress_wksp_bmi2( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize, + int bmi2 + ) { - return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); + return FSE_decompress_wksp_body_default( + dst, + dstCapacity, + cSrc, + cSrcSize, + maxLog, + workSpace, + wkspSize + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs index 4d6c1081d..a36d71635 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum HIST_checkInput_e { trustInput, - checkMaxSymbolValue + checkMaxSymbolValue, } -} \ No 
newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs index c28390d57..e00f59c84 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs @@ -7,6 +7,7 @@ public unsafe struct HUF_CStream_t public byte* startPtr; public byte* ptr; public byte* endPtr; + public unsafe struct _bitContainer_e__FixedBuffer { public nuint e0; @@ -19,4 +20,4 @@ public unsafe struct _bitPos_e__FixedBuffer public nuint e1; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs index b2d934c7f..f89e380cd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs @@ -6,4 +6,4 @@ public unsafe struct HUF_CTableHeader public byte maxSymbolValue; public fixed byte unused[6]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs index 0a1a294bc..f3017cfa6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs @@ -7,4 +7,4 @@ public unsafe struct HUF_CompressWeightsWksp public fixed uint count[13]; public fixed short norm[13]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs index 6d907c47b..1ce3290b3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs @@ -9,4 +9,4 @@ public struct HUF_DEltX1 public byte nbBits; 
public byte @byte; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs index 51accf239..534b89829 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs @@ -10,4 +10,4 @@ public struct HUF_DEltX2 public byte nbBits; public byte length; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs index 0cebbd524..4add3e9de 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs @@ -22,6 +22,7 @@ public unsafe struct HUF_DecompressFastArgs public byte* ilowest; public byte* oend; public _iend_e__FixedBuffer iend; + public unsafe struct _ip_e__FixedBuffer { public byte* e0; @@ -46,4 +47,4 @@ public unsafe struct _iend_e__FixedBuffer public byte* e3; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs index e1377ed49..47e6cca50 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs @@ -8,4 +8,4 @@ public unsafe struct HUF_ReadDTableX1_Workspace public fixed byte symbols[256]; public fixed byte huffWeight[256]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs index 9bc3d35ff..03dc19a92 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs @@ -10,6 +10,7 @@ public unsafe struct HUF_ReadDTableX2_Workspace public _sortedSymbol_e__FixedBuffer sortedSymbol; public fixed byte weightList[256]; public fixed uint calleeWksp[219]; + #if NET8_0_OR_GREATER [InlineArray(12)] public unsafe struct _rankVal_e__FixedBuffer @@ -304,4 +305,4 @@ public unsafe struct _sortedSymbol_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs index 4e70d5dc7..2c6f9c87e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs @@ -3,8 +3,9 @@ namespace ZstdSharp.Unsafe public unsafe struct HUF_WriteCTableWksp { public HUF_CompressWeightsWksp wksp; + /* precomputed conversion table */ public fixed byte bitsToWeight[13]; public fixed byte huffWeight[255]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs index c1394b01d..f259c5b0d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs @@ -6,6 +6,7 @@ public struct HUF_buildCTable_wksp_tables { public _huffNodeTbl_e__FixedBuffer huffNodeTbl; public _rankPosition_e__FixedBuffer rankPosition; + #if NET8_0_OR_GREATER [InlineArray(512)] public unsafe struct _huffNodeTbl_e__FixedBuffer @@ -736,4 +737,4 @@ public unsafe struct _rankPosition_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs index a13ec931c..14bf2714c 
100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs @@ -7,6 +7,7 @@ public unsafe struct HUF_compress_tables_t public fixed uint count[256]; public _CTable_e__FixedBuffer CTable; public _wksps_e__Union wksps; + #if NET8_0_OR_GREATER [InlineArray(257)] public unsafe struct _CTable_e__FixedBuffer @@ -277,4 +278,4 @@ public unsafe struct _CTable_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs index 9a8903776..fe365c454 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs @@ -11,30 +11,35 @@ public enum HUF_flags_e * Otherwise: Ignored. */ HUF_flags_bmi2 = 1 << 0, + /** * If set: Test possible table depths to find the one that produces the smallest header + encoded size. * If unset: Use heuristic to find the table depth. */ HUF_flags_optimalDepth = 1 << 1, + /** * If set: If the previous table can encode the input, always reuse the previous table. * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. */ HUF_flags_preferRepeat = 1 << 2, + /** * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. * If unset: Always histogram the entire input. */ HUF_flags_suspectUncompressible = 1 << 3, + /** * If set: Don't use assembly implementations * If unset: Allow using assembly implementations */ HUF_flags_disableAsm = 1 << 4, + /** * If set: Don't use the fast decoding loop, always use the fallback decoding loop. * If unset: Use the fast decoding loop when possible. 
*/ - HUF_flags_disableFast = 1 << 5 + HUF_flags_disableFast = 1 << 5, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs index e4d5a86f1..55a931ac1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum HUF_nbStreams_e { HUF_singleStream, - HUF_fourStreams + HUF_fourStreams, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs index 39ed82d8b..2bae2cae0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs @@ -4,9 +4,11 @@ public enum HUF_repeat { /**< Cannot use the previous table */ HUF_repeat_none, + /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_check, + /**< Can use the previous table and it is assumed to be valid */ - HUF_repeat_valid + HUF_repeat_valid, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs index 7dce78ffd..586b280d9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs @@ -31,7 +31,12 @@ private static void HIST_add(uint* count, void* src, nuint srcSize) * @return : count of the most frequent symbol. * Note this function doesn't produce any error (i.e. it must succeed). 
*/ - private static uint HIST_count_simple(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize) + private static uint HIST_count_simple( + uint* count, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize + ) { byte* ip = (byte*)src; byte* end = ip + srcSize; @@ -71,7 +76,14 @@ private static uint HIST_count_simple(uint* count, uint* maxSymbolValuePtr, void * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32. * @return : largest histogram frequency, * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */ - private static nuint HIST_count_parallel_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, HIST_checkInput_e check, uint* workSpace) + private static nuint HIST_count_parallel_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + HIST_checkInput_e check, + uint* workSpace + ) { byte* ip = (byte*)source; byte* iend = ip + sourceSize; @@ -145,7 +157,9 @@ private static nuint HIST_count_parallel_wksp(uint* count, uint* maxSymbolValueP while (Counting1[maxSymbolValue] == 0) maxSymbolValue--; if (check != default && maxSymbolValue > *maxSymbolValuePtr) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall) + ); *maxSymbolValuePtr = maxSymbolValue; memmove(count, Counting1, countSize); } @@ -158,7 +172,14 @@ private static nuint HIST_count_parallel_wksp(uint* count, uint* maxSymbolValueP * `workSpace` is a writable buffer which must be 4-bytes aligned, * `workSpaceSize` must be >= HIST_WKSP_SIZE */ - private static nuint HIST_countFast_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, void* workSpace, nuint workSpaceSize) + private static nuint HIST_countFast_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + void* workSpace, + nuint workSpaceSize + ) { if (sourceSize < 1500) 
return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); @@ -166,29 +187,69 @@ private static nuint HIST_countFast_wksp(uint* count, uint* maxSymbolValuePtr, v return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); if (workSpaceSize < 1024 * sizeof(uint)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, HIST_checkInput_e.trustInput, (uint*)workSpace); + return HIST_count_parallel_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + HIST_checkInput_e.trustInput, + (uint*)workSpace + ); } /* HIST_count_wksp() : * Same as HIST_count(), but using an externally provided scratch buffer. * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ - private static nuint HIST_count_wksp(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize, void* workSpace, nuint workSpaceSize) + private static nuint HIST_count_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + void* workSpace, + nuint workSpaceSize + ) { if (((nuint)workSpace & 3) != 0) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); if (workSpaceSize < 1024 * sizeof(uint)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); if (*maxSymbolValuePtr < 255) - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, HIST_checkInput_e.checkMaxSymbolValue, (uint*)workSpace); + return HIST_count_parallel_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + HIST_checkInput_e.checkMaxSymbolValue, + (uint*)workSpace + ); *maxSymbolValuePtr = 255; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); + return HIST_countFast_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + workSpace, + workSpaceSize + ); } /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ - 
private static nuint HIST_countFast(uint* count, uint* maxSymbolValuePtr, void* source, nuint sourceSize) + private static nuint HIST_countFast( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize + ) { uint* tmpCounters = stackalloc uint[1024]; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(uint) * 1024); + return HIST_countFast_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + tmpCounters, + sizeof(uint) * 1024 + ); } /*! HIST_count(): @@ -199,10 +260,22 @@ private static nuint HIST_countFast(uint* count, uint* maxSymbolValuePtr, void* * or an error code, which can be tested using HIST_isError(). * note : if return == srcSize, there is only one symbol. */ - private static nuint HIST_count(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize) + private static nuint HIST_count( + uint* count, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize + ) { uint* tmpCounters = stackalloc uint[1024]; - return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(uint) * 1024); + return HIST_count_wksp( + count, + maxSymbolValuePtr, + src, + srcSize, + tmpCounters, + sizeof(uint) * 1024 + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs index a41274cdb..15bfe2608 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs @@ -1,11 +1,15 @@ -using static ZstdSharp.UnsafeHelper; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static void* HUF_alignUpWorkspace(void* workspace, nuint* workspaceSizePtr, nuint align) + private static void* HUF_alignUpWorkspace( + void* workspace, + nuint* workspaceSizePtr, + nuint align + ) { nuint mask = 
align - 1; nuint rem = (nuint)workspace & mask; @@ -27,21 +31,37 @@ public static unsafe partial class Methods } } - private static nuint HUF_compressWeights(void* dst, nuint dstSize, void* weightTable, nuint wtSize, void* workspace, nuint workspaceSize) + private static nuint HUF_compressWeights( + void* dst, + nuint dstSize, + void* weightTable, + nuint wtSize, + void* workspace, + nuint workspaceSize + ) { byte* ostart = (byte*)dst; byte* op = ostart; byte* oend = ostart + dstSize; uint maxSymbolValue = 12; uint tableLog = 6; - HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(uint)); + HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace( + workspace, + &workspaceSize, + sizeof(uint) + ); if (workspaceSize < (nuint)sizeof(HUF_CompressWeightsWksp)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); if (wtSize <= 1) return 0; { /* never fails */ - uint maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); + uint maxCount = HIST_count_simple( + wksp->count, + &maxSymbolValue, + weightTable, + wtSize + ); if (maxCount == wtSize) return 1; if (maxCount == 1) @@ -51,13 +71,26 @@ private static nuint HUF_compressWeights(void* dst, nuint dstSize, void* weightT tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); { /* useLowProbCount */ - nuint _var_err__ = FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, 0); + nuint _var_err__ = FSE_normalizeCount( + wksp->norm, + tableLog, + wksp->count, + wtSize, + maxSymbolValue, + 0 + ); if (ERR_isError(_var_err__)) return _var_err__; } { - nuint hSize = FSE_writeNCount(op, (nuint)(oend - op), wksp->norm, maxSymbolValue, tableLog); + nuint hSize = FSE_writeNCount( + op, + (nuint)(oend - op), + wksp->norm, + maxSymbolValue, + tableLog + ); if (ERR_isError(hSize)) return hSize; op += hSize; @@ -65,13 +98,26 @@ private static nuint HUF_compressWeights(void* dst, 
nuint dstSize, void* weightT { /* Compress */ - nuint _var_err__ = FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(uint) * 41); + nuint _var_err__ = FSE_buildCTable_wksp( + wksp->CTable, + wksp->norm, + maxSymbolValue, + tableLog, + wksp->scratchBuffer, + sizeof(uint) * 41 + ); if (ERR_isError(_var_err__)) return _var_err__; } { - nuint cSize = FSE_compress_usingCTable(op, (nuint)(oend - op), weightTable, wtSize, wksp->CTable); + nuint cSize = FSE_compress_usingCTable( + op, + (nuint)(oend - op), + weightTable, + wtSize, + wksp->CTable + ); if (ERR_isError(cSize)) return cSize; if (cSize == 0) @@ -145,12 +191,24 @@ private static void HUF_writeCTableHeader(nuint* ctable, uint tableLog, uint max memcpy(ctable, &header, (uint)sizeof(nuint)); } - private static nuint HUF_writeCTable_wksp(void* dst, nuint maxDstSize, nuint* CTable, uint maxSymbolValue, uint huffLog, void* workspace, nuint workspaceSize) + private static nuint HUF_writeCTable_wksp( + void* dst, + nuint maxDstSize, + nuint* CTable, + uint maxSymbolValue, + uint huffLog, + void* workspace, + nuint workspaceSize + ) { nuint* ct = CTable + 1; byte* op = (byte*)dst; uint n; - HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(uint)); + HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace( + workspace, + &workspaceSize, + sizeof(uint) + ); assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); assert(HUF_readCTableHeader(CTable).tableLog == huffLog); if (workspaceSize < (nuint)sizeof(HUF_WriteCTableWksp)) @@ -165,7 +223,14 @@ private static nuint HUF_writeCTable_wksp(void* dst, nuint maxDstSize, nuint* CT if (maxDstSize < 1) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); { - nuint hSize = HUF_compressWeights(op + 1, maxDstSize - 1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, (nuint)sizeof(HUF_CompressWeightsWksp)); + nuint hSize = 
HUF_compressWeights( + op + 1, + maxDstSize - 1, + wksp->huffWeight, + maxSymbolValue, + &wksp->wksp, + (nuint)sizeof(HUF_CompressWeightsWksp) + ); if (ERR_isError(hSize)) return hSize; if (hSize > 1 && hSize < maxSymbolValue / 2) @@ -188,7 +253,13 @@ private static nuint HUF_writeCTable_wksp(void* dst, nuint maxDstSize, nuint* CT /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ - private static nuint HUF_readCTable(nuint* CTable, uint* maxSymbolValuePtr, void* src, nuint srcSize, uint* hasZeroWeights) + private static nuint HUF_readCTable( + nuint* CTable, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize, + uint* hasZeroWeights + ) { /* init not required, even though some static analyzer may complain */ byte* huffWeight = stackalloc byte[256]; @@ -198,7 +269,15 @@ private static nuint HUF_readCTable(nuint* CTable, uint* maxSymbolValuePtr, void uint nbSymbols = 0; nuint* ct = CTable + 1; /* get symbol weights */ - nuint readSize = HUF_readStats(huffWeight, 255 + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + nuint readSize = HUF_readStats( + huffWeight, + 255 + 1, + rankVal, + &nbSymbols, + &tableLog, + src, + srcSize + ); if (ERR_isError(readSize)) return readSize; *hasZeroWeights = rankVal[0] > 0 ? 1U : 0U; @@ -209,7 +288,8 @@ private static nuint HUF_readCTable(nuint* CTable, uint* maxSymbolValuePtr, void *maxSymbolValuePtr = nbSymbols - 1; HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); { - uint n, nextRankStart = 0; + uint n, + nextRankStart = 0; for (n = 1; n <= tableLog; n++) { uint curr = nextRankStart; @@ -297,7 +377,11 @@ private static uint HUF_getNbBitsFromCTable(nuint* CTable, uint symbolValue) * respect targetNbBits. * @return The maximum number of bits of the Huffman tree after adjustment. 
*/ - private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint targetNbBits) + private static uint HUF_setMaxHeight( + nodeElt_s* huffNode, + uint lastNonNull, + uint targetNbBits + ) { uint largestBits = huffNode[lastNonNull].nbBits; if (largestBits <= targetNbBits) @@ -308,7 +392,9 @@ private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint int n = (int)lastNonNull; while (huffNode[n].nbBits > targetNbBits) { - totalCost += (int)(baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits))); + totalCost += (int)( + baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits)) + ); huffNode[n].nbBits = (byte)targetNbBits; n--; } @@ -370,7 +456,10 @@ private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint else { rankLast[nBitsToDecrease]--; - if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits - nBitsToDecrease) + if ( + huffNode[rankLast[nBitsToDecrease]].nbBits + != targetNbBits - nBitsToDecrease + ) rankLast[nBitsToDecrease] = noSymbol; } } @@ -404,7 +493,9 @@ private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint HUF_getIndex(uint count) { - return count < 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1) ? count : ZSTD_highbit32(count) + (192 - 1 - 32 - 1); + return count < 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1) + ? count + : ZSTD_highbit32(count) + (192 - 1 - 32 - 1); } /* Helper swap function for HUF_quickSortPartition() */ @@ -513,7 +604,12 @@ private static void HUF_simpleQuickSort(nodeElt_s* arr, int low, int high) * @param[in] maxSymbolValue Maximum symbol value. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. 
*/ - private static void HUF_sort(nodeElt_s* huffNode, uint* count, uint maxSymbolValue, rankPos* rankPosition) + private static void HUF_sort( + nodeElt_s* huffNode, + uint* count, + uint maxSymbolValue, + rankPos* rankPosition + ) { uint n; uint maxSymbolValue1 = maxSymbolValue + 1; @@ -567,9 +663,11 @@ private static int HUF_buildTree(nodeElt_s* huffNode, uint maxSymbolValue) { nodeElt_s* huffNode0 = huffNode - 1; int nonNullRank; - int lowS, lowN; + int lowS, + lowN; int nodeNb = 255 + 1; - int n, nodeRoot; + int n, + nodeRoot; nonNullRank = (int)maxSymbolValue; while (huffNode[nonNullRank].count == 0) nonNullRank--; @@ -610,7 +708,13 @@ private static int HUF_buildTree(nodeElt_s* huffNode, uint maxSymbolValue) * @param maxSymbolValue The maximum symbol value. * @param maxNbBits The exact maximum number of bits used in the Huffman tree. */ - private static void HUF_buildCTableFromTree(nuint* CTable, nodeElt_s* huffNode, int nonNullRank, uint maxSymbolValue, uint maxNbBits) + private static void HUF_buildCTableFromTree( + nuint* CTable, + nodeElt_s* huffNode, + int nonNullRank, + uint maxSymbolValue, + uint maxNbBits + ) { nuint* ct = CTable + 1; /* fill result into ctable (val, nbBits) */ @@ -639,9 +743,21 @@ private static void HUF_buildCTableFromTree(nuint* CTable, nodeElt_s* huffNode, HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue); } - private static nuint HUF_buildCTable_wksp(nuint* CTable, uint* count, uint maxSymbolValue, uint maxNbBits, void* workSpace, nuint wkspSize) + private static nuint HUF_buildCTable_wksp( + nuint* CTable, + uint* count, + uint maxSymbolValue, + uint maxNbBits, + void* workSpace, + nuint wkspSize + ) { - HUF_buildCTable_wksp_tables* wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(uint)); + HUF_buildCTable_wksp_tables* wksp_tables = + (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace( + workSpace, + &wkspSize, + sizeof(uint) + ); nodeElt_s* huffNode0 = 
&wksp_tables->huffNodeTbl.e0; nodeElt_s* huffNode = huffNode0 + 1; int nonNullRank; @@ -661,7 +777,11 @@ private static nuint HUF_buildCTable_wksp(nuint* CTable, uint* count, uint maxSy return maxNbBits; } - private static nuint HUF_estimateCompressedSize(nuint* CTable, uint* count, uint maxSymbolValue) + private static nuint HUF_estimateCompressedSize( + nuint* CTable, + uint* count, + uint maxSymbolValue + ) { nuint* ct = CTable + 1; nuint nbBits = 0; @@ -700,13 +820,17 @@ private static nuint HUF_compressBound(nuint size) * Initializes the bitstream. * @returns 0 or an error code. */ - private static nuint HUF_initCStream(ref HUF_CStream_t bitC, void* startPtr, nuint dstCapacity) + private static nuint HUF_initCStream( + ref HUF_CStream_t bitC, + void* startPtr, + nuint dstCapacity + ) { bitC = new HUF_CStream_t { startPtr = (byte*)startPtr, ptr = (byte*)startPtr, - endPtr = (byte*)startPtr + dstCapacity - sizeof(nuint) + endPtr = (byte*)startPtr + dstCapacity - sizeof(nuint), }; if (dstCapacity <= (nuint)sizeof(nuint)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -724,7 +848,12 @@ private static nuint HUF_initCStream(ref HUF_CStream_t bitC, void* startPtr, nui * otherwise it must be 0. HUF_addBits() is faster when fast is set. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_addBits(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, nuint elt, int kFast) + private static void HUF_addBits( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + nuint elt, + int kFast + ) { assert(HUF_getNbBits(elt) <= 12); bitC_bitContainer_e0 >>= (int)HUF_getNbBits(elt); @@ -745,7 +874,12 @@ private static void HUF_zeroIndex1(ref nuint bitC_bitContainer_e1, ref nuint bit * and zeros the bit container @ index 1. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_mergeIndex1(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, ref nuint bitC_bitContainer_e1, ref nuint bitC_bitPos_e1) + private static void HUF_mergeIndex1( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + ref nuint bitC_bitContainer_e1, + ref nuint bitC_bitPos_e1 + ) { assert((bitC_bitPos_e1 & 0xFF) < (nuint)(sizeof(nuint) * 8)); bitC_bitContainer_e0 >>= (int)(bitC_bitPos_e1 & 0xFF); @@ -762,7 +896,13 @@ private static void HUF_mergeIndex1(ref nuint bitC_bitContainer_e0, ref nuint bi * the bit container will not overflow. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_flushBits(ref nuint bitC_bitContainer_e0, ref nuint bitC_bitPos_e0, ref byte* bitC_ptr, byte* bitC_endPtr, int kFast) + private static void HUF_flushBits( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + ref byte* bitC_ptr, + byte* bitC_endPtr, + int kFast + ) { /* The upper bits of bitPos are noisy, so we must mask by 0xFF. 
*/ nuint nbBits = bitC_bitPos_e0 & 0xFF; @@ -797,7 +937,13 @@ private static nuint HUF_endMark() private static nuint HUF_closeCStream(ref HUF_CStream_t bitC) { HUF_addBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, HUF_endMark(), 0); - HUF_flushBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, ref bitC.ptr, bitC.endPtr, 0); + HUF_flushBits( + ref bitC.bitContainer.e0, + ref bitC.bitPos.e0, + ref bitC.ptr, + bitC.endPtr, + 0 + ); { nuint nbBits = bitC.bitPos.e0 & 0xFF; if (bitC.ptr >= bitC.endPtr) @@ -807,13 +953,27 @@ private static nuint HUF_closeCStream(ref HUF_CStream_t bitC) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_encodeSymbol(ref nuint bitCPtr_bitContainer_e0, ref nuint bitCPtr_bitPos_e0, uint symbol, nuint* CTable, int fast) + private static void HUF_encodeSymbol( + ref nuint bitCPtr_bitContainer_e0, + ref nuint bitCPtr_bitPos_e0, + uint symbol, + nuint* CTable, + int fast + ) { HUF_addBits(ref bitCPtr_bitContainer_e0, ref bitCPtr_bitPos_e0, CTable[symbol], fast); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_compress1X_usingCTable_internal_body_loop(ref HUF_CStream_t bitC, byte* ip, nuint srcSize, nuint* ct, int kUnroll, int kFastFlush, int kLastFast) + private static void HUF_compress1X_usingCTable_internal_body_loop( + ref HUF_CStream_t bitC, + byte* ip, + nuint srcSize, + nuint* ct, + int kUnroll, + int kFastFlush, + int kLastFast + ) { byte* bitC_ptr = bitC.ptr; byte* bitC_endPtr = bitC.endPtr; @@ -831,7 +991,13 @@ private static void HUF_compress1X_usingCTable_internal_body_loop(ref HUF_CStrea HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[--n], ct, 0); } - HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); } assert(n % kUnroll == 0); @@ -840,11 +1006,29 @@ private static void 
HUF_compress1X_usingCTable_internal_body_loop(ref HUF_CStrea int u; for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1); + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - u], + ct, + 1 + ); } - HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - kUnroll], ct, kLastFast); - HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush); + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - kUnroll], + ct, + kLastFast + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); n -= kUnroll; } @@ -855,20 +1039,61 @@ private static void HUF_compress1X_usingCTable_internal_body_loop(ref HUF_CStrea int u; for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1); + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - u], + ct, + 1 + ); } - HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - kUnroll], ct, kLastFast); - HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush); + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - kUnroll], + ct, + kLastFast + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); HUF_zeroIndex1(ref bitC_bitContainer_e1, ref bitC_bitPos_e1); for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol(ref bitC_bitContainer_e1, ref bitC_bitPos_e1, ip[n - kUnroll - u], ct, 1); + HUF_encodeSymbol( + ref bitC_bitContainer_e1, + ref bitC_bitPos_e1, + ip[n - kUnroll - u], + ct, + 1 + ); } - HUF_encodeSymbol(ref bitC_bitContainer_e1, ref bitC_bitPos_e1, ip[n - kUnroll - kUnroll], ct, kLastFast); - HUF_mergeIndex1(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_bitContainer_e1, ref bitC_bitPos_e1); 
- HUF_flushBits(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ref bitC_ptr, bitC_endPtr, kFastFlush); + HUF_encodeSymbol( + ref bitC_bitContainer_e1, + ref bitC_bitPos_e1, + ip[n - kUnroll - kUnroll], + ct, + kLastFast + ); + HUF_mergeIndex1( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_bitContainer_e1, + ref bitC_bitPos_e1 + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); } assert(n == 0); @@ -890,7 +1115,13 @@ private static nuint HUF_tightCompressBound(nuint srcSize, nuint tableLog) return (srcSize * tableLog >> 3) + 8; } - private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable) + private static nuint HUF_compress1X_usingCTable_internal_body( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable + ) { uint tableLog = HUF_readCTableHeader(CTable).tableLog; nuint* ct = CTable + 1; @@ -909,7 +1140,15 @@ private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint d } if (dstSize < HUF_tightCompressBound(srcSize, tableLog) || tableLog > 11) - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, MEM_32bits ? 2 : 4, 0, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + MEM_32bits ? 
2 : 4, + 0, + 0 + ); else { if (MEM_32bits) @@ -917,16 +1156,40 @@ private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint d switch (tableLog) { case 11: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 2, 1, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 2, + 1, + 0 + ); break; case 10: case 9: case 8: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 2, 1, 1); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 2, + 1, + 1 + ); break; case 7: default: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 3, 1, 1); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 3, + 1, + 1 + ); break; } } @@ -935,23 +1198,71 @@ private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint d switch (tableLog) { case 11: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 5, 1, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 5, + 1, + 0 + ); break; case 10: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 5, 1, 1); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 5, + 1, + 1 + ); break; case 9: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 6, 1, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 6, + 1, + 0 + ); break; case 8: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 7, 1, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 7, + 1, + 0 + ); break; case 7: - HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 8, 1, 0); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 8, + 1, + 0 + ); break; case 6: default: - 
HUF_compress1X_usingCTable_internal_body_loop(ref bitC, ip, srcSize, ct, 9, 1, 1); + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 9, + 1, + 1 + ); break; } } @@ -961,7 +1272,14 @@ private static nuint HUF_compress1X_usingCTable_internal_body(void* dst, nuint d return HUF_closeCStream(ref bitC); } - private static nuint HUF_compress1X_usingCTable_internal(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags) + private static nuint HUF_compress1X_usingCTable_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } @@ -969,12 +1287,26 @@ private static nuint HUF_compress1X_usingCTable_internal(void* dst, nuint dstSiz /* ====================== */ /* single stream variants */ /* ====================== */ - private static nuint HUF_compress1X_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags) + private static nuint HUF_compress1X_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) { return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } - private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags) + private static nuint HUF_compress4X_usingCTable_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) { /* first 3 segments */ nuint segmentSize = (srcSize + 3) / 4; @@ -990,7 +1322,14 @@ private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSiz op += 6; assert(op <= oend); { - nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); if 
(ERR_isError(cSize)) return cSize; if (cSize == 0 || cSize > 65535) @@ -1002,7 +1341,14 @@ private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSiz ip += segmentSize; assert(op <= oend); { - nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); if (ERR_isError(cSize)) return cSize; if (cSize == 0 || cSize > 65535) @@ -1014,7 +1360,14 @@ private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSiz ip += segmentSize; assert(op <= oend); { - nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, segmentSize, CTable, flags); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); if (ERR_isError(cSize)) return cSize; if (cSize == 0 || cSize > 65535) @@ -1027,7 +1380,14 @@ private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSiz assert(op <= oend); assert(ip <= iend); { - nuint cSize = HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), ip, (nuint)(iend - ip), CTable, flags); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + (nuint)(iend - ip), + CTable, + flags + ); if (ERR_isError(cSize)) return cSize; if (cSize == 0 || cSize > 65535) @@ -1038,14 +1398,47 @@ private static nuint HUF_compress4X_usingCTable_internal(void* dst, nuint dstSiz return (nuint)(op - ostart); } - private static nuint HUF_compress4X_usingCTable(void* dst, nuint dstSize, void* src, nuint srcSize, nuint* CTable, int flags) + private static nuint HUF_compress4X_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) { return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } - private static nuint HUF_compressCTable_internal(byte* ostart, 
byte* op, byte* oend, void* src, nuint srcSize, HUF_nbStreams_e nbStreams, nuint* CTable, int flags) + private static nuint HUF_compressCTable_internal( + byte* ostart, + byte* op, + byte* oend, + void* src, + nuint srcSize, + HUF_nbStreams_e nbStreams, + nuint* CTable, + int flags + ) { - nuint cSize = nbStreams == HUF_nbStreams_e.HUF_singleStream ? HUF_compress1X_usingCTable_internal(op, (nuint)(oend - op), src, srcSize, CTable, flags) : HUF_compress4X_usingCTable_internal(op, (nuint)(oend - op), src, srcSize, CTable, flags); + nuint cSize = + nbStreams == HUF_nbStreams_e.HUF_singleStream + ? HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + src, + srcSize, + CTable, + flags + ) + : HUF_compress4X_usingCTable_internal( + op, + (nuint)(oend - op), + src, + srcSize, + CTable, + flags + ); if (ERR_isError(cSize)) { return cSize; @@ -1096,7 +1489,16 @@ private static uint HUF_minTableLog(uint symbolCardinality) return minBitsSymbols; } - private static uint HUF_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue, void* workSpace, nuint wkspSize, nuint* table, uint* count, int flags) + private static uint HUF_optimalTableLog( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue, + void* workSpace, + nuint wkspSize, + nuint* table, + uint* count, + int flags + ) { assert(srcSize > 1); assert(wkspSize >= (nuint)sizeof(HUF_buildCTable_wksp_tables)); @@ -1108,20 +1510,37 @@ private static uint HUF_optimalTableLog(uint maxTableLog, nuint srcSize, uint ma { byte* dst = (byte*)workSpace + sizeof(HUF_WriteCTableWksp); nuint dstSize = wkspSize - (nuint)sizeof(HUF_WriteCTableWksp); - nuint hSize, newSize; + nuint hSize, + newSize; uint symbolCardinality = HUF_cardinality(count, maxSymbolValue); uint minTableLog = HUF_minTableLog(symbolCardinality); nuint optSize = unchecked((nuint)~0) - 1; - uint optLog = maxTableLog, optLogGuess; + uint optLog = maxTableLog, + optLogGuess; for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; 
optLogGuess++) { { - nuint maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); + nuint maxBits = HUF_buildCTable_wksp( + table, + count, + maxSymbolValue, + optLogGuess, + workSpace, + wkspSize + ); if (ERR_isError(maxBits)) continue; if (maxBits < optLogGuess && optLogGuess > minTableLog) break; - hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (uint)maxBits, workSpace, wkspSize); + hSize = HUF_writeCTable_wksp( + dst, + dstSize, + table, + maxSymbolValue, + (uint)maxBits, + workSpace, + wkspSize + ); } if (ERR_isError(hSize)) @@ -1147,9 +1566,26 @@ private static uint HUF_optimalTableLog(uint maxTableLog, nuint srcSize, uint ma /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ - private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, HUF_nbStreams_e nbStreams, void* workSpace, nuint wkspSize, nuint* oldHufTable, HUF_repeat* repeat, int flags) + private static nuint HUF_compress_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + HUF_nbStreams_e nbStreams, + void* workSpace, + nuint wkspSize, + nuint* oldHufTable, + HUF_repeat* repeat, + int flags + ) { - HUF_compress_tables_t* table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(ulong)); + HUF_compress_tables_t* table = (HUF_compress_tables_t*)HUF_alignUpWorkspace( + workSpace, + &wkspSize, + sizeof(ulong) + ); byte* ostart = (byte*)dst; byte* oend = ostart + dstSize; byte* op = ostart; @@ -1169,17 +1605,38 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, maxSymbolValue = 255; if (huffLog == 0) huffLog = 11; - if ((flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 && repeat != null && *repeat == HUF_repeat.HUF_repeat_valid) + if ( + (flags 
& (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 + && repeat != null + && *repeat == HUF_repeat.HUF_repeat_valid + ) { - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + oldHufTable, + flags + ); } - if ((flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 && srcSize >= 4096 * 10) + if ( + (flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 + && srcSize >= 4096 * 10 + ) { nuint largestTotal = 0; { uint maxSymbolValueBegin = maxSymbolValue; - nuint largestBegin = HIST_count_simple(table->count, &maxSymbolValueBegin, (byte*)src, 4096); + nuint largestBegin = HIST_count_simple( + table->count, + &maxSymbolValueBegin, + (byte*)src, + 4096 + ); if (ERR_isError(largestBegin)) return largestBegin; largestTotal += largestBegin; @@ -1187,7 +1644,12 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, { uint maxSymbolValueEnd = maxSymbolValue; - nuint largestEnd = HIST_count_simple(table->count, &maxSymbolValueEnd, (byte*)src + srcSize - 4096, 4096); + nuint largestEnd = HIST_count_simple( + table->count, + &maxSymbolValueEnd, + (byte*)src + srcSize - 4096, + 4096 + ); if (ERR_isError(largestEnd)) return largestEnd; largestTotal += largestEnd; @@ -1198,7 +1660,14 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, } { - nuint largest = HIST_count_wksp(table->count, &maxSymbolValue, (byte*)src, srcSize, table->wksps.hist_wksp, sizeof(uint) * 1024); + nuint largest = HIST_count_wksp( + table->count, + &maxSymbolValue, + (byte*)src, + srcSize, + table->wksps.hist_wksp, + sizeof(uint) * 1024 + ); if (ERR_isError(largest)) return largest; if (largest == srcSize) @@ -1211,19 +1680,52 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, return 0; } - if (repeat != null && *repeat == HUF_repeat.HUF_repeat_check && 
HUF_validateCTable(oldHufTable, table->count, maxSymbolValue) == 0) + if ( + repeat != null + && *repeat == HUF_repeat.HUF_repeat_check + && HUF_validateCTable(oldHufTable, table->count, maxSymbolValue) == 0 + ) { *repeat = HUF_repeat.HUF_repeat_none; } - if ((flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 && repeat != null && *repeat != HUF_repeat.HUF_repeat_none) + if ( + (flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 + && repeat != null + && *repeat != HUF_repeat.HUF_repeat_none + ) { - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + oldHufTable, + flags + ); } - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, (nuint)sizeof(_wksps_e__Union), &table->CTable.e0, table->count, flags); + huffLog = HUF_optimalTableLog( + huffLog, + srcSize, + maxSymbolValue, + &table->wksps, + (nuint)sizeof(_wksps_e__Union), + &table->CTable.e0, + table->count, + flags + ); { - nuint maxBits = HUF_buildCTable_wksp(&table->CTable.e0, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, (nuint)sizeof(HUF_buildCTable_wksp_tables)); + nuint maxBits = HUF_buildCTable_wksp( + &table->CTable.e0, + table->count, + maxSymbolValue, + huffLog, + &table->wksps.buildCTable_wksp, + (nuint)sizeof(HUF_buildCTable_wksp_tables) + ); { nuint _var_err__ = maxBits; if (ERR_isError(_var_err__)) @@ -1234,16 +1736,41 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, } { - nuint hSize = HUF_writeCTable_wksp(op, dstSize, &table->CTable.e0, maxSymbolValue, huffLog, &table->wksps.writeCTable_wksp, (nuint)sizeof(HUF_WriteCTableWksp)); + nuint hSize = HUF_writeCTable_wksp( + op, + dstSize, + &table->CTable.e0, + maxSymbolValue, + huffLog, + &table->wksps.writeCTable_wksp, + (nuint)sizeof(HUF_WriteCTableWksp) + ); if (ERR_isError(hSize)) return hSize; if (repeat != null && 
*repeat != HUF_repeat.HUF_repeat_none) { - nuint oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); - nuint newSize = HUF_estimateCompressedSize(&table->CTable.e0, table->count, maxSymbolValue); + nuint oldSize = HUF_estimateCompressedSize( + oldHufTable, + table->count, + maxSymbolValue + ); + nuint newSize = HUF_estimateCompressedSize( + &table->CTable.e0, + table->count, + maxSymbolValue + ); if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, flags); + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + oldHufTable, + flags + ); } } @@ -1262,7 +1789,16 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, memcpy(oldHufTable, &table->CTable.e0, sizeof(ulong) * 257); } - return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, &table->CTable.e0, flags); + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + &table->CTable.e0, + flags + ); } /** HUF_compress1X_repeat() : @@ -1271,18 +1807,68 @@ private static nuint HUF_compress_internal(void* dst, nuint dstSize, void* src, * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. 
* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ - private static nuint HUF_compress1X_repeat(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, void* workSpace, nuint wkspSize, nuint* hufTable, HUF_repeat* repeat, int flags) + private static nuint HUF_compress1X_repeat( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + void* workSpace, + nuint wkspSize, + nuint* hufTable, + HUF_repeat* repeat, + int flags + ) { - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_nbStreams_e.HUF_singleStream, workSpace, wkspSize, hufTable, repeat, flags); + return HUF_compress_internal( + dst, + dstSize, + src, + srcSize, + maxSymbolValue, + huffLog, + HUF_nbStreams_e.HUF_singleStream, + workSpace, + wkspSize, + hufTable, + repeat, + flags + ); } /* HUF_compress4X_repeat(): * compress input using 4 streams. * consider skipping quickly * reuse an existing huffman compression table */ - private static nuint HUF_compress4X_repeat(void* dst, nuint dstSize, void* src, nuint srcSize, uint maxSymbolValue, uint huffLog, void* workSpace, nuint wkspSize, nuint* hufTable, HUF_repeat* repeat, int flags) + private static nuint HUF_compress4X_repeat( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + void* workSpace, + nuint wkspSize, + nuint* hufTable, + HUF_repeat* repeat, + int flags + ) { - return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_nbStreams_e.HUF_fourStreams, workSpace, wkspSize, hufTable, repeat, flags); + return HUF_compress_internal( + dst, + dstSize, + src, + srcSize, + maxSymbolValue, + huffLog, + HUF_nbStreams_e.HUF_fourStreams, + workSpace, + wkspSize, + hufTable, + repeat, + flags + ); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs index fc9492c48..e3a2048d3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs @@ -1,6 +1,6 @@ -using static ZstdSharp.UnsafeHelper; using System; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -29,7 +29,14 @@ private static nuint HUF_initFastDStream(byte* ip) * 0 if the fallback implementation should be used. * Or an error code on failure. */ - private static nuint HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, nuint dstSize, void* src, nuint srcSize, uint* DTable) + private static nuint HUF_DecompressFastArgs_init( + HUF_DecompressFastArgs* args, + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* DTable + ) { void* dt = DTable + 1; uint dtLog = HUF_getDTableDesc(DTable).tableLog; @@ -79,7 +86,12 @@ private static nuint HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, v return 1; } - private static nuint HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs* args, int stream, byte* segmentEnd) + private static nuint HUF_initRemainingDStream( + BIT_DStream_t* bit, + HUF_DecompressFastArgs* args, + int stream, + byte* segmentEnd + ) { if ((&args->op.e0)[stream] > segmentEnd) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); @@ -121,7 +133,13 @@ private static ulong HUF_DEltX1_set4(byte symbol, byte nbBits) * If tableLog > targetTableLog this is a no-op. 
* @returns New tableLog */ - private static uint HUF_rescaleStats(byte* huffWeight, uint* rankVal, uint nbSymbols, uint tableLog, uint targetTableLog) + private static uint HUF_rescaleStats( + byte* huffWeight, + uint* rankVal, + uint nbSymbols, + uint tableLog, + uint targetTableLog + ) { if (tableLog > targetTableLog) return tableLog; @@ -148,7 +166,14 @@ private static uint HUF_rescaleStats(byte* huffWeight, uint* rankVal, uint nbSym return targetTableLog; } - private static nuint HUF_readDTableX1_wksp(uint* DTable, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_readDTableX1_wksp( + uint* DTable, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { uint tableLog = 0; uint nbSymbols = 0; @@ -158,14 +183,31 @@ private static nuint HUF_readDTableX1_wksp(uint* DTable, void* src, nuint srcSiz HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; if ((nuint)sizeof(HUF_ReadDTableX1_Workspace) > wkspSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - iSize = HUF_readStats_wksp(wksp->huffWeight, 255 + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(uint) * 219, flags); + iSize = HUF_readStats_wksp( + wksp->huffWeight, + 255 + 1, + wksp->rankVal, + &nbSymbols, + &tableLog, + src, + srcSize, + wksp->statsWksp, + sizeof(uint) * 219, + flags + ); if (ERR_isError(iSize)) return iSize; { DTableDesc dtd = HUF_getDTableDesc(DTable); uint maxTableLog = (uint)(dtd.maxTableLog + 1); uint targetTableLog = maxTableLog < 11 ? 
maxTableLog : 11; - tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); + tableLog = HUF_rescaleStats( + wksp->huffWeight, + wksp->rankVal, + nbSymbols, + tableLog, + targetTableLog + ); if (tableLog > (uint)(dtd.maxTableLog + 1)) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); dtd.tableType = 0; @@ -296,12 +338,21 @@ private static byte HUF_decodeSymbolX1(BIT_DStream_t* Dstream, HUF_DEltX1* dt, u } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decodeStreamX1(byte* p, BIT_DStream_t* bitDPtr, byte* pEnd, HUF_DEltX1* dt, uint dtLog) + private static nuint HUF_decodeStreamX1( + byte* p, + BIT_DStream_t* bitDPtr, + byte* pEnd, + HUF_DEltX1* dt, + uint dtLog + ) { byte* pStart = p; if (pEnd - p > 3) { - while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - 3) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd - 3 + ) { if (MEM_64bits) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); @@ -317,7 +368,10 @@ private static nuint HUF_decodeStreamX1(byte* p, BIT_DStream_t* bitDPtr, byte* p } if (MEM_32bits) - while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd + ) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); while (p < pEnd) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); @@ -325,7 +379,13 @@ private static nuint HUF_decodeStreamX1(byte* p, BIT_DStream_t* bitDPtr, byte* p } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress1X1_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress1X1_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) { byte* op = (byte*)dst; byte* oend = 
ZSTD_maybeNullPtrAdd(op, (nint)dstSize); @@ -351,7 +411,13 @@ private static nuint HUF_decompress1X1_usingDTable_internal_body(void* dst, nuin * @dstSize >= 6 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress4X1_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress4X1_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) { if (cSrcSize < 10) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); @@ -420,7 +486,7 @@ private static nuint HUF_decompress4X1_usingDTable_internal_body(void* dst, nuin if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) { - for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0;) + for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0; ) { if (MEM_64bits) *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); @@ -446,10 +512,26 @@ private static nuint HUF_decompress4X1_usingDTable_internal_body(void* dst, nuin *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD1) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD2) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD3) + == BIT_DStream_status.BIT_DStream_unfinished + ? 
1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD4) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; } } @@ -464,25 +546,54 @@ private static nuint HUF_decompress4X1_usingDTable_internal_body(void* dst, nuin HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); { - uint endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + uint endCheck = + BIT_endOfDStream(&bitD1) + & BIT_endOfDStream(&bitD2) + & BIT_endOfDStream(&bitD3) + & BIT_endOfDStream(&bitD4); if (endCheck == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } return dstSize; } } - private static nuint HUF_decompress4X1_usingDTable_internal_default(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress4X1_usingDTable_internal_default( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) { - return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); + return HUF_decompress4X1_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) + private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop( + HUF_DecompressFastArgs* args + ) { - ulong bits0, bits1, bits2, bits3; - byte* ip0, ip1, ip2, ip3; - byte* op0, op1, op2, op3; + ulong bits0, + bits1, + bits2, + bits3; + byte* ip0, + ip1, + ip2, + ip3; + byte* op0, + op1, + op2, + op3; ushort* dtable = (ushort*)args->dt; byte* oend = args->oend; byte* ilowest = args->ilowest; @@ -759,11 +870,10 @@ private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_Decom bits3 <<= nbBits; } } - } - while (op3 < olimit); + } while (op3 < olimit); } - _out: + _out: 
args->bits[0] = bits0; args->bits[1] = bits1; args->bits[2] = bits2; @@ -783,14 +893,28 @@ private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_Decom * 0 if the fallback implementation should be used * An error if an error occurred */ - private static nuint HUF_decompress4X1_usingDTable_internal_fast(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, void* loopFn) + private static nuint HUF_decompress4X1_usingDTable_internal_fast( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + void* loopFn + ) { void* dt = DTable + 1; byte* ilowest = (byte*)cSrc; byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); HUF_DecompressFastArgs args; { - nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + nuint ret = HUF_DecompressFastArgs_init( + &args, + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); { nuint err_code = ret; if (ERR_isError(err_code)) @@ -832,9 +956,17 @@ private static nuint HUF_decompress4X1_usingDTable_internal_fast(void* dst, nuin } } - (&args.op.e0)[i] += HUF_decodeStreamX1((&args.op.e0)[i], &bit, segmentEnd, (HUF_DEltX1*)dt, 11); + (&args.op.e0)[i] += HUF_decodeStreamX1( + (&args.op.e0)[i], + &bit, + segmentEnd, + (HUF_DEltX1*)dt, + 11 + ); if ((&args.op.e0)[i] != segmentEnd) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } } @@ -842,26 +974,72 @@ private static nuint HUF_decompress4X1_usingDTable_internal_fast(void* dst, nuin return dstSize; } - private static nuint HUF_decompress1X1_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress1X1_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { - return HUF_decompress1X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, 
DTable); + return HUF_decompress1X1_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static nuint HUF_decompress4X1_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress4X1_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { - void* fallbackFn = (delegate* managed)(&HUF_decompress4X1_usingDTable_internal_default); - void* loopFn = (delegate* managed)(&HUF_decompress4X1_usingDTable_internal_fast_c_loop); + void* fallbackFn = (delegate* managed)( + &HUF_decompress4X1_usingDTable_internal_default + ); + void* loopFn = (delegate* managed)( + &HUF_decompress4X1_usingDTable_internal_fast_c_loop + ); if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) { - nuint ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + nuint ret = HUF_decompress4X1_usingDTable_internal_fast( + dst, + dstSize, + cSrc, + cSrcSize, + DTable, + loopFn + ); if (ret != 0) return ret; } - return ((delegate* managed)fallbackFn)(dst, dstSize, cSrc, cSrcSize, DTable); + return ((delegate* managed)fallbackFn)( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static nuint HUF_decompress4X1_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress4X1_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { byte* ip = (byte*)cSrc; nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); @@ -928,7 +1106,15 @@ private static ulong HUF_buildDEltX2U64(uint symbol, uint nbBits, ushort baseSeq * @param level The level in the table. Must be 1 or 2. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_fillDTableX2ForWeight(HUF_DEltX2* DTableRank, sortedSymbol_t* begin, sortedSymbol_t* end, uint nbBits, uint tableLog, ushort baseSeq, int level) + private static void HUF_fillDTableX2ForWeight( + HUF_DEltX2* DTableRank, + sortedSymbol_t* begin, + sortedSymbol_t* end, + uint nbBits, + uint tableLog, + ushort baseSeq, + int level + ) { /* quiet static-analyzer */ uint length = 1U << (int)(tableLog - nbBits & 0x1F); @@ -997,7 +1183,18 @@ private static void HUF_fillDTableX2ForWeight(HUF_DEltX2* DTableRank, sortedSymb /* HUF_fillDTableX2Level2() : * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, uint targetLog, uint consumedBits, uint* rankVal, int minWeight, int maxWeight1, sortedSymbol_t* sortedSymbols, uint* rankStart, uint nbBitsBaseline, ushort baseSeq) + private static void HUF_fillDTableX2Level2( + HUF_DEltX2* DTable, + uint targetLog, + uint consumedBits, + uint* rankVal, + int minWeight, + int maxWeight1, + sortedSymbol_t* sortedSymbols, + uint* rankStart, + uint nbBitsBaseline, + ushort baseSeq + ) { if (minWeight > 1) { @@ -1043,12 +1240,28 @@ private static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, uint targetLog, u int end = (int)rankStart[w + 1]; uint nbBits = nbBitsBaseline - (uint)w; uint totalBits = nbBits + consumedBits; - HUF_fillDTableX2ForWeight(DTable + rankVal[w], sortedSymbols + begin, sortedSymbols + end, totalBits, targetLog, baseSeq, 2); + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedSymbols + begin, + sortedSymbols + end, + totalBits, + targetLog, + baseSeq, + 2 + ); } } } - private static void HUF_fillDTableX2(HUF_DEltX2* DTable, uint targetLog, sortedSymbol_t* sortedList, uint* rankStart, rankValCol_t* rankValOrigin, uint maxWeight, uint nbBitsBaseline) + private static void HUF_fillDTableX2( + 
HUF_DEltX2* DTable, + uint targetLog, + sortedSymbol_t* sortedList, + uint* rankStart, + rankValCol_t* rankValOrigin, + uint maxWeight, + uint nbBitsBaseline + ) { uint* rankVal = (uint*)&rankValOrigin[0]; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ @@ -1073,20 +1286,48 @@ private static void HUF_fillDTableX2(HUF_DEltX2* DTable, uint targetLog, sortedS minWeight = 1; for (s = begin; s != end; ++s) { - HUF_fillDTableX2Level2(DTable + start, targetLog, nbBits, (uint*)&rankValOrigin[nbBits], minWeight, wEnd, sortedList, rankStart, nbBitsBaseline, sortedList[s].symbol); + HUF_fillDTableX2Level2( + DTable + start, + targetLog, + nbBits, + (uint*)&rankValOrigin[nbBits], + minWeight, + wEnd, + sortedList, + rankStart, + nbBitsBaseline, + sortedList[s].symbol + ); start += (int)length; } } else { - HUF_fillDTableX2ForWeight(DTable + rankVal[w], sortedList + begin, sortedList + end, nbBits, targetLog, 0, 1); + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedList + begin, + sortedList + end, + nbBits, + targetLog, + 0, + 1 + ); } } } - private static nuint HUF_readDTableX2_wksp(uint* DTable, void* src, nuint srcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_readDTableX2_wksp( + uint* DTable, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { - uint tableLog, maxW, nbSymbols; + uint tableLog, + maxW, + nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); uint maxTableLog = dtd.maxTableLog; nuint iSize; @@ -1102,19 +1343,29 @@ private static nuint HUF_readDTableX2_wksp(uint* DTable, void* src, nuint srcSiz memset(wksp->rankStart0, 0, sizeof(uint) * 15); if (maxTableLog > 12) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - iSize = HUF_readStats_wksp(wksp->weightList, 255 + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(uint) * 219, flags); + iSize = HUF_readStats_wksp( + wksp->weightList, + 255 + 1, + wksp->rankStats, + 
&nbSymbols, + &tableLog, + src, + srcSize, + wksp->calleeWksp, + sizeof(uint) * 219, + flags + ); if (ERR_isError(iSize)) return iSize; if (tableLog > maxTableLog) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); if (tableLog <= 11 && maxTableLog > 11) maxTableLog = 11; - for (maxW = tableLog; wksp->rankStats[maxW] == 0; maxW--) - { - } + for (maxW = tableLog; wksp->rankStats[maxW] == 0; maxW--) { } { - uint w, nextRankStart = 0; + uint w, + nextRankStart = 0; for (w = 1; w < maxW + 1; w++) { uint curr = nextRankStart; @@ -1168,7 +1419,15 @@ private static nuint HUF_readDTableX2_wksp(uint* DTable, void* src, nuint srcSiz } } - HUF_fillDTableX2(dt, maxTableLog, &wksp->sortedSymbol.e0, wksp->rankStart0, &wksp->rankVal.e0, maxW, tableLog + 1); + HUF_fillDTableX2( + dt, + maxTableLog, + &wksp->sortedSymbol.e0, + wksp->rankStart0, + &wksp->rankVal.e0, + maxW, + tableLog + 1 + ); dtd.tableLog = (byte)maxTableLog; dtd.tableType = 1; memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); @@ -1176,7 +1435,12 @@ private static nuint HUF_readDTableX2_wksp(uint* DTable, void* src, nuint srcSiz } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, HUF_DEltX2* dt, uint dtLog) + private static uint HUF_decodeSymbolX2( + void* op, + BIT_DStream_t* DStream, + HUF_DEltX2* dt, + uint dtLog + ) { /* note : dtLog >= 1 */ nuint val = BIT_lookBitsFast(DStream, dtLog); @@ -1186,7 +1450,12 @@ private static uint HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, HUF_DEl } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, HUF_DEltX2* dt, uint dtLog) + private static uint HUF_decodeLastSymbolX2( + void* op, + BIT_DStream_t* DStream, + HUF_DEltX2* dt, + uint dtLog + ) { /* note : dtLog >= 1 */ nuint val = BIT_lookBitsFast(DStream, dtLog); @@ -1209,14 +1478,23 @@ private static uint HUF_decodeLastSymbolX2(void* op, 
BIT_DStream_t* DStream, HUF } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decodeStreamX2(byte* p, BIT_DStream_t* bitDPtr, byte* pEnd, HUF_DEltX2* dt, uint dtLog) + private static nuint HUF_decodeStreamX2( + byte* p, + BIT_DStream_t* bitDPtr, + byte* pEnd, + HUF_DEltX2* dt, + uint dtLog + ) { byte* pStart = p; if ((nuint)(pEnd - p) >= (nuint)sizeof(nuint)) { if (dtLog <= 11 && MEM_64bits) { - while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - 9) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd - 9 + ) { p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); @@ -1227,7 +1505,10 @@ private static nuint HUF_decodeStreamX2(byte* p, BIT_DStream_t* bitDPtr, byte* p } else { - while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd - (sizeof(nuint) - 1)) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd - (sizeof(nuint) - 1) + ) { if (MEM_64bits) p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); @@ -1245,7 +1526,10 @@ private static nuint HUF_decodeStreamX2(byte* p, BIT_DStream_t* bitDPtr, byte* p if ((nuint)(pEnd - p) >= 2) { - while (BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p <= pEnd - 2) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p <= pEnd - 2 + ) p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); while (p <= pEnd - 2) p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); @@ -1257,7 +1541,13 @@ private static nuint HUF_decodeStreamX2(byte* p, BIT_DStream_t* bitDPtr, byte* p } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress1X2_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress1X2_usingDTable_internal_body( + void* dst, + nuint dstSize, + 
void* cSrc, + nuint cSrcSize, + uint* DTable + ) { BIT_DStream_t bitD; { @@ -1287,7 +1577,13 @@ private static nuint HUF_decompress1X2_usingDTable_internal_body(void* dst, nuin * @dstSize >= 6 */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress4X2_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) { if (cSrcSize < 10) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); @@ -1356,7 +1652,7 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuin if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) { - for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0;) + for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0; ) { if (MEM_64bits) op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); @@ -1370,8 +1666,16 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuin if (MEM_64bits) op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD1) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD2) + == BIT_DStream_status.BIT_DStream_unfinished + ? 
1U + : 0U; if (MEM_64bits) op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); @@ -1384,8 +1688,16 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuin if (MEM_64bits) op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD3) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD4) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; } } @@ -1400,26 +1712,58 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body(void* dst, nuin HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); { - uint endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + uint endCheck = + BIT_endOfDStream(&bitD1) + & BIT_endOfDStream(&bitD2) + & BIT_endOfDStream(&bitD3) + & BIT_endOfDStream(&bitD4); if (endCheck == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } return dstSize; } } - private static nuint HUF_decompress4X2_usingDTable_internal_default(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable) + private static nuint HUF_decompress4X2_usingDTable_internal_default( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) { - return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); + return HUF_decompress4X2_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static void 
HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) + private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop( + HUF_DecompressFastArgs* args + ) { - ulong bits0, bits1, bits2, bits3; - byte* ip0, ip1, ip2, ip3; - byte* op0, op1, op2, op3; - byte* oend0, oend1, oend2, oend3; + ulong bits0, + bits1, + bits2, + bits3; + byte* ip0, + ip1, + ip2, + ip3; + byte* op0, + op1, + op2, + op3; + byte* oend0, + oend1, + oend2, + oend3; HUF_DEltX2* dtable = (HUF_DEltX2*)args->dt; byte* ilowest = args->ilowest; bits0 = args->bits[0]; @@ -1750,11 +2094,10 @@ private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_Decom } } } - } - while (op3 < olimit); + } while (op3 < olimit); } - _out: + _out: args->bits[0] = bits0; args->bits[1] = bits1; args->bits[2] = bits2; @@ -1769,14 +2112,28 @@ private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_Decom args->op.e3 = op3; } - private static nuint HUF_decompress4X2_usingDTable_internal_fast(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, void* loopFn) + private static nuint HUF_decompress4X2_usingDTable_internal_fast( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + void* loopFn + ) { void* dt = DTable + 1; byte* ilowest = (byte*)cSrc; byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); HUF_DecompressFastArgs args; { - nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + nuint ret = HUF_DecompressFastArgs_init( + &args, + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); { nuint err_code = ret; if (ERR_isError(err_code)) @@ -1817,35 +2174,89 @@ private static nuint HUF_decompress4X2_usingDTable_internal_fast(void* dst, nuin } } - (&args.op.e0)[i] += HUF_decodeStreamX2((&args.op.e0)[i], &bit, segmentEnd, (HUF_DEltX2*)dt, 11); + (&args.op.e0)[i] += HUF_decodeStreamX2( + (&args.op.e0)[i], + &bit, + segmentEnd, + (HUF_DEltX2*)dt, + 11 + ); if ((&args.op.e0)[i] != 
segmentEnd) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } } return dstSize; } - private static nuint HUF_decompress4X2_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress4X2_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { - void* fallbackFn = (delegate* managed)(&HUF_decompress4X2_usingDTable_internal_default); - void* loopFn = (delegate* managed)(&HUF_decompress4X2_usingDTable_internal_fast_c_loop); + void* fallbackFn = (delegate* managed)( + &HUF_decompress4X2_usingDTable_internal_default + ); + void* loopFn = (delegate* managed)( + &HUF_decompress4X2_usingDTable_internal_fast_c_loop + ); if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) { - nuint ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + nuint ret = HUF_decompress4X2_usingDTable_internal_fast( + dst, + dstSize, + cSrc, + cSrcSize, + DTable, + loopFn + ); if (ret != 0) return ret; } - return ((delegate* managed)fallbackFn)(dst, dstSize, cSrc, cSrcSize, DTable); + return ((delegate* managed)fallbackFn)( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static nuint HUF_decompress1X2_usingDTable_internal(void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress1X2_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { - return HUF_decompress1X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); + return HUF_decompress1X2_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); } - private static nuint HUF_decompress1X2_DCtx_wksp(uint* DCtx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* 
workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress1X2_DCtx_wksp( + uint* DCtx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { byte* ip = (byte*)cSrc; nuint hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize, flags); @@ -1858,7 +2269,16 @@ private static nuint HUF_decompress1X2_DCtx_wksp(uint* DCtx, void* dst, nuint ds return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); } - private static nuint HUF_decompress4X2_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress4X2_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { byte* ip = (byte*)cSrc; nuint hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); @@ -1876,84 +2296,85 @@ private static nuint HUF_decompress4X2_DCtx_wksp(uint* dctx, void* dst, nuint ds new algo_time_t[2] { new algo_time_t(tableTime: 0, decode256Time: 0), - new algo_time_t(tableTime: 1, decode256Time: 1) + new algo_time_t(tableTime: 1, decode256Time: 1), }, new algo_time_t[2] { new algo_time_t(tableTime: 0, decode256Time: 0), - new algo_time_t(tableTime: 1, decode256Time: 1) + new algo_time_t(tableTime: 1, decode256Time: 1), }, new algo_time_t[2] { new algo_time_t(tableTime: 150, decode256Time: 216), - new algo_time_t(tableTime: 381, decode256Time: 119) + new algo_time_t(tableTime: 381, decode256Time: 119), }, new algo_time_t[2] { new algo_time_t(tableTime: 170, decode256Time: 205), - new algo_time_t(tableTime: 514, decode256Time: 112) + new algo_time_t(tableTime: 514, decode256Time: 112), }, new algo_time_t[2] { new algo_time_t(tableTime: 177, decode256Time: 199), - new algo_time_t(tableTime: 539, decode256Time: 110) + new algo_time_t(tableTime: 539, decode256Time: 110), }, new 
algo_time_t[2] { new algo_time_t(tableTime: 197, decode256Time: 194), - new algo_time_t(tableTime: 644, decode256Time: 107) + new algo_time_t(tableTime: 644, decode256Time: 107), }, new algo_time_t[2] { new algo_time_t(tableTime: 221, decode256Time: 192), - new algo_time_t(tableTime: 735, decode256Time: 107) + new algo_time_t(tableTime: 735, decode256Time: 107), }, new algo_time_t[2] { new algo_time_t(tableTime: 256, decode256Time: 189), - new algo_time_t(tableTime: 881, decode256Time: 106) + new algo_time_t(tableTime: 881, decode256Time: 106), }, new algo_time_t[2] { new algo_time_t(tableTime: 359, decode256Time: 188), - new algo_time_t(tableTime: 1167, decode256Time: 109) + new algo_time_t(tableTime: 1167, decode256Time: 109), }, new algo_time_t[2] { new algo_time_t(tableTime: 582, decode256Time: 187), - new algo_time_t(tableTime: 1570, decode256Time: 114) + new algo_time_t(tableTime: 1570, decode256Time: 114), }, new algo_time_t[2] { new algo_time_t(tableTime: 688, decode256Time: 187), - new algo_time_t(tableTime: 1712, decode256Time: 122) + new algo_time_t(tableTime: 1712, decode256Time: 122), }, new algo_time_t[2] { new algo_time_t(tableTime: 825, decode256Time: 186), - new algo_time_t(tableTime: 1965, decode256Time: 136) + new algo_time_t(tableTime: 1965, decode256Time: 136), }, new algo_time_t[2] { new algo_time_t(tableTime: 976, decode256Time: 185), - new algo_time_t(tableTime: 2131, decode256Time: 150) + new algo_time_t(tableTime: 2131, decode256Time: 150), }, new algo_time_t[2] { new algo_time_t(tableTime: 1180, decode256Time: 186), - new algo_time_t(tableTime: 2070, decode256Time: 175) + new algo_time_t(tableTime: 2070, decode256Time: 175), }, new algo_time_t[2] { new algo_time_t(tableTime: 1377, decode256Time: 185), - new algo_time_t(tableTime: 1731, decode256Time: 202) + new algo_time_t(tableTime: 1731, decode256Time: 202), }, new algo_time_t[2] { new algo_time_t(tableTime: 1412, decode256Time: 185), - new algo_time_t(tableTime: 1695, decode256Time: 
202) - } + new algo_time_t(tableTime: 1695, decode256Time: 202), + }, }; + /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. @@ -1974,7 +2395,16 @@ private static uint HUF_selectDecoder(nuint dstSize, nuint cSrcSize) } } - private static nuint HUF_decompress1X_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress1X_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { if (dstSize == 0) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -1994,20 +2424,72 @@ private static nuint HUF_decompress1X_DCtx_wksp(uint* dctx, void* dst, nuint dst { uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb != 0 ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); + return algoNb != 0 + ? HUF_decompress1X2_DCtx_wksp( + dctx, + dst, + dstSize, + cSrc, + cSrcSize, + workSpace, + wkspSize, + flags + ) + : HUF_decompress1X1_DCtx_wksp( + dctx, + dst, + dstSize, + cSrc, + cSrcSize, + workSpace, + wkspSize, + flags + ); } } /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. */ - private static nuint HUF_decompress1X_usingDTable(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress1X_usingDTable( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { DTableDesc dtd = HUF_getDTableDesc(DTable); - return dtd.tableType != 0 ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); + return dtd.tableType != 0 + ? HUF_decompress1X2_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ) + : HUF_decompress1X1_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ); } - private static nuint HUF_decompress1X1_DCtx_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress1X1_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { byte* ip = (byte*)cSrc; nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); @@ -2020,13 +2502,45 @@ private static nuint HUF_decompress1X1_DCtx_wksp(uint* dctx, void* dst, nuint ds return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } - private static nuint HUF_decompress4X_usingDTable(void* dst, nuint maxDstSize, void* cSrc, nuint cSrcSize, uint* DTable, int flags) + private static nuint HUF_decompress4X_usingDTable( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) { DTableDesc dtd = HUF_getDTableDesc(DTable); - return dtd.tableType != 0 ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); + return dtd.tableType != 0 + ? 
HUF_decompress4X2_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ) + : HUF_decompress4X1_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ); } - private static nuint HUF_decompress4X_hufOnly_wksp(uint* dctx, void* dst, nuint dstSize, void* cSrc, nuint cSrcSize, void* workSpace, nuint wkspSize, int flags) + private static nuint HUF_decompress4X_hufOnly_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) { if (dstSize == 0) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -2034,7 +2548,27 @@ private static nuint HUF_decompress4X_hufOnly_wksp(uint* dctx, void* dst, nuint return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); { uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb != 0 ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); + return algoNb != 0 + ? 
HUF_decompress4X2_DCtx_wksp( + dctx, + dst, + dstSize, + cSrc, + cSrcSize, + workSpace, + wkspSize, + flags + ) + : HUF_decompress4X1_DCtx_wksp( + dctx, + dst, + dstSize, + cSrc, + cSrcSize, + workSpace, + wkspSize, + flags + ); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs index 1f918fff1..284a043a3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs @@ -41,10 +41,12 @@ can sometimes prove slower */ private static nuint MEM_readST(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_write16(void* memPtr, ushort value) => BclUnsafe.WriteUnaligned(memPtr, value); + private static void MEM_write16(void* memPtr, ushort value) => + BclUnsafe.WriteUnaligned(memPtr, value); [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_write64(void* memPtr, ulong value) => BclUnsafe.WriteUnaligned(memPtr, value); + private static void MEM_write64(void* memPtr, ulong value) => + BclUnsafe.WriteUnaligned(memPtr, value); /*=== Little endian r/w ===*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -125,8 +127,8 @@ private static void MEM_writeLE64(void* memPtr, ulong val64) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ReverseEndiannessNative(nuint val) => MEM_32bits - ? BinaryPrimitives.ReverseEndianness((uint) val) - : (nuint) BinaryPrimitives.ReverseEndianness(val); + ? 
BinaryPrimitives.ReverseEndianness((uint)val) + : (nuint)BinaryPrimitives.ReverseEndianness(val); #endif [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs index d2065d07d..e82e241ac 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs @@ -6,4 +6,4 @@ public struct RSyncState_t public ulong hitMask; public ulong primePower; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs index bc0c8d34b..3829e85b9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs @@ -5,10 +5,11 @@ public unsafe struct Range { public void* start; public nuint size; + public Range(void* start, nuint size) { this.start = start; this.size = size; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs index 0e2334cd2..15b5b2ca2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs @@ -4,16 +4,27 @@ public unsafe struct RawSeqStore_t { /* The start of the sequences */ public rawSeq* seq; + /* The index in seq where reading stopped. pos <= size. */ public nuint pos; + /* The position within the sequence at seq[pos] where reading stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ public nuint posInSequence; + /* The number of sequences. <= capacity. 
*/ public nuint size; + /* The capacity starting from `seq` pointer */ public nuint capacity; - public RawSeqStore_t(rawSeq* seq, nuint pos, nuint posInSequence, nuint size, nuint capacity) + + public RawSeqStore_t( + rawSeq* seq, + nuint pos, + nuint posInSequence, + nuint size, + nuint capacity + ) { this.seq = seq; this.pos = pos; @@ -22,4 +33,4 @@ public RawSeqStore_t(rawSeq* seq, nuint pos, nuint posInSequence, nuint size, nu this.capacity = capacity; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs index 22631e10f..90db6546f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs @@ -8,14 +8,17 @@ public unsafe struct RoundBuff_t * sure it doesn't overlap with any pieces still in use. */ public byte* buffer; + /* The capacity of buffer. */ public nuint capacity; + /* The position of the current inBuff in the round * buffer. Updated past the end if the inBuff once * the inBuff is sent to the worker thread. * pos <= capacity. 
*/ public nuint pos; + public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) { this.buffer = buffer; @@ -23,4 +26,4 @@ public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) this.pos = pos; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs index 883517c93..b39d020f7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs @@ -7,4 +7,4 @@ public unsafe struct SeqCollector public nuint seqIndex; public nuint maxSequences; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs index 660f38661..20b849dc0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs @@ -8,7 +8,8 @@ public struct SeqDef_s /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ public uint offBase; public ushort litLength; + /* mlBase == matchLength - MINMATCH */ public ushort mlBase; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs index 4b661a552..d4d5cb973 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs @@ -3,9 +3,11 @@ namespace ZstdSharp.Unsafe public unsafe struct SeqStore_t { public SeqDef_s* sequencesStart; + /* ptr to end of sequences */ public SeqDef_s* sequences; public byte* litStart; + /* ptr to end of literals */ public byte* lit; public byte* llCode; @@ -13,12 +15,14 @@ public unsafe struct SeqStore_t public byte* ofCode; public nuint maxNbSeq; public nuint maxNbLit; + /* longLengthPos and longLengthType to allow us to represent either a single 
litLength or matchLength * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment * the existing value of the litLength or matchLength by 0x10000. */ public ZSTD_longLengthType_e longLengthType; + /* Index of the sequence to apply long length modification to */ public uint longLengthPos; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs index 4b39c6c40..c88b175ca 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs @@ -9,13 +9,16 @@ public unsafe struct SerialState public ldmState_t ldmState; public XXH64_state_s xxhState; public uint nextJobID; + /* Protects ldmWindow. * Must be acquired after the main mutex when acquiring both. */ public void* ldmWindowMutex; + /* Signaled when ldmWindow is updated */ public void* ldmWindowCond; + /* A thread-safe copy of ldmState.window */ public ZSTD_window_t ldmWindow; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs index 7763eaf89..3fc6e04c9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs @@ -5,6 +5,6 @@ public enum SymbolEncodingType_e set_basic, set_rle, set_compressed, - set_repeat + set_repeat, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs index bf93adfcc..db9ada1dc 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs @@ -4,7 +4,8 @@ public struct SyncPoint { /* The number of bytes to load from the input. 
*/ public nuint toLoad; + /* Boolean declaring if we must flush because we found a synchronization point. */ public int flush; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs index fbb3572f8..c636b30f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs @@ -8,4 +8,4 @@ public unsafe struct XXH32_canonical_t /*!< Hash bytes, big endian */ public fixed byte digest[4]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs index 01c6229e3..413ae77f4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs @@ -16,15 +16,20 @@ public unsafe struct XXH32_state_s { /*!< Total length hashed, modulo 2^32 */ public uint total_len_32; + /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ public uint large_len; + /*!< Accumulator lanes */ public fixed uint v[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ public fixed uint mem32[4]; + /*!< Amount of data in @ref mem32 */ public uint memsize; + /*!< Reserved field. Do not read nor write to it. 
*/ public uint reserved; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs index fc5ffb469..1f18d31ab 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs @@ -7,4 +7,4 @@ public unsafe struct XXH64_canonical_t { public fixed byte digest[8]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs index 3ba3dd4a9..448af8cec 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs @@ -16,15 +16,20 @@ public unsafe struct XXH64_state_s { /*!< Total length hashed. This is always 64-bit. */ public ulong total_len; + /*!< Accumulator lanes */ public fixed ulong v[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ public fixed ulong mem64[4]; + /*!< Amount of data in @ref mem64 */ public uint memsize; + /*!< Reserved field, needed for padding anyways*/ public uint reserved32; + /*!< Reserved field. Do not read or write to it. 
*/ public ulong reserved64; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs index e3b721519..1a14c9178 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs @@ -8,7 +8,8 @@ public enum XXH_alignment { /*!< Aligned */ XXH_aligned, + /*!< Possibly unaligned */ - XXH_unaligned + XXH_unaligned, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs index bd3568df0..f88ee6364 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs @@ -7,7 +7,8 @@ public enum XXH_errorcode { /*!< OK */ XXH_OK = 0, + /*!< Error */ - XXH_ERROR + XXH_ERROR, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs index 081fca15a..ee25063c6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs @@ -1,8 +1,8 @@ -using static ZstdSharp.UnsafeHelper; using System; using System.Buffers.Binary; using System.Numerics; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -39,13 +39,17 @@ private static void XXH_memcpy(void* dest, void* src, nuint size) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint XXH_readLE32(void* ptr) { - return BitConverter.IsLittleEndian ? MEM_read32(ptr) : BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)); + return BitConverter.IsLittleEndian + ? 
MEM_read32(ptr) + : BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint XXH_readBE32(void* ptr) { - return BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)) : MEM_read32(ptr); + return BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)) + : MEM_read32(ptr); } private static uint XXH_readLE32_align(void* ptr, XXH_alignment align) @@ -56,7 +60,9 @@ private static uint XXH_readLE32_align(void* ptr, XXH_alignment align) } else { - return BitConverter.IsLittleEndian ? *(uint*)ptr : BinaryPrimitives.ReverseEndianness(*(uint*)ptr); + return BitConverter.IsLittleEndian + ? *(uint*)ptr + : BinaryPrimitives.ReverseEndianness(*(uint*)ptr); } } @@ -158,7 +164,12 @@ private static uint XXH32_finalize(uint hash, byte* ptr, nuint len, XXH_alignmen * @param align Whether @p input is aligned. * @return The calculated hash. */ - private static uint XXH32_endian_align(byte* input, nuint len, uint seed, XXH_alignment align) + private static uint XXH32_endian_align( + byte* input, + nuint len, + uint seed, + XXH_alignment align + ) { uint h32; if (len >= 16) @@ -179,9 +190,12 @@ private static uint XXH32_endian_align(byte* input, nuint len, uint seed, XXH_al input += 4; v4 = XXH32_round(v4, XXH_readLE32_align(input, align)); input += 4; - } - while (input < limit); - h32 = BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); + } while (input < limit); + h32 = + BitOperations.RotateLeft(v1, 1) + + BitOperations.RotateLeft(v2, 7) + + BitOperations.RotateLeft(v3, 12) + + BitOperations.RotateLeft(v4, 18); } else { @@ -279,8 +293,7 @@ private static XXH_errorcode ZSTD_XXH32_update(XXH32_state_s* state, void* input p += 4; state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p += 4; - } - while (p <= limit); + } while (p <= limit); } if (p < bEnd) @@ -299,7 +312,11 @@ 
private static uint ZSTD_XXH32_digest(XXH32_state_s* state) uint h32; if (state->large_len != 0) { - h32 = BitOperations.RotateLeft(state->v[0], 1) + BitOperations.RotateLeft(state->v[1], 7) + BitOperations.RotateLeft(state->v[2], 12) + BitOperations.RotateLeft(state->v[3], 18); + h32 = + BitOperations.RotateLeft(state->v[0], 1) + + BitOperations.RotateLeft(state->v[1], 7) + + BitOperations.RotateLeft(state->v[2], 12) + + BitOperations.RotateLeft(state->v[3], 18); } else { @@ -307,7 +324,12 @@ private static uint ZSTD_XXH32_digest(XXH32_state_s* state) } h32 += state->total_len_32; - return XXH32_finalize(h32, (byte*)state->mem32, state->memsize, XXH_alignment.XXH_aligned); + return XXH32_finalize( + h32, + (byte*)state->mem32, + state->memsize, + XXH_alignment.XXH_aligned + ); } /*! @ingroup XXH32_family */ @@ -328,13 +350,17 @@ private static uint ZSTD_XXH32_hashFromCanonical(XXH32_canonical_t* src) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static ulong XXH_readLE64(void* ptr) { - return BitConverter.IsLittleEndian ? MEM_read64(ptr) : BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)); + return BitConverter.IsLittleEndian + ? MEM_read64(ptr) + : BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static ulong XXH_readBE64(void* ptr) { - return BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)) : MEM_read64(ptr); + return BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)) + : MEM_read64(ptr); } private static ulong XXH_readLE64_align(void* ptr, XXH_alignment align) @@ -342,7 +368,9 @@ private static ulong XXH_readLE64_align(void* ptr, XXH_alignment align) if (align == XXH_alignment.XXH_unaligned) return XXH_readLE64(ptr); else - return BitConverter.IsLittleEndian ? *(ulong*)ptr : BinaryPrimitives.ReverseEndianness(*(ulong*)ptr); + return BitConverter.IsLittleEndian + ? 
*(ulong*)ptr + : BinaryPrimitives.ReverseEndianness(*(ulong*)ptr); } /*! @copydoc XXH32_round */ @@ -398,7 +426,9 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm ulong k1 = XXH64_round(0, XXH_readLE64_align(ptr, align)); ptr += 8; hash ^= k1; - hash = BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; + hash = + BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL + + 0x85EBCA77C2B2AE63UL; len -= 8; } @@ -406,7 +436,9 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm { hash ^= XXH_readLE32_align(ptr, align) * 0x9E3779B185EBCA87UL; ptr += 4; - hash = BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL + 0x165667B19E3779F9UL; + hash = + BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL + + 0x165667B19E3779F9UL; len -= 4; } @@ -428,7 +460,12 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm * @param align Whether @p input is aligned. * @return The calculated hash. 
*/ - private static ulong XXH64_endian_align(byte* input, nuint len, ulong seed, XXH_alignment align) + private static ulong XXH64_endian_align( + byte* input, + nuint len, + ulong seed, + XXH_alignment align + ) { ulong h64; if (len >= 32) @@ -449,9 +486,12 @@ private static ulong XXH64_endian_align(byte* input, nuint len, ulong seed, XXH_ input += 8; v4 = XXH64_round(v4, XXH_readLE64_align(input, align)); input += 8; - } - while (input < limit); - h64 = BitOperations.RotateLeft(v1, 1) + BitOperations.RotateLeft(v2, 7) + BitOperations.RotateLeft(v3, 12) + BitOperations.RotateLeft(v4, 18); + } while (input < limit); + h64 = + BitOperations.RotateLeft(v1, 1) + + BitOperations.RotateLeft(v2, 7) + + BitOperations.RotateLeft(v3, 12) + + BitOperations.RotateLeft(v4, 18); h64 = XXH64_mergeRound(h64, v1); h64 = XXH64_mergeRound(h64, v2); h64 = XXH64_mergeRound(h64, v3); @@ -545,8 +585,7 @@ private static XXH_errorcode ZSTD_XXH64_update(XXH64_state_s* state, void* input p += 8; state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p += 8; - } - while (p <= limit); + } while (p <= limit); } if (p < bEnd) @@ -565,7 +604,11 @@ private static ulong ZSTD_XXH64_digest(XXH64_state_s* state) ulong h64; if (state->total_len >= 32) { - h64 = BitOperations.RotateLeft(state->v[0], 1) + BitOperations.RotateLeft(state->v[1], 7) + BitOperations.RotateLeft(state->v[2], 12) + BitOperations.RotateLeft(state->v[3], 18); + h64 = + BitOperations.RotateLeft(state->v[0], 1) + + BitOperations.RotateLeft(state->v[1], 7) + + BitOperations.RotateLeft(state->v[2], 12) + + BitOperations.RotateLeft(state->v[3], 18); h64 = XXH64_mergeRound(h64, state->v[0]); h64 = XXH64_mergeRound(h64, state->v[1]); h64 = XXH64_mergeRound(h64, state->v[2]); @@ -577,7 +620,12 @@ private static ulong ZSTD_XXH64_digest(XXH64_state_s* state) } h64 += state->total_len; - return XXH64_finalize(h64, (byte*)state->mem64, (nuint)state->total_len, XXH_alignment.XXH_aligned); + return XXH64_finalize( + h64, + 
(byte*)state->mem64, + (nuint)state->total_len, + XXH_alignment.XXH_aligned + ); } /*! @ingroup XXH64_family */ diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs index 2b27c1a10..9913870c6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs @@ -8,18 +8,24 @@ public struct ZDICT_cover_params_t { /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ public uint k; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ public uint d; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ public uint steps; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ public uint nbThreads; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ public double splitPoint; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ public uint shrinkDict; + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ public uint shrinkDictMaxRegression; public ZDICT_params_t zParams; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs index eea3bb332..8fa765d8f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs @@ -4,22 +4,30 @@ public struct ZDICT_fastCover_params_t { /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ public uint k; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ public uint d; + /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ public uint f; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ public uint steps; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ public uint nbThreads; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ public double splitPoint; + /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ public uint accel; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ public uint shrinkDict; + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ public uint shrinkDictMaxRegression; public ZDICT_params_t zParams; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs index 466cf2bc2..001db2b90 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs @@ -6,4 +6,4 @@ public struct ZDICT_legacy_params_t public uint selectivityLevel; public ZDICT_params_t zParams; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs index 28668ac51..6609d6289 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs @@ -4,8 +4,10 @@ public struct ZDICT_params_t { /**< optimize for a specific zstd compression level; 0 means default */ public int compressionLevel; + /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ public uint notificationLevel; + /**< force dictID value; 0 means auto mode (32-bits random value) * NOTE: The zstd format reserves some dictionary IDs for future use. 
* You may use them in private settings, but be warned that they @@ -16,4 +18,4 @@ public struct ZDICT_params_t */ public uint dictID; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs index 691635a37..3694a0d78 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs @@ -10,4 +10,4 @@ public unsafe struct ZSTDMT_CCtxPool public ZSTD_customMem cMem; public ZSTD_CCtx_s** cctxs; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs index 7185cb5a1..3daf397fe 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs @@ -10,6 +10,7 @@ public unsafe struct ZSTDMT_CCtx_s public ZSTD_CCtx_params_s @params; public nuint targetSectionSize; public nuint targetPrefixSize; + /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. 
*/ public int jobReady; public InBuff_t inBuff; @@ -29,4 +30,4 @@ public unsafe struct ZSTDMT_CCtx_s public ZSTD_CDict_s* cdict; public uint providedFactory; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs index a5333bfe1..79d0aceb0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs @@ -9,4 +9,4 @@ public unsafe struct ZSTDMT_bufferPool_s public ZSTD_customMem cMem; public buffer_s* buffers; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs index b0fb7a6e7..f138a4e90 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs @@ -4,41 +4,59 @@ public unsafe struct ZSTDMT_jobDescription { /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ public nuint consumed; + /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ public nuint cSize; + /* Thread-safe - used by mtctx and worker */ public void* job_mutex; + /* Thread-safe - used by mtctx and worker */ public void* job_cond; + /* Thread-safe - used by mtctx and (all) workers */ public ZSTDMT_CCtxPool* cctxPool; + /* Thread-safe - used by mtctx and (all) workers */ public ZSTDMT_bufferPool_s* bufPool; + /* Thread-safe - used by mtctx and (all) workers */ public ZSTDMT_bufferPool_s* seqPool; + /* Thread-safe - used by mtctx and (all) workers */ public SerialState* serial; + /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ public buffer_s dstBuff; + /* set by mtctx, then read by worker & mtctx => no barrier */ public Range 
prefix; + /* set by mtctx, then read by worker & mtctx => no barrier */ public Range src; + /* set by mtctx, then read by worker => no barrier */ public uint jobID; + /* set by mtctx, then read by worker => no barrier */ public uint firstJob; + /* set by mtctx, then read by worker => no barrier */ public uint lastJob; + /* set by mtctx, then read by worker => no barrier */ public ZSTD_CCtx_params_s @params; + /* set by mtctx, then read by worker => no barrier */ public ZSTD_CDict_s* cdict; + /* set by mtctx, then read by worker => no barrier */ public ulong fullFrameSize; + /* used only by mtctx */ public nuint dstFlushed; + /* used only by mtctx */ public uint frameChecksumNeeded; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs index 9d9a7e3b2..f65e81c70 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs @@ -3,5 +3,11 @@ namespace ZstdSharp.Unsafe { [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - public unsafe delegate nuint ZSTD_BlockCompressor_f(ZSTD_MatchState_t* bs, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize); -} \ No newline at end of file + public unsafe delegate nuint ZSTD_BlockCompressor_f( + ZSTD_MatchState_t* bs, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ); +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs index 98631bfa2..1020f6958 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs @@ -5,4 +5,4 @@ public unsafe struct ZSTD_BuildCTableWksp public fixed short norm[53]; public fixed uint wksp[285]; } -} \ No newline at end of file +} 
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs index 6e575561a..3f0b7d177 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_BuildSeqStore_e { ZSTDbss_compress, - ZSTDbss_noCompress + ZSTDbss_noCompress, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs index d29f527d2..8281cc693 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs @@ -6,34 +6,43 @@ public unsafe struct ZSTD_CCtx_params_s public ZSTD_compressionParameters cParams; public ZSTD_frameParameters fParams; public int compressionLevel; + /* force back-references to respect limit of * 1< dFast. * Special: value 0 means "use default hashLog". */ ZSTD_c_hashLog = 102, + /* Size of the multi-probe search table, as a power of 2. * Resulting memory usage is (1 << (chainLog+2)). * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. @@ -38,11 +41,13 @@ public enum ZSTD_cParameter * in which case it defines a secondary probe table. * Special: value 0 means "use default chainLog". */ ZSTD_c_chainLog = 103, + /* Number of search attempts, as a power of 2. * More attempts result in better and slower compression. * This parameter is useless for "fast" and "dFast" strategies. * Special: value 0 means "use default searchLog". */ ZSTD_c_searchLog = 104, + /* Minimum size of searched matches. * Note that Zstandard can still find matches of smaller size, * it just tweaks its search algorithm to look for this size and larger. 
@@ -52,6 +57,7 @@ public enum ZSTD_cParameter * , for all strategies > fast, effective maximum is 6. * Special: value 0 means "use default minMatchLength". */ ZSTD_c_minMatch = 105, + /* Impact of this field depends on strategy. * For strategies btopt, btultra & btultra2: * Length of Match considered "good enough" to stop search. @@ -61,11 +67,13 @@ public enum ZSTD_cParameter * Larger values make compression faster, and weaker. * Special: value 0 means "use default targetLength". */ ZSTD_c_targetLength = 106, + /* See ZSTD_strategy enum definition. * The higher the value of selected strategy, the more complex it is, * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ ZSTD_c_strategy = 107, + /* v1.5.6+ * Attempts to fit compressed block size into approximately targetCBlockSize. * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. @@ -79,6 +87,7 @@ public enum ZSTD_cParameter * due to massive performance regressions. */ ZSTD_c_targetCBlockSize = 130, + /* Enable long distance matching. * This parameter is designed to improve compression ratio * for large inputs, by finding large matches at long distance. @@ -88,6 +97,7 @@ public enum ZSTD_cParameter * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and * compression strategy >= ZSTD_btopt (== compression level 16+) */ ZSTD_c_enableLongDistanceMatching = 160, + /* Size of the table for long distance matching, as a power of 2. * Larger values increase memory usage and compression ratio, * but decrease compression speed. @@ -95,16 +105,19 @@ public enum ZSTD_cParameter * default: windowlog - 7. * Special: value 0 means "automatically determine hashlog". */ ZSTD_c_ldmHashLog = 161, + /* Minimum match size for long distance matcher. * Larger/too small values usually decrease compression ratio. * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. * Special: value 0 means "use default value" (default: 64). 
*/ ZSTD_c_ldmMinMatch = 162, + /* Log size of each bucket in the LDM hash table for collision resolution. * Larger values improve collision resolution but decrease compression speed. * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. * Special: value 0 means "use default value" (default: 3). */ ZSTD_c_ldmBucketSizeLog = 163, + /* Frequency of inserting/looking up entries into the LDM hash table. * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. @@ -112,15 +125,19 @@ public enum ZSTD_cParameter * Deviating far from default value will likely result in a compression ratio decrease. * Special: value 0 means "automatically determine hashRateLog". */ ZSTD_c_ldmHashRateLog = 164, + /* Content size will be written into frame header _whenever known_ (default:1) * Content size must be known at the beginning of compression. * This is automatically the case when using ZSTD_compress2(), * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ ZSTD_c_contentSizeFlag = 200, + /* A 32-bits checksum of content is written at end of frame (default:0) */ ZSTD_c_checksumFlag = 201, + /* When applicable, dictionary's ID is written into frame header (default:1) */ ZSTD_c_dictIDFlag = 202, + /* Select how many threads will be spawned to compress in parallel. * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, @@ -131,12 +148,14 @@ public enum ZSTD_cParameter * Default value is `0`, aka "single-threaded mode" : no worker is spawned, * compression is performed inside Caller's thread, and all invocations are blocking */ ZSTD_c_nbWorkers = 400, + /* Size of a compression job. This value is enforced only when nbWorkers >= 1. 
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. * 0 means default, which is dynamically determined based on compression parameters. * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. * The minimum size is automatically and transparently enforced. */ ZSTD_c_jobSize = 401, + /* Control the overlap size, as a fraction of window size. * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. * It helps preserve compression ratio, while each job is compressed in parallel. @@ -150,6 +169,7 @@ public enum ZSTD_cParameter * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default * default value varies between 6 and 9, depending on strategy */ ZSTD_c_overlapLog = 402, + /* note : additional experimental parameters are also available * within the experimental section of the API. * At the time of this writing, they include : @@ -179,6 +199,7 @@ public enum ZSTD_cParameter ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, + /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, @@ -193,6 +214,6 @@ public enum ZSTD_cParameter ZSTD_c_experimentalParam17 = 1014, ZSTD_c_experimentalParam18 = 1015, ZSTD_c_experimentalParam19 = 1016, - ZSTD_c_experimentalParam20 = 1017 + ZSTD_c_experimentalParam20 = 1017, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs index 770eaa918..443520a98 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs @@ -4,6 +4,6 @@ public enum ZSTD_cStreamStage { zcss_init = 0, zcss_load, - zcss_flush + 
zcss_flush, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs index 0f474bc05..95c055d20 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs @@ -10,6 +10,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_compResetPolicy_e { ZSTDcrp_makeClean, - ZSTDcrp_leaveDirty + ZSTDcrp_leaveDirty, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs index 24a9e6f8d..ab5dbe726 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs @@ -5,4 +5,4 @@ public unsafe struct ZSTD_compressedBlockState_t public ZSTD_entropyCTables_t entropy; public fixed uint rep[3]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs index f27b7cf8f..0fe0b5c53 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs @@ -4,19 +4,34 @@ public struct ZSTD_compressionParameters { /**< largest match distance : larger == more compression, more memory needed during decompression */ public uint windowLog; + /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ public uint chainLog; + /**< dispatch table : larger == faster, more memory */ public uint hashLog; + /**< nb of searches : larger == more compression, slower */ public uint searchLog; + /**< match length searched : larger 
== faster decompression, sometimes less compression */ public uint minMatch; + /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ public uint targetLength; + /**< see ZSTD_strategy definition above */ public ZSTD_strategy strategy; - public ZSTD_compressionParameters(uint windowLog, uint chainLog, uint hashLog, uint searchLog, uint minMatch, uint targetLength, ZSTD_strategy strategy) + + public ZSTD_compressionParameters( + uint windowLog, + uint chainLog, + uint hashLog, + uint searchLog, + uint minMatch, + uint targetLength, + ZSTD_strategy strategy + ) { this.windowLog = windowLog; this.chainLog = chainLog; @@ -27,4 +42,4 @@ public ZSTD_compressionParameters(uint windowLog, uint chainLog, uint hashLog, u this.strategy = strategy; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs index 79447b487..a6c0f7ec8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs @@ -8,6 +8,6 @@ public enum ZSTD_compressionStage_e ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, - ZSTDcs_ending + ZSTDcs_ending, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs index e34bfd7ba..311ac0a36 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs @@ -5,6 +5,7 @@ public unsafe struct ZSTD_customMem public void* customAlloc; public void* customFree; public void* opaque; + public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque) { this.customAlloc = customAlloc; @@ -12,4 +13,4 @@ public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque) 
this.opaque = opaque; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs index d60a74682..0e9e7ae2b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs @@ -108,4 +108,4 @@ public unsafe struct ZSTD_cwksp public ZSTD_cwksp_alloc_phase_e phase; public ZSTD_cwksp_static_alloc_e isStatic; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs index 86136383a..f3f266f86 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs @@ -8,6 +8,6 @@ public enum ZSTD_cwksp_alloc_phase_e ZSTD_cwksp_alloc_objects, ZSTD_cwksp_alloc_aligned_init_once, ZSTD_cwksp_alloc_aligned, - ZSTD_cwksp_alloc_buffers + ZSTD_cwksp_alloc_buffers, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs index 08d03b7e7..25e530df5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs @@ -8,6 +8,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_cwksp_static_alloc_e { ZSTD_cwksp_dynamic_alloc, - ZSTD_cwksp_static_alloc + ZSTD_cwksp_static_alloc, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs index ef9d206a8..200961439 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs @@ -16,6 +16,7 @@ public enum ZSTD_dParameter * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). * Special: value 0 means "use default maximum windowLog". */ ZSTD_d_windowLogMax = 100, + /* note : additional experimental parameters are also available * within the experimental section of the API. * At the time of this writing, they include : @@ -33,6 +34,6 @@ public enum ZSTD_dParameter ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, - ZSTD_d_experimentalParam6 = 1005 + ZSTD_d_experimentalParam6 = 1005, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs index 7374cd074..104155fee 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs @@ -9,6 +9,6 @@ public enum ZSTD_dStage ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, ZSTDds_decodeSkippableHeader, - ZSTDds_skipFrame + ZSTDds_skipFrame, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs index eea32f243..fb40404f6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs @@ -6,6 +6,6 @@ public enum ZSTD_dStreamStage zdss_loadHeader, zdss_read, zdss_load, - zdss_flush + zdss_flush, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs index 59e4eb33c..33983b06e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs @@ -4,11 +4,14 @@ public enum ZSTD_dictAttachPref_e { /* Use the default heuristic. */ ZSTD_dictDefaultAttach = 0, + /* Never copy the dictionary. */ ZSTD_dictForceAttach = 1, + /* Always copy the dictionary. */ ZSTD_dictForceCopy = 2, + /* Always reload the dictionary */ - ZSTD_dictForceLoad = 3 + ZSTD_dictForceLoad = 3, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs index 5ecb77db7..1a486605c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs @@ -4,9 +4,11 @@ public enum ZSTD_dictContentType_e { /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ ZSTD_dct_auto = 0, + /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ ZSTD_dct_rawContent = 1, + /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ - ZSTD_dct_fullDict = 2 + ZSTD_dct_fullDict = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs index 68fa70a61..ea0d1ac2d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs @@ -4,7 +4,8 @@ public enum ZSTD_dictLoadMethod_e { /**< Copy dictionary content internally */ ZSTD_dlm_byCopy = 0, + /**< Reference dictionary content -- the dictionary buffer must outlive its users. 
*/ - ZSTD_dlm_byRef = 1 + ZSTD_dlm_byRef = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs index 1db92e439..70b51926a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs @@ -5,6 +5,6 @@ public enum ZSTD_dictMode_e ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2, - ZSTD_dedicatedDictSearch = 3 + ZSTD_dedicatedDictSearch = 3, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs index daffd0b58..2fab404ed 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_dictTableLoadMethod_e { ZSTD_dtlm_fast, - ZSTD_dtlm_full + ZSTD_dtlm_full, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs index 8cc95816e..37893d7e8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs @@ -4,9 +4,11 @@ public enum ZSTD_dictUses_e { /* Use the dictionary indefinitely */ ZSTD_use_indefinitely = -1, + /* Do not use the dictionary (if one exists free it) */ ZSTD_dont_use = 0, + /* Use the dictionary once and set to ZSTD_dont_use */ - ZSTD_use_once = 1 + ZSTD_use_once = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs index 
7b5b682d7..747e4ba47 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs @@ -5,4 +5,4 @@ public struct ZSTD_entropyCTablesMetadata_t public ZSTD_hufCTablesMetadata_t hufMetadata; public ZSTD_fseCTablesMetadata_t fseMetadata; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs index 297f30bcf..57171c809 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs @@ -5,4 +5,4 @@ public struct ZSTD_entropyCTables_t public ZSTD_hufCTables_t huf; public ZSTD_fseCTables_t fse; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs index 9f336d2f6..638cbd7bc 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs @@ -6,14 +6,18 @@ public unsafe struct ZSTD_entropyDTables_t { /* Note : Space reserved for FSE Tables */ public _LLTable_e__FixedBuffer LLTable; + /* is also used as temporary workspace while building hufTable during DDict creation */ public _OFTable_e__FixedBuffer OFTable; + /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ public _MLTable_e__FixedBuffer MLTable; + /* can accommodate HUF_decompress4X */ public fixed uint hufTable[4097]; public fixed uint rep[3]; public fixed uint workspace[157]; + #if NET8_0_OR_GREATER [InlineArray(513)] public unsafe struct _LLTable_e__FixedBuffer @@ -1336,4 +1340,4 @@ public unsafe struct _MLTable_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs index e66390080..db2885ee7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs @@ -4,6 +4,6 @@ public enum ZSTD_forceIgnoreChecksum_e { /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ ZSTD_d_validateChecksum = 0, - ZSTD_d_ignoreChecksum = 1 + ZSTD_d_ignoreChecksum = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs index 3d3178801..c36c2ac78 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs @@ -4,9 +4,10 @@ public enum ZSTD_format_e { /* zstd frame format, specified in zstd_compression_format.md (default) */ ZSTD_f_zstd1 = 0, + /* Variant of zstd frame format, without initial 4-bytes magic number. * Useful to save 4 bytes per generated frame. * Decoder cannot recognise automatically this format, requiring this instruction. */ - ZSTD_f_zstd1_magicless = 1 + ZSTD_f_zstd1_magicless = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs index 8baa840de..23fb31b18 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs @@ -4,16 +4,19 @@ public struct ZSTD_frameHeader { /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 
0 means "empty" */ public ulong frameContentSize; + /* can be very large, up to <= frameContentSize */ public ulong windowSize; public uint blockSizeMax; + /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ public ZSTD_frameType_e frameType; public uint headerSize; + /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ public uint dictID; public uint checksumFlag; public uint _reserved1; public uint _reserved2; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs index 54c509374..8f03e4576 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs @@ -4,9 +4,11 @@ public struct ZSTD_frameParameters { /**< 1: content size will be in frame header (when known) */ public int contentSizeFlag; + /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ public int checksumFlag; + /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ public int noDictIDFlag; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs index 22b7fece6..8e9e46b06 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs @@ -4,15 +4,20 @@ public struct ZSTD_frameProgression { /* nb input bytes read and buffered */ public ulong ingested; + /* nb input bytes actually compressed */ public ulong consumed; + /* nb of compressed bytes generated and buffered */ public ulong produced; + /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ public ulong 
flushed; + /* MT only : latest started job nb */ public uint currentJobID; + /* MT only : nb of workers actively compressing at probe time */ public uint nbActiveWorkers; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs index 65eca157c..8d60693f1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs @@ -12,4 +12,4 @@ public struct ZSTD_frameSizeInfo public nuint compressedSize; public ulong decompressedBound; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs index 9a451ee98..4cb02e714 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_frameType_e { ZSTD_frame, - ZSTD_skippableFrame + ZSTD_skippableFrame, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs index a051c501d..0e89337b3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs @@ -12,7 +12,8 @@ public unsafe struct ZSTD_fseCTablesMetadata_t public SymbolEncodingType_e mlType; public fixed byte fseTablesBuffer[133]; public nuint fseTablesSize; + /* This is to account for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ public nuint lastCountSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs index b8b43c914..b716d40ee 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs @@ -9,4 +9,4 @@ public unsafe struct ZSTD_fseCTables_t public FSE_repeat matchlength_repeatMode; public FSE_repeat litlength_repeatMode; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs index 46fe512b3..f77f7116e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs @@ -5,4 +5,4 @@ public unsafe struct ZSTD_fseState public nuint state; public ZSTD_seqSymbol* table; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs index ab0bb109d..b6e49f4ed 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs @@ -3,5 +3,14 @@ namespace ZstdSharp.Unsafe { [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - public unsafe delegate uint ZSTD_getAllMatchesFn(ZSTD_match_t* param0, ZSTD_MatchState_t* param1, uint* param2, byte* param3, byte* param4, uint* rep, uint ll0, uint lengthToBeat); -} \ No newline at end of file + public unsafe delegate uint ZSTD_getAllMatchesFn( + ZSTD_match_t* param0, + ZSTD_MatchState_t* param1, + uint* param2, + byte* param3, + byte* param4, + uint* rep, + uint ll0, + uint lengthToBeat + ); +} diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs index 7a2b76049..d4b5d4da1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs @@ -14,4 +14,4 @@ public unsafe struct ZSTD_hufCTablesMetadata_t public fixed byte hufDesBuffer[128]; public nuint hufDesSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs index 00311fbb4..a697fa630 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs @@ -6,6 +6,7 @@ public unsafe struct ZSTD_hufCTables_t { public _CTable_e__FixedBuffer CTable; public HUF_repeat repeatMode; + #if NET8_0_OR_GREATER [InlineArray(257)] public unsafe struct _CTable_e__FixedBuffer @@ -276,4 +277,4 @@ public unsafe struct _CTable_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs index 660b16124..7a92d4439 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs @@ -7,9 +7,11 @@ public unsafe struct ZSTD_inBuffer_s { /**< start of input buffer */ public void* src; + /**< size of input buffer */ public nuint size; + /**< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size */ public nuint pos; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs index 3f225a7ea..ed9ef92b7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs @@ -8,6 +8,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_indexResetPolicy_e { ZSTDirp_continue, - ZSTDirp_reset + ZSTDirp_reset, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs index 8136cc517..69160e0b4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs @@ -4,9 +4,11 @@ public enum ZSTD_litLocation_e { /* Stored entirely within litExtraBuffer */ ZSTD_not_in_dst = 0, + /* Stored entirely within dst (in memory after current output write) */ ZSTD_in_dst = 1, + /* Split between litExtraBuffer and dst */ - ZSTD_split = 2 + ZSTD_split = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs index 6382a0c86..63d35773f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs @@ -6,10 +6,12 @@ public enum ZSTD_literalCompressionMode_e * Negative compression levels will be uncompressed, and positive compression * levels will be compressed. */ ZSTD_lcm_auto = 0, + /**< Always attempt Huffman compression. Uncompressed literals will still be * emitted if Huffman compression is not profitable. 
*/ ZSTD_lcm_huffman = 1, + /**< Always emit uncompressed literals. */ - ZSTD_lcm_uncompressed = 2 + ZSTD_lcm_uncompressed = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs index 9f8fa4a4f..9cbcf7c7a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs @@ -8,4 +8,4 @@ public unsafe struct ZSTD_localDict public ZSTD_dictContentType_e dictContentType; public ZSTD_CDict_s* cdict; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs index a31f06b2a..37c62011b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs @@ -5,9 +5,11 @@ public enum ZSTD_longLengthType_e { /* no longLengthType */ ZSTD_llt_none = 0, + /* represents a long literal */ ZSTD_llt_literalLength = 1, + /* represents a long match */ - ZSTD_llt_matchLength = 2 + ZSTD_llt_matchLength = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs index 2c67c67bd..3f67603de 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_longOffset_e { ZSTD_lo_isRegularOffset, - ZSTD_lo_isLongOffset = 1 + ZSTD_lo_isLongOffset = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs index b8a5f3d32..5bf187cc7 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs @@ -7,7 +7,8 @@ public struct ZSTD_match_t { /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ public uint off; + /* Raw length of match */ public uint len; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs index af341a4ff..ee85055b7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs @@ -7,6 +7,6 @@ public enum ZSTD_nextInputType_e ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, - ZSTDnit_skippableFrame + ZSTDnit_skippableFrame, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs index adc1a38f6..6ac86bf86 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs @@ -5,11 +5,14 @@ public struct ZSTD_optLdm_t { /* External match candidates store for this block */ public RawSeqStore_t seqStore; + /* Start position of the current match candidate */ public uint startPosInBlock; + /* End position of the current match candidate */ public uint endPosInBlock; + /* Offset of the match candidate */ public uint offset; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs index e7d7f5cb1..426617786 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs @@ -4,13 +4,17 @@ public unsafe struct ZSTD_optimal_t { /* price from beginning of segment to 
this position */ public int price; + /* offset of previous match */ public uint off; + /* length of previous match */ public uint mlen; + /* nb of literals since previous match */ public uint litlen; + /* offset history after previous match */ public fixed uint rep[3]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs index 6ecb86adc..9411454d8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs @@ -4,9 +4,11 @@ public unsafe struct ZSTD_outBuffer_s { /**< start of output buffer */ public void* dst; + /**< size of output buffer */ public nuint size; + /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ public nuint pos; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs index 02941dd0e..79bafe5ba 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_overlap_e { ZSTD_no_overlap, - ZSTD_overlap_src_before_dst + ZSTD_overlap_src_before_dst, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs index 99acee5c0..c769bcbb0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs @@ -4,9 +4,11 @@ public enum ZSTD_paramSwitch_e { /* Let the library automatically determine whether the feature shall be enabled */ ZSTD_ps_auto = 0, + /* Force-enable the feature */ ZSTD_ps_enable = 1, + /* Do not use 
the feature */ - ZSTD_ps_disable = 2 + ZSTD_ps_disable = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs index cff10cd14..01441a695 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs @@ -5,4 +5,4 @@ public struct ZSTD_parameters public ZSTD_compressionParameters cParams; public ZSTD_frameParameters fParams; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs index d3fc6d936..25eb04351 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs @@ -6,4 +6,4 @@ public unsafe struct ZSTD_prefixDict_s public nuint dictSize; public ZSTD_dictContentType_e dictContentType; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs index 36ebf2493..d8dd23e16 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs @@ -4,6 +4,6 @@ public enum ZSTD_refMultipleDDicts_e { /* Note: this enum controls ZSTD_d_refMultipleDDicts */ ZSTD_rmd_refSingleDDict = 0, - ZSTD_rmd_refMultipleDDicts = 1 + ZSTD_rmd_refMultipleDDicts = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs index 417ff5b66..ebb3d5d9f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_resetTarget_e { ZSTD_resetTarget_CDict, - ZSTD_resetTarget_CCtx + ZSTD_resetTarget_CCtx, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs index f78770e59..cf5d742a7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs @@ -6,6 +6,7 @@ public struct ZSTD_seqSymbol public byte nbAdditionalBits; public byte nbBits; public uint baseValue; + public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint baseValue) { this.nextState = nextState; @@ -14,4 +15,4 @@ public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint this.baseValue = baseValue; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs index 16925999b..573487311 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs @@ -8,4 +8,4 @@ public struct ZSTD_seqSymbol_header public uint fastMode; public uint tableLog; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs index ed8d9c8e6..5fc478421 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs @@ -4,7 +4,8 @@ public enum ZSTD_sequenceFormat_e { /* ZSTD_Sequence[] has no block delimiters, just sequences */ ZSTD_sf_noBlockDelimiters = 0, + /* ZSTD_Sequence[] contains explicit 
block delimiters */ - ZSTD_sf_explicitBlockDelimiters = 1 + ZSTD_sf_explicitBlockDelimiters = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs index e4e5e4b50..e6d8fb08a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs @@ -11,6 +11,6 @@ public enum ZSTD_strategy ZSTD_btlazy2 = 6, ZSTD_btopt = 7, ZSTD_btultra = 8, - ZSTD_btultra2 = 9 + ZSTD_btultra2 = 9, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs index bcfc57b2d..143226b19 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs @@ -9,8 +9,9 @@ public struct ZSTD_symbolEncodingTypeStats_t public uint Offtype; public uint MLtype; public nuint size; + /* Accounts for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ public nuint lastCountSize; public int longOffsets; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs index 8bb66024a..47a92f8f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum ZSTD_tableFillPurpose_e { ZSTD_tfp_forCCtx, - ZSTD_tfp_forCDict + ZSTD_tfp_forCDict, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs index 46325d7b4..53d92b75d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs @@ -4,18 +4,23 @@ public unsafe struct ZSTD_window_t { /* next block here to continue on current prefix */ public byte* nextSrc; + /* All regular indexes relative to this position */ public byte* @base; + /* extDict indexes relative to this position */ public byte* dictBase; + /* below that point, need extDict */ public uint dictLimit; + /* below that point, no more valid data */ public uint lowLimit; + /* Number of times overflow correction has run since * ZSTD_window_init(). Useful for debugging coredumps * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. 
*/ public uint nbOverflowCorrections; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs index ba6262bc1..0b50173da 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs @@ -17,9 +17,24 @@ public static string ZDICT_getErrorName(nuint errorCode) return ERR_getErrorName(errorCode); } - private static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters* @params, uint* countLit, uint* offsetcodeCount, uint* matchlengthCount, uint* litlengthCount, uint* repOffsets, void* src, nuint srcSize, uint notificationLevel) + private static void ZDICT_countEStats( + EStats_ress_t esr, + ZSTD_parameters* @params, + uint* countLit, + uint* offsetcodeCount, + uint* matchlengthCount, + uint* litlengthCount, + uint* repOffsets, + void* src, + nuint srcSize, + uint notificationLevel + ) { - nuint blockSizeMax = (nuint)(1 << 17 < 1 << (int)@params->cParams.windowLog ? 1 << 17 : 1 << (int)@params->cParams.windowLog); + nuint blockSizeMax = (nuint)( + 1 << 17 < 1 << (int)@params->cParams.windowLog + ? 
1 << 17 + : 1 << (int)@params->cParams.windowLog + ); nuint cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; @@ -125,7 +140,17 @@ private static void ZDICT_flatLit(uint* countLit) countLit[254] = 1; } - private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int compressionLevel, void* srcBuffer, nuint* fileSizes, uint nbFiles, void* dictBuffer, nuint dictBufferSize, uint notificationLevel) + private static nuint ZDICT_analyzeEntropy( + void* dstBuffer, + nuint maxDstSize, + int compressionLevel, + void* srcBuffer, + nuint* fileSizes, + uint nbFiles, + void* dictBuffer, + nuint dictBufferSize, + uint notificationLevel + ) { uint* countLit = stackalloc uint[256]; /* no final ; */ @@ -143,11 +168,17 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int { dict = null, zc = null, - workPlace = null + workPlace = null, }; ZSTD_parameters @params; - uint u, huffLog = 11, Offlog = 8, mlLog = 9, llLog = 9, total; - nuint pos = 0, errorCode; + uint u, + huffLog = 11, + Offlog = 8, + mlLog = 9, + llLog = 9, + total; + nuint pos = 0, + errorCode; nuint eSize = 0; nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 
1 : 0)); @@ -155,7 +186,9 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int uint* wksp = stackalloc uint[1216]; if (offcodeMax > 30) { - eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed)); + eSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed) + ); goto _cleanup; } @@ -173,7 +206,14 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int if (compressionLevel == 0) compressionLevel = 3; @params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); - esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_rawContent, @params.cParams, ZSTD_defaultCMem); + esr.dict = ZSTD_createCDict_advanced( + dictBuffer, + dictBufferSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_rawContent, + @params.cParams, + ZSTD_defaultCMem + ); esr.zc = ZSTD_createCCtx(); esr.workPlace = malloc(1 << 17); if (esr.dict == null || esr.zc == null || esr.workPlace == null) @@ -184,19 +224,35 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int for (u = 0; u < nbFiles; u++) { - ZDICT_countEStats(esr, &@params, countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset, (sbyte*)srcBuffer + pos, fileSizes[u], notificationLevel); + ZDICT_countEStats( + esr, + &@params, + countLit, + offcodeCount, + matchLengthCount, + litLengthCount, + repOffset, + (sbyte*)srcBuffer + pos, + fileSizes[u], + notificationLevel + ); pos += fileSizes[u]; } if (notificationLevel >= 4) { - for (u = 0; u <= offcodeMax; u++) - { - } + for (u = 0; u <= offcodeMax; u++) { } } { - nuint maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(uint) * 1216); + nuint maxNbBits = HUF_buildCTable_wksp( + hufTable, + countLit, + 255, + huffLog, + wksp, + sizeof(uint) * 1216 + ); if (ERR_isError(maxNbBits)) { eSize = 
maxNbBits; @@ -206,7 +262,14 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int if (maxNbBits == 8) { ZDICT_flatLit(countLit); - maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(uint) * 1216); + maxNbBits = HUF_buildCTable_wksp( + hufTable, + countLit, + 255, + huffLog, + wksp, + sizeof(uint) * 1216 + ); assert(maxNbBits == 9); } @@ -222,7 +285,14 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int total = 0; for (u = 0; u <= offcodeMax; u++) total += offcodeCount[u]; - errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, 1); + errorCode = FSE_normalizeCount( + offcodeNCount, + Offlog, + offcodeCount, + total, + offcodeMax, + 1 + ); if (ERR_isError(errorCode)) { eSize = errorCode; @@ -233,7 +303,14 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int total = 0; for (u = 0; u <= 52; u++) total += matchLengthCount[u]; - errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, 52, 1); + errorCode = FSE_normalizeCount( + matchLengthNCount, + mlLog, + matchLengthCount, + total, + 52, + 1 + ); if (ERR_isError(errorCode)) { eSize = errorCode; @@ -253,7 +330,15 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int llLog = (uint)errorCode; { - nuint hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(uint) * 1216); + nuint hhSize = HUF_writeCTable_wksp( + dstPtr, + maxDstSize, + hufTable, + 255, + huffLog, + wksp, + sizeof(uint) * 1216 + ); if (ERR_isError(hhSize)) { eSize = hhSize; @@ -314,7 +399,7 @@ private static nuint ZDICT_analyzeEntropy(void* dstBuffer, nuint maxDstSize, int MEM_writeLE32(dstPtr + 4, repStartValue[1]); MEM_writeLE32(dstPtr + 8, repStartValue[2]); eSize += 12; - _cleanup: + _cleanup: ZSTD_freeCDict(esr.dict); ZSTD_freeCCtx(esr.zc); free(esr.workPlace); @@ -368,7 +453,16 @@ private static uint 
ZDICT_maxRep(uint* reps) * * Samples are uncompressible * * Samples are all exactly the same */ - public static nuint ZDICT_finalizeDictionary(void* dictBuffer, nuint dictBufferCapacity, void* customDictContent, nuint dictContentSize, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_params_t @params) + public static nuint ZDICT_finalizeDictionary( + void* dictBuffer, + nuint dictBufferCapacity, + void* customDictContent, + nuint dictContentSize, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_params_t @params + ) { nuint hSize; byte* header = stackalloc byte[256]; @@ -391,7 +485,17 @@ public static nuint ZDICT_finalizeDictionary(void* dictBuffer, nuint dictBufferC hSize = 8; { - nuint eSize = ZDICT_analyzeEntropy(header + hSize, 256 - hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, customDictContent, dictContentSize, notificationLevel); + nuint eSize = ZDICT_analyzeEntropy( + header + hSize, + 256 - hSize, + compressionLevel, + samplesBuffer, + samplesSizes, + nbSamples, + customDictContent, + dictContentSize, + notificationLevel + ); if (ZDICT_isError(eSize)) return eSize; hSize += eSize; @@ -434,13 +538,31 @@ public static nuint ZDICT_finalizeDictionary(void* dictBuffer, nuint dictBufferC } } - private static nuint ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, nuint dictContentSize, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples, ZDICT_params_t @params) + private static nuint ZDICT_addEntropyTablesFromBuffer_advanced( + void* dictBuffer, + nuint dictContentSize, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_params_t @params + ) { int compressionLevel = @params.compressionLevel == 0 ? 
3 : @params.compressionLevel; uint notificationLevel = @params.notificationLevel; nuint hSize = 8; { - nuint eSize = ZDICT_analyzeEntropy((sbyte*)dictBuffer + hSize, dictBufferCapacity - hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, notificationLevel); + nuint eSize = ZDICT_analyzeEntropy( + (sbyte*)dictBuffer + hSize, + dictBufferCapacity - hSize, + compressionLevel, + samplesBuffer, + samplesSizes, + nbSamples, + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, + dictContentSize, + notificationLevel + ); if (ZDICT_isError(eSize)) return eSize; hSize += eSize; @@ -448,15 +570,25 @@ private static nuint ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, MEM_writeLE32(dictBuffer, 0xEC30A437); { - ulong randomID = ZSTD_XXH64((sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); + ulong randomID = ZSTD_XXH64( + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, + dictContentSize, + 0 + ); uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); uint dictID = @params.dictID != 0 ? @params.dictID : compliantID; MEM_writeLE32((sbyte*)dictBuffer + 4, dictID); } if (hSize + dictContentSize < dictBufferCapacity) - memmove((sbyte*)dictBuffer + hSize, (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); - return dictBufferCapacity < hSize + dictContentSize ? dictBufferCapacity : hSize + dictContentSize; + memmove( + (sbyte*)dictBuffer + hSize, + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, + dictContentSize + ); + return dictBufferCapacity < hSize + dictContentSize + ? dictBufferCapacity + : hSize + dictContentSize; } /*! ZDICT_trainFromBuffer(): @@ -479,23 +611,47 @@ private static nuint ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, * In general, it's recommended to provide a few thousands samples, though this can vary a lot. 
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ - public static nuint ZDICT_trainFromBuffer(void* dictBuffer, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples) + public static nuint ZDICT_trainFromBuffer( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples + ) { ZDICT_fastCover_params_t @params; - @params = new ZDICT_fastCover_params_t - { - d = 8, - steps = 4 - }; + @params = new ZDICT_fastCover_params_t { d = 8, steps = 4 }; @params.zParams.compressionLevel = 3; - return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, &@params); + return ZDICT_optimizeTrainFromBuffer_fastCover( + dictBuffer, + dictBufferCapacity, + samplesBuffer, + samplesSizes, + nbSamples, + &@params + ); } - public static nuint ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, nuint dictContentSize, nuint dictBufferCapacity, void* samplesBuffer, nuint* samplesSizes, uint nbSamples) + public static nuint ZDICT_addEntropyTablesFromBuffer( + void* dictBuffer, + nuint dictContentSize, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples + ) { ZDICT_params_t @params; @params = new ZDICT_params_t(); - return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, @params); + return ZDICT_addEntropyTablesFromBuffer_advanced( + dictBuffer, + dictContentSize, + dictBufferCapacity, + samplesBuffer, + samplesSizes, + nbSamples, + @params + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs index 875d50bab..f3a0972d0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs @@ -2,6 +2,10 @@ namespace 
ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static readonly ZSTD_customMem ZSTD_defaultCMem = new ZSTD_customMem(customAlloc: null, customFree: null, opaque: null); + private static readonly ZSTD_customMem ZSTD_defaultCMem = new ZSTD_customMem( + customAlloc: null, + customFree: null, + opaque: null + ); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs index 4cb1c744b..92312bf30 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs @@ -46,4 +46,4 @@ public static string ZSTD_getErrorString(ZSTD_ErrorCode code) return ERR_getErrorString(code); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs index e5eee65b4..46b98f3ad 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -1,9 +1,9 @@ -using static ZstdSharp.UnsafeHelper; using System; -using System.Runtime.InteropServices; +using System.Diagnostics; using System.Numerics; using System.Runtime.CompilerServices; -using System.Diagnostics; +using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -22,7 +22,12 @@ public static unsafe partial class Methods */ public static nuint ZSTD_compressBound(nuint srcSize) { - nuint r = srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) ? 0 : srcSize + (srcSize >> 8) + (srcSize < 128 << 10 ? (128 << 10) - srcSize >> 11 : 0); + nuint r = + srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) + ? 0 + : srcSize + + (srcSize >> 8) + + (srcSize < 128 << 10 ? 
(128 << 10) - srcSize >> 11 : 0); if (r == 0) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); return r; @@ -36,11 +41,7 @@ public static nuint ZSTD_compressBound(nuint srcSize) private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) { assert(cctx != null); - *cctx = new ZSTD_CCtx_s - { - customMem = memManager, - bmi2 = 0 - }; + *cctx = new ZSTD_CCtx_s { customMem = memManager, bmi2 = 0 }; { nuint err = ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_parameters); assert(!ERR_isError(err)); @@ -49,10 +50,16 @@ private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) public static ZSTD_CCtx_s* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) return null; { - ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_CCtx_s), customMem); + ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_CCtx_s), + customMem + ); if (cctx == null) return null; ZSTD_initCCtx(cctx, customMem); @@ -89,19 +96,50 @@ private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) return null; if (((nuint)workspace & 7) != 0) return null; - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc); + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc + ); cctx = (ZSTD_CCtx_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CCtx_s)); if (cctx == null) return null; *cctx = new ZSTD_CCtx_s(); ZSTD_cwksp_move(&cctx->workspace, &ws); cctx->staticSize = workspaceSize; - if (ZSTD_cwksp_check_available(&cctx->workspace, (nuint)(((8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? 
(8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208) + 2 * sizeof(ZSTD_compressedBlockState_t))) == 0) + if ( + ZSTD_cwksp_check_available( + &cctx->workspace, + (nuint)( + ( + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ) + + 2 * sizeof(ZSTD_compressedBlockState_t) + ) + ) == 0 + ) return null; - cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, (nuint)sizeof(ZSTD_compressedBlockState_t)); - cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, (nuint)sizeof(ZSTD_compressedBlockState_t)); - cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208); - cctx->tmpWkspSize = (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + &cctx->workspace, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + &cctx->workspace, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + cctx->tmpWorkspace = ZSTD_cwksp_reserve_object( + &cctx->workspace, + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); + cctx->tmpWkspSize = + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208; cctx->bmi2 = 0; return cctx; } @@ -166,7 +204,10 @@ public static nuint ZSTD_sizeof_CCtx(ZSTD_CCtx_s* cctx) { if (cctx == null) return 0; - return (nuint)(cctx->workspace.workspace == cctx ? 0 : sizeof(ZSTD_CCtx_s)) + ZSTD_cwksp_sizeof(&cctx->workspace) + ZSTD_sizeof_localDict(cctx->localDict) + ZSTD_sizeof_mtctx(cctx); + return (nuint)(cctx->workspace.workspace == cctx ? 
0 : sizeof(ZSTD_CCtx_s)) + + ZSTD_cwksp_sizeof(&cctx->workspace) + + ZSTD_sizeof_localDict(cctx->localDict) + + ZSTD_sizeof_mtctx(cctx); } public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) @@ -183,7 +224,9 @@ public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) /* Returns true if the strategy supports using a row based matchfinder */ private static int ZSTD_rowMatchFinderSupported(ZSTD_strategy strategy) { - return strategy >= ZSTD_strategy.ZSTD_greedy && strategy <= ZSTD_strategy.ZSTD_lazy2 ? 1 : 0; + return strategy >= ZSTD_strategy.ZSTD_greedy && strategy <= ZSTD_strategy.ZSTD_lazy2 + ? 1 + : 0; } /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder @@ -192,11 +235,18 @@ private static int ZSTD_rowMatchFinderSupported(ZSTD_strategy strategy) private static int ZSTD_rowMatchFinderUsed(ZSTD_strategy strategy, ZSTD_paramSwitch_e mode) { assert(mode != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return ZSTD_rowMatchFinderSupported(strategy) != 0 && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + return + ZSTD_rowMatchFinderSupported(strategy) != 0 + && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 
1 + : 0; } /* Returns row matchfinder usage given an initial mode and cParams */ - private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) { if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) return mode; @@ -209,29 +259,48 @@ private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitc } /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ - private static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + private static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) { if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) return mode; - return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 17 ? ZSTD_paramSwitch_e.ZSTD_ps_enable : ZSTD_paramSwitch_e.ZSTD_ps_disable; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 17 + ? ZSTD_paramSwitch_e.ZSTD_ps_enable + : ZSTD_paramSwitch_e.ZSTD_ps_disable; } /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ - private static int ZSTD_allocateChainTable(ZSTD_strategy strategy, ZSTD_paramSwitch_e useRowMatchFinder, uint forDDSDict) + private static int ZSTD_allocateChainTable( + ZSTD_strategy strategy, + ZSTD_paramSwitch_e useRowMatchFinder, + uint forDDSDict + ) { assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return forDDSDict != 0 || strategy != ZSTD_strategy.ZSTD_fast && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 ? 1 : 0; + return + forDDSDict != 0 + || strategy != ZSTD_strategy.ZSTD_fast + && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 + ? 
1 + : 0; } /* Returns ZSTD_ps_enable if compression parameters are such that we should * enable long distance matching (wlog >= 27, strategy >= btopt). * Returns ZSTD_ps_disable otherwise. */ - private static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, ZSTD_compressionParameters* cParams) + private static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) { if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) return mode; - return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 27 ? ZSTD_paramSwitch_e.ZSTD_ps_enable : ZSTD_paramSwitch_e.ZSTD_ps_disable; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 27 + ? ZSTD_paramSwitch_e.ZSTD_ps_enable + : ZSTD_paramSwitch_e.ZSTD_ps_disable; } private static int ZSTD_resolveExternalSequenceValidation(int mode) @@ -252,7 +321,10 @@ private static nuint ZSTD_resolveMaxBlockSize(nuint maxBlockSize) } } - private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) + private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch( + ZSTD_paramSwitch_e value, + int cLevel + ) { if (value != ZSTD_paramSwitch_e.ZSTD_ps_auto) return value; @@ -270,15 +342,24 @@ private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSw * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */ private static int ZSTD_CDictIndicesAreTagged(ZSTD_compressionParameters* cParams) { - return cParams->strategy == ZSTD_strategy.ZSTD_fast || cParams->strategy == ZSTD_strategy.ZSTD_dfast ? 1 : 0; + return + cParams->strategy == ZSTD_strategy.ZSTD_fast + || cParams->strategy == ZSTD_strategy.ZSTD_dfast + ? 
1 + : 0; } - private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams(ZSTD_compressionParameters cParams) + private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams( + ZSTD_compressionParameters cParams + ) { ZSTD_CCtx_params_s cctxParams; ZSTD_CCtxParams_init(&cctxParams, 3); cctxParams.cParams = cParams; - cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm( + cctxParams.ldmParams.enableLdm, + &cParams + ); if (cctxParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); @@ -286,11 +367,22 @@ private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams(ZSTD_compressio assert(cctxParams.ldmParams.hashRateLog < 32); } - cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams); - cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); - cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode( + cctxParams.postBlockSplitter, + &cParams + ); + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams.useRowMatchFinder, + &cParams + ); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation( + cctxParams.validateSequences + ); cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); - cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, cctxParams.compressionLevel); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel + ); assert(ZSTD_checkCParams(cParams) == 0); return cctxParams; } @@ -298,9 +390,15 @@ private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams(ZSTD_compressio 
private static ZSTD_CCtx_params_s* ZSTD_createCCtxParams_advanced(ZSTD_customMem customMem) { ZSTD_CCtx_params_s* @params; - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) return null; - @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc((nuint)sizeof(ZSTD_CCtx_params_s), customMem); + @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc( + (nuint)sizeof(ZSTD_CCtx_params_s), + customMem + ); if (@params == null) { return null; @@ -356,17 +454,17 @@ public static nuint ZSTD_CCtxParams_reset(ZSTD_CCtx_params_s* @params) * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ - public static nuint ZSTD_CCtxParams_init(ZSTD_CCtx_params_s* cctxParams, int compressionLevel) + public static nuint ZSTD_CCtxParams_init( + ZSTD_CCtx_params_s* cctxParams, + int compressionLevel + ) { if (cctxParams == null) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - *cctxParams = new ZSTD_CCtx_params_s - { - compressionLevel = compressionLevel - }; + *cctxParams = new ZSTD_CCtx_params_s { compressionLevel = compressionLevel }; cctxParams->fParams.contentSizeFlag = 1; return 0; } @@ -375,7 +473,11 @@ public static nuint ZSTD_CCtxParams_init(ZSTD_CCtx_params_s* cctxParams, int com * Initializes `cctxParams` from `params` and `compressionLevel`. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. 
*/ - private static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters* @params, int compressionLevel) + private static void ZSTD_CCtxParams_init_internal( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters* @params, + int compressionLevel + ) { assert(ZSTD_checkCParams(@params->cParams) == 0); *cctxParams = new ZSTD_CCtx_params_s @@ -383,20 +485,37 @@ private static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params_s* cctxParams cParams = @params->cParams, fParams = @params->fParams, compressionLevel = compressionLevel, - useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &@params->cParams), - postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &@params->cParams) + useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams->useRowMatchFinder, + &@params->cParams + ), + postBlockSplitter = ZSTD_resolveBlockSplitterMode( + cctxParams->postBlockSplitter, + &@params->cParams + ), }; - cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &@params->cParams); - cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm( + cctxParams->ldmParams.enableLdm, + &@params->cParams + ); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation( + cctxParams->validateSequences + ); cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); - cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + cctxParams->searchForExternalRepcodes, + compressionLevel + ); } /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. 
*/ - public static nuint ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters @params) + public static nuint ZSTD_CCtxParams_init_advanced( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters @params + ) { if (cctxParams == null) { @@ -419,7 +538,10 @@ public static nuint ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params_s* cctxParams * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. * @param params Validated zstd parameters. */ - private static void ZSTD_CCtxParams_setZstdParams(ZSTD_CCtx_params_s* cctxParams, ZSTD_parameters* @params) + private static void ZSTD_CCtxParams_setZstdParams( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters* @params + ) { assert(ZSTD_checkCParams(@params->cParams) == 0); cctxParams->cParams = @params->cParams; @@ -440,7 +562,7 @@ public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) { error = 0, lowerBound = 0, - upperBound = 0 + upperBound = 0, }; switch (param) { @@ -454,7 +576,12 @@ public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_cParameter.ZSTD_c_hashLog: bounds.lowerBound = 6; - bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30; + bounds.upperBound = + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30; return bounds; case ZSTD_cParameter.ZSTD_c_chainLog: bounds.lowerBound = 6; @@ -510,7 +637,12 @@ public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_cParameter.ZSTD_c_ldmHashLog: bounds.lowerBound = 6; - bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30; + bounds.upperBound = + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 
30 + : 31 + : 30; return bounds; case ZSTD_cParameter.ZSTD_c_ldmMinMatch: bounds.lowerBound = 4; @@ -598,7 +730,9 @@ public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; return bounds; default: - bounds.error = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + bounds.error = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); return bounds; } } @@ -678,7 +812,11 @@ private static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) * new parameters will be active for next job only (after a flush()). * @return : an error code (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int value) + public static nuint ZSTD_CCtx_setParameter( + ZSTD_CCtx_s* cctx, + ZSTD_cParameter param, + int value + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -697,7 +835,9 @@ public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter pa case ZSTD_cParameter.ZSTD_c_nbWorkers: if (value != 0 && cctx->staticSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } break; @@ -741,7 +881,9 @@ public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter pa case ZSTD_cParameter.ZSTD_c_experimentalParam19: break; default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); @@ -755,42 +897,53 @@ public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter pa * @result : a code representing success or failure (which can be tested with * ZSTD_isError()). 
*/ - public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, ZSTD_cParameter param, int value) + public static nuint ZSTD_CCtxParams_setParameter( + ZSTD_CCtx_params_s* CCtxParams, + ZSTD_cParameter param, + int value + ) { switch (param) { case ZSTD_cParameter.ZSTD_c_experimentalParam2: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->format = (ZSTD_format_e)value; return (nuint)CCtxParams->format; case ZSTD_cParameter.ZSTD_c_compressionLevel: + { { + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - if (value == 0) - CCtxParams->compressionLevel = 3; - else - CCtxParams->compressionLevel = value; - if (CCtxParams->compressionLevel >= 0) - return (nuint)CCtxParams->compressionLevel; - return 0; } + if (value == 0) + CCtxParams->compressionLevel = 3; + else + CCtxParams->compressionLevel = value; + if (CCtxParams->compressionLevel >= 0) + return (nuint)CCtxParams->compressionLevel; + return 0; + } + case ZSTD_cParameter.ZSTD_c_windowLog: if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.windowLog = (uint)value; @@ -799,7 +952,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, value) == 0) { - return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.hashLog = (uint)value; @@ -808,7 +963,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.chainLog = (uint)value; @@ -817,7 +974,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.searchLog = (uint)value; @@ -826,7 +985,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.minMatch = (uint)value; @@ -834,7 +995,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, case ZSTD_cParameter.ZSTD_c_targetLength: if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.targetLength = (uint)value; @@ -843,7 +1006,9 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if 
(value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->cParams.strategy = (ZSTD_strategy)value; @@ -861,29 +1026,43 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, CCtxParams->forceWindow = value != 0 ? 1 : 0; return (nuint)CCtxParams->forceWindow; case ZSTD_cParameter.ZSTD_c_experimentalParam4: + { + ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam4, + (int)pref + ) == 0 + ) { - ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam4, (int)pref) == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } - - CCtxParams->attachDictPref = pref; - return (nuint)CCtxParams->attachDictPref; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } + CCtxParams->attachDictPref = pref; + return (nuint)CCtxParams->attachDictPref; + } + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + { + ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam5, + (int)lcm + ) == 0 + ) { - ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam5, (int)lcm) == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } - - CCtxParams->literalCompressionMode = lcm; - return (nuint)CCtxParams->literalCompressionMode; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } + CCtxParams->literalCompressionMode = lcm; + return (nuint)CCtxParams->literalCompressionMode; + } + case 
ZSTD_cParameter.ZSTD_c_nbWorkers: { nuint err_code = ZSTD_cParam_clampBounds(param, &value); @@ -898,6 +1077,7 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, case ZSTD_cParameter.ZSTD_c_jobSize: if (value != 0 && value < 512 * (1 << 10)) value = 512 * (1 << 10); + { nuint err_code = ZSTD_cParam_clampBounds(param, &value); if (ERR_isError(err_code)) @@ -911,7 +1091,10 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, return CCtxParams->jobSize; case ZSTD_cParameter.ZSTD_c_overlapLog: { - nuint err_code = ZSTD_cParam_clampBounds(ZSTD_cParameter.ZSTD_c_overlapLog, &value); + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); if (ERR_isError(err_code)) { return err_code; @@ -922,7 +1105,10 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, return (nuint)CCtxParams->overlapLog; case ZSTD_cParameter.ZSTD_c_experimentalParam1: { - nuint err_code = ZSTD_cParam_clampBounds(ZSTD_cParameter.ZSTD_c_overlapLog, &value); + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); if (ERR_isError(err_code)) { return err_code; @@ -935,9 +1121,16 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, CCtxParams->enableDedicatedDictSearch = value != 0 ? 
1 : 0; return (nuint)CCtxParams->enableDedicatedDictSearch; case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching, value) == 0) + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; @@ -946,34 +1139,50 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashLog, value) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->ldmParams.hashLog = (uint)value; return CCtxParams->ldmParams.hashLog; case ZSTD_cParameter.ZSTD_c_ldmMinMatch: if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->ldmParams.minMatchLength = (uint)value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->ldmParams.bucketSizeLog = (uint)value; return 
CCtxParams->ldmParams.bucketSizeLog; case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->ldmParams.hashRateLog = (uint)value; @@ -982,9 +1191,14 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, if (value != 0) { value = value > 1340 ? value : 1340; - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetCBlockSize, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetCBlockSize, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -992,113 +1206,184 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, return CCtxParams->targetCBlockSize; case ZSTD_cParameter.ZSTD_c_experimentalParam7: if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam7, value) == 0) + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam7, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->srcSizeHint = value; return (nuint)CCtxParams->srcSizeHint; case ZSTD_cParameter.ZSTD_c_experimentalParam9: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; return (nuint)CCtxParams->inBufferMode; case ZSTD_cParameter.ZSTD_c_experimentalParam10: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; return (nuint)CCtxParams->outBufferMode; case ZSTD_cParameter.ZSTD_c_experimentalParam11: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; return (nuint)CCtxParams->blockDelimiters; case ZSTD_cParameter.ZSTD_c_experimentalParam12: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->validateSequences = value; return (nuint)CCtxParams->validateSequences; case ZSTD_cParameter.ZSTD_c_experimentalParam13: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return 
unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->postBlockSplitter = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->postBlockSplitter; case ZSTD_cParameter.ZSTD_c_experimentalParam20: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->preBlockSplitter_level = value; return (nuint)CCtxParams->preBlockSplitter_level; case ZSTD_cParameter.ZSTD_c_experimentalParam14: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->useRowMatchFinder; case ZSTD_cParameter.ZSTD_c_experimentalParam15: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->deterministicRefPrefix = !(value == 0) ? 
1 : 0; return (nuint)CCtxParams->deterministicRefPrefix; case ZSTD_cParameter.ZSTD_c_experimentalParam16: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->prefetchCDictTables; case ZSTD_cParameter.ZSTD_c_experimentalParam17: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->enableMatchFinderFallback = value; return (nuint)CCtxParams->enableMatchFinderFallback; case ZSTD_cParameter.ZSTD_c_experimentalParam18: if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam18, value) == 0) + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam18, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } assert(value >= 0); CCtxParams->maxBlockSize = (nuint)value; return CCtxParams->maxBlockSize; case ZSTD_cParameter.ZSTD_c_experimentalParam19: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->searchForExternalRepcodes; default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } } @@ -1107,7 +1392,11 @@ public static nuint ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params_s* CCtxParams, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ - public static nuint ZSTD_CCtx_getParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int* value) + public static nuint ZSTD_CCtx_getParameter( + ZSTD_CCtx_s* cctx, + ZSTD_cParameter param, + int* value + ) { return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); } @@ -1117,7 +1406,11 @@ public static nuint ZSTD_CCtx_getParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter pa * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ - public static nuint ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params_s* CCtxParams, ZSTD_cParameter param, int* value) + public static nuint ZSTD_CCtxParams_getParameter( + ZSTD_CCtx_params_s* CCtxParams, + ZSTD_cParameter param, + int* value + ) { switch (param) { @@ -1240,7 +1533,9 @@ public static nuint ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params_s* CCtxParams, *value = (int)CCtxParams->searchForExternalRepcodes; break; default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } return 0; @@ -1253,7 +1548,10 @@ public static nuint ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params_s* CCtxParams, * This is possible even if a compression is ongoing. 
* In which case, new parameters will be applied on the fly, starting with next compression job. */ - public static nuint ZSTD_CCtx_setParametersUsingCCtxParams(ZSTD_CCtx_s* cctx, ZSTD_CCtx_params_s* @params) + public static nuint ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx_s* cctx, + ZSTD_CCtx_params_s* @params + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -1276,7 +1574,10 @@ public static nuint ZSTD_CCtx_setParametersUsingCCtxParams(ZSTD_CCtx_s* cctx, ZS * @return 0 on success, or an error code (can be checked with ZSTD_isError()). * On failure, no parameters are updated. */ - public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionParameters cparams) + public static nuint ZSTD_CCtx_setCParams( + ZSTD_CCtx_s* cctx, + ZSTD_compressionParameters cparams + ) { { /* only update if all parameters are valid */ @@ -1288,7 +1589,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_windowLog, (int)cparams.windowLog); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_windowLog, + (int)cparams.windowLog + ); if (ERR_isError(err_code)) { return err_code; @@ -1296,7 +1601,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_chainLog, (int)cparams.chainLog); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_chainLog, + (int)cparams.chainLog + ); if (ERR_isError(err_code)) { return err_code; @@ -1304,7 +1613,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_hashLog, (int)cparams.hashLog); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_hashLog, + (int)cparams.hashLog + ); if (ERR_isError(err_code)) { return err_code; @@ -1312,7 
+1625,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_searchLog, (int)cparams.searchLog); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_searchLog, + (int)cparams.searchLog + ); if (ERR_isError(err_code)) { return err_code; @@ -1320,7 +1637,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_minMatch, (int)cparams.minMatch); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_minMatch, + (int)cparams.minMatch + ); if (ERR_isError(err_code)) { return err_code; @@ -1328,7 +1649,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_targetLength, (int)cparams.targetLength); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_targetLength, + (int)cparams.targetLength + ); if (ERR_isError(err_code)) { return err_code; @@ -1336,7 +1661,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_strategy, (int)cparams.strategy); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_strategy, + (int)cparams.strategy + ); if (ERR_isError(err_code)) { return err_code; @@ -1353,7 +1682,11 @@ public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionPara public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters fparams) { { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0 ? 1 : 0); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_contentSizeFlag, + fparams.contentSizeFlag != 0 ? 
1 : 0 + ); if (ERR_isError(err_code)) { return err_code; @@ -1361,7 +1694,11 @@ public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_checksumFlag, fparams.checksumFlag != 0 ? 1 : 0); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_checksumFlag, + fparams.checksumFlag != 0 ? 1 : 0 + ); if (ERR_isError(err_code)) { return err_code; @@ -1369,7 +1706,11 @@ public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters } { - nuint err_code = ZSTD_CCtx_setParameter(cctx, ZSTD_cParameter.ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0 ? 1 : 0); + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_dictIDFlag, + fparams.noDictIDFlag == 0 ? 1 : 0 + ); if (ERR_isError(err_code)) { return err_code; @@ -1466,7 +1807,14 @@ private static nuint ZSTD_initLocalDict(ZSTD_CCtx_s* cctx) assert(dl->dictSize > 0); assert(cctx->cdict == null); assert(cctx->prefixDict.dict == null); - dl->cdict = ZSTD_createCDict_advanced2(dl->dict, dl->dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dl->dictContentType, &cctx->requestedParams, cctx->customMem); + dl->cdict = ZSTD_createCDict_advanced2( + dl->dict, + dl->dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dl->dictContentType, + &cctx->requestedParams, + cctx->customMem + ); if (dl->cdict == null) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); @@ -1480,7 +1828,13 @@ private static nuint ZSTD_initLocalDict(ZSTD_CCtx_s* cctx) * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?) 
*/ - public static nuint ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + public static nuint ZSTD_CCtx_loadDictionary_advanced( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -1522,9 +1876,19 @@ public static nuint ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx_s* cctx, void* d /*! ZSTD_CCtx_loadDictionary_byReference() : * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ - public static nuint ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize) + public static nuint ZSTD_CCtx_loadDictionary_byReference( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize + ) { - return ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto); + return ZSTD_CCtx_loadDictionary_advanced( + cctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); } /*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ @@ -1551,7 +1915,13 @@ public static nuint ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx_s* cctx, void */ public static nuint ZSTD_CCtx_loadDictionary(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize) { - return ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto); + return ZSTD_CCtx_loadDictionary_advanced( + cctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); } /*! 
ZSTD_CCtx_refCDict() : Requires v1.4.0+ @@ -1610,13 +1980,23 @@ public static nuint ZSTD_CCtx_refThreadPool(ZSTD_CCtx_s* cctx, void* pool) * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ public static nuint ZSTD_CCtx_refPrefix(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize) { - return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dictContentType_e.ZSTD_dct_rawContent); + return ZSTD_CCtx_refPrefix_advanced( + cctx, + prefix, + prefixSize, + ZSTD_dictContentType_e.ZSTD_dct_rawContent + ); } /*! ZSTD_CCtx_refPrefix_advanced() : * Same as ZSTD_CCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ - public static nuint ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize, ZSTD_dictContentType_e dictContentType) + public static nuint ZSTD_CCtx_refPrefix_advanced( + ZSTD_CCtx_s* cctx, + void* prefix, + nuint prefixSize, + ZSTD_dictContentType_e dictContentType + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -1638,13 +2018,19 @@ public static nuint ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx_s* cctx, void* prefix * Also dumps dictionary */ public static nuint ZSTD_CCtx_reset(ZSTD_CCtx_s* cctx, ZSTD_ResetDirective reset) { - if (reset == ZSTD_ResetDirective.ZSTD_reset_session_only || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_session_only + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { cctx->streamStage = ZSTD_cStreamStage.zcss_init; cctx->pledgedSrcSizePlusOne = 0; } - if (reset == ZSTD_ResetDirective.ZSTD_reset_parameters || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_parameters + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -1663,12 +2049,18 
@@ control CParam values remain within authorized range. @return : 0, or an error code if one value is beyond authorized range */ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) { - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) + == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) + == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } @@ -1678,22 +2070,36 @@ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) + == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) + == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, (int)cParams.targetLength) == 0) + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_targetLength, + (int)cParams.targetLength + ) == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) == 0) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) + == 
0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } @@ -1704,7 +2110,9 @@ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) /** ZSTD_clampCParams() : * make CParam values within valid range. * @return : valid CParams */ - private static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) + private static ZSTD_compressionParameters ZSTD_clampCParams( + ZSTD_compressionParameters cParams + ) { { ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_windowLog); @@ -1816,7 +2224,13 @@ private static uint ZSTD_dictAndWindowLog(uint windowLog, ulong srcSize, ulong d * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. * note : `srcSize==0` means 0! * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ - private static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, ulong srcSize, nuint dictSize, ZSTD_CParamMode_e mode, ZSTD_paramSwitch_e useRowMatchFinder) + private static ZSTD_compressionParameters ZSTD_adjustCParams_internal( + ZSTD_compressionParameters cPar, + ulong srcSize, + nuint dictSize, + ZSTD_CParamMode_e mode, + ZSTD_paramSwitch_e useRowMatchFinder + ) { /* (1<<9) + 1 */ const ulong minSrcSize = 513; @@ -1860,7 +2274,10 @@ private static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compr if (cPar.windowLog < 10) cPar.windowLog = 10; - if (mode == ZSTD_CParamMode_e.ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar) != 0) + if ( + mode == ZSTD_CParamMode_e.ZSTD_cpm_createCDict + && ZSTD_CDictIndicesAreTagged(&cPar) != 0 + ) { const uint maxShortCacheHashLog = 32 - 8; if (cPar.hashLog > maxShortCacheHashLog) @@ -1879,7 +2296,10 @@ private static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compr if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder) != 0) { /* Switch to 32-entry rows if searchLog is 5 (or more) */ - 
uint rowLog = cPar.searchLog <= 4 ? 4 : cPar.searchLog <= 6 ? cPar.searchLog : 6; + uint rowLog = + cPar.searchLog <= 4 ? 4 + : cPar.searchLog <= 6 ? cPar.searchLog + : 6; const uint maxRowHashLog = 32 - 8; uint maxHashLog = maxRowHashLog + rowLog; assert(cPar.hashLog >= rowLog); @@ -1898,15 +2318,28 @@ private static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compr * `dictSize` must be `0` when there is no dictionary. * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. * This function never fails (wide contract) */ - public static ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, ulong srcSize, nuint dictSize) + public static ZSTD_compressionParameters ZSTD_adjustCParams( + ZSTD_compressionParameters cPar, + ulong srcSize, + nuint dictSize + ) { cPar = ZSTD_clampCParams(cPar); if (srcSize == 0) srcSize = unchecked(0UL - 1); - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown, ZSTD_paramSwitch_e.ZSTD_ps_auto); + return ZSTD_adjustCParams_internal( + cPar, + srcSize, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown, + ZSTD_paramSwitch_e.ZSTD_ps_auto + ); } - private static void ZSTD_overrideCParams(ZSTD_compressionParameters* cParams, ZSTD_compressionParameters* overrides) + private static void ZSTD_overrideCParams( + ZSTD_compressionParameters* cParams, + ZSTD_compressionParameters* overrides + ) { if (overrides->windowLog != 0) cParams->windowLog = overrides->windowLog; @@ -1929,7 +2362,12 @@ private static void ZSTD_overrideCParams(ZSTD_compressionParameters* cParams, ZS * LDM and manually set compression parameters. * Note: srcSizeHint == 0 means 0! 
*/ - private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(ZSTD_CCtx_params_s* CCtxParams, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + ZSTD_CCtx_params_s* CCtxParams, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) { ZSTD_compressionParameters cParams; if (srcSizeHint == unchecked(0UL - 1) && CCtxParams->srcSizeHint > 0) @@ -1938,27 +2376,70 @@ private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(ZSTD_CCt srcSizeHint = (ulong)CCtxParams->srcSizeHint; } - cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); + cParams = ZSTD_getCParams_internal( + CCtxParams->compressionLevel, + srcSizeHint, + dictSize, + mode + ); if (CCtxParams->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) cParams.windowLog = 27; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(ZSTD_checkCParams(cParams) == 0); - return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder); - } - - private static nuint ZSTD_sizeof_matchState(ZSTD_compressionParameters* cParams, ZSTD_paramSwitch_e useRowMatchFinder, int enableDedicatedDictSearch, uint forCCtx) + return ZSTD_adjustCParams_internal( + cParams, + srcSizeHint, + dictSize, + mode, + CCtxParams->useRowMatchFinder + ); + } + + private static nuint ZSTD_sizeof_matchState( + ZSTD_compressionParameters* cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + uint forCCtx + ) { /* chain table size should be 0 for fast or row-hash strategies */ - nuint chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch != 0 && forCCtx == 0 ? 1U : 0U) != 0 ? (nuint)1 << (int)cParams->chainLog : 0; + nuint chainSize = + ZSTD_allocateChainTable( + cParams->strategy, + useRowMatchFinder, + enableDedicatedDictSearch != 0 && forCCtx == 0 ? 
1U : 0U + ) != 0 + ? (nuint)1 << (int)cParams->chainLog + : 0; nuint hSize = (nuint)1 << (int)cParams->hashLog; - uint hashLog3 = forCCtx != 0 && cParams->minMatch == 3 ? 17 < cParams->windowLog ? 17 : cParams->windowLog : 0; + uint hashLog3 = + forCCtx != 0 && cParams->minMatch == 3 + ? 17 < cParams->windowLog + ? 17 + : cParams->windowLog + : 0; nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't * surrounded by redzones in ASAN. */ - nuint tableSpace = chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); - nuint optPotentialSpace = ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t))); - nuint lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 ? ZSTD_cwksp_aligned64_alloc_size(hSize) : 0; - nuint optSpace = forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt ? optPotentialSpace : 0; + nuint tableSpace = + chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); + nuint optPotentialSpace = + ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) + + ZSTD_cwksp_aligned64_alloc_size( + (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) + ); + nuint lazyAdditionalSpace = + ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 + ? 
ZSTD_cwksp_aligned64_alloc_size(hSize) + : 0; + nuint optSpace = + forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt + ? optPotentialSpace + : 0; nuint slackSpace = ZSTD_cwksp_slack_space_required(); assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; @@ -1972,37 +2453,98 @@ private static nuint ZSTD_maxNbSeq(nuint blockSize, uint minMatch, int useSequen return blockSize / divider; } - private static nuint ZSTD_estimateCCtxSize_usingCCtxParams_internal(ZSTD_compressionParameters* cParams, ldmParams_t* ldmParams, int isStatic, ZSTD_paramSwitch_e useRowMatchFinder, nuint buffInSize, nuint buffOutSize, ulong pledgedSrcSize, int useSequenceProducer, nuint maxBlockSize) - { - nuint windowSize = (nuint)(1UL << (int)cParams->windowLog <= 1UL ? 1UL : 1UL << (int)cParams->windowLog <= pledgedSrcSize ? 1UL << (int)cParams->windowLog : pledgedSrcSize); - nuint blockSize = ZSTD_resolveMaxBlockSize(maxBlockSize) < windowSize ? ZSTD_resolveMaxBlockSize(maxBlockSize) : windowSize; + private static nuint ZSTD_estimateCCtxSize_usingCCtxParams_internal( + ZSTD_compressionParameters* cParams, + ldmParams_t* ldmParams, + int isStatic, + ZSTD_paramSwitch_e useRowMatchFinder, + nuint buffInSize, + nuint buffOutSize, + ulong pledgedSrcSize, + int useSequenceProducer, + nuint maxBlockSize + ) + { + nuint windowSize = (nuint)( + 1UL << (int)cParams->windowLog <= 1UL ? 1UL + : 1UL << (int)cParams->windowLog <= pledgedSrcSize ? 1UL << (int)cParams->windowLog + : pledgedSrcSize + ); + nuint blockSize = + ZSTD_resolveMaxBlockSize(maxBlockSize) < windowSize + ? 
ZSTD_resolveMaxBlockSize(maxBlockSize) + : windowSize; nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); - nuint tokenSpace = ZSTD_cwksp_alloc_size(32 + blockSize) + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * (nuint)sizeof(SeqDef_s)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(byte)); - nuint tmpWorkSpace = ZSTD_cwksp_alloc_size((8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208); - nuint blockStateSpace = 2 * ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_compressedBlockState_t)); + nuint tokenSpace = + ZSTD_cwksp_alloc_size(32 + blockSize) + + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * (nuint)sizeof(SeqDef_s)) + + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(byte)); + nuint tmpWorkSpace = ZSTD_cwksp_alloc_size( + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); + nuint blockStateSpace = + 2 * ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_compressedBlockState_t)); /* enableDedicatedDictSearch */ nuint matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, 0, 1); nuint ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); - nuint ldmSeqSpace = ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) : 0; - nuint bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); + nuint ldmSeqSpace = + ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) + : 0; + nuint bufferSpace = + ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); nuint cctxSpace = isStatic != 0 ? ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CCtx_s)) : 0; nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); - nuint externalSeqSpace = useSequenceProducer != 0 ? 
ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence)) : 0; - nuint neededSpace = cctxSpace + tmpWorkSpace + blockStateSpace + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + bufferSpace + externalSeqSpace; + nuint externalSeqSpace = + useSequenceProducer != 0 + ? ZSTD_cwksp_aligned64_alloc_size( + maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) + ) + : 0; + nuint neededSpace = + cctxSpace + + tmpWorkSpace + + blockStateSpace + + ldmSpace + + ldmSeqSpace + + matchStateSize + + tokenSpace + + bufferSpace + + externalSeqSpace; return neededSpace; } public static nuint ZSTD_estimateCCtxSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) { - ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(@params, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); - ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params->useRowMatchFinder, &cParams); + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( + @params, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + @params->useRowMatchFinder, + &cParams + ); if (@params->nbWorkers > 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - return ZSTD_estimateCCtxSize_usingCCtxParams_internal(&cParams, &@params->ldmParams, 1, useRowMatchFinder, 0, 0, unchecked(0UL - 1), ZSTD_hasExtSeqProd(@params), @params->maxBlockSize); + return ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &cParams, + &@params->ldmParams, + 1, + useRowMatchFinder, + 0, + 0, + unchecked(0UL - 1), + ZSTD_hasExtSeqProd(@params), + @params->maxBlockSize + ); } public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) @@ -2026,18 +2568,26 @@ public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameter } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_srcSizeTiers => new ulong[4] - { - 16 * 
(1 << 10), - 128 * (1 << 10), - 256 * (1 << 10), - unchecked(0UL - 1) - }; - private static ulong* srcSizeTiers => (ulong*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_srcSizeTiers)); + private static ReadOnlySpan Span_srcSizeTiers => + new ulong[4] { 16 * (1 << 10), 128 * (1 << 10), 256 * (1 << 10), unchecked(0UL - 1) }; + private static ulong* srcSizeTiers => + (ulong*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_srcSizeTiers) + ); #else - private static readonly ulong* srcSizeTiers = GetArrayPointer(new ulong[4] { (ulong)(16 * (1 << 10)), (ulong)(128 * (1 << 10)), (ulong)(256 * (1 << 10)), (unchecked(0UL - 1)) }); + private static readonly ulong* srcSizeTiers = GetArrayPointer( + new ulong[4] + { + (ulong)(16 * (1 << 10)), + (ulong)(128 * (1 << 10)), + (ulong)(256 * (1 << 10)), + (unchecked(0UL - 1)), + } + ); #endif + private static nuint ZSTD_estimateCCtxSize_internal(int compressionLevel) { int tier = 0; @@ -2045,8 +2595,16 @@ private static nuint ZSTD_estimateCCtxSize_internal(int compressionLevel) for (; tier < 4; ++tier) { /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */ - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); - largestSize = ZSTD_estimateCCtxSize_usingCParams(cParams) > largestSize ? ZSTD_estimateCCtxSize_usingCParams(cParams) : largestSize; + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + srcSizeTiers[tier], + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + largestSize = + ZSTD_estimateCCtxSize_usingCParams(cParams) > largestSize + ? ZSTD_estimateCCtxSize_usingCParams(cParams) + : largestSize; } return largestSize; @@ -2083,7 +2641,11 @@ public static nuint ZSTD_estimateCCtxSize(int compressionLevel) { int level; nuint memBudget = 0; - for (level = compressionLevel < 1 ? 
compressionLevel : 1; level <= compressionLevel; level++) + for ( + level = compressionLevel < 1 ? compressionLevel : 1; + level <= compressionLevel; + level++ + ) { /* Ensure monotonically increasing memory usage as compression level increases */ nuint newMB = ZSTD_estimateCCtxSize_internal(level); @@ -2102,16 +2664,46 @@ public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* } { - ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(@params, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); - nuint blockSize = ZSTD_resolveMaxBlockSize(@params->maxBlockSize) < (nuint)1 << (int)cParams.windowLog ? ZSTD_resolveMaxBlockSize(@params->maxBlockSize) : (nuint)1 << (int)cParams.windowLog; - nuint inBuffSize = @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ((nuint)1 << (int)cParams.windowLog) + blockSize : 0; - nuint outBuffSize = @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_compressBound(blockSize) + 1 : 0; - ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params->useRowMatchFinder, &@params->cParams); - return ZSTD_estimateCCtxSize_usingCCtxParams_internal(&cParams, &@params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, unchecked(0UL - 1), ZSTD_hasExtSeqProd(@params), @params->maxBlockSize); - } - } - - public static nuint ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( + @params, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + nuint blockSize = + ZSTD_resolveMaxBlockSize(@params->maxBlockSize) + < (nuint)1 << (int)cParams.windowLog + ? ZSTD_resolveMaxBlockSize(@params->maxBlockSize) + : (nuint)1 << (int)cParams.windowLog; + nuint inBuffSize = + @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? 
((nuint)1 << (int)cParams.windowLog) + blockSize + : 0; + nuint outBuffSize = + @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_compressBound(blockSize) + 1 + : 0; + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + @params->useRowMatchFinder, + &@params->cParams + ); + return ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &cParams, + &@params->ldmParams, + 1, + useRowMatchFinder, + inBuffSize, + outBuffSize, + unchecked(0UL - 1), + ZSTD_hasExtSeqProd(@params), + @params->maxBlockSize + ); + } + } + + public static nuint ZSTD_estimateCStreamSize_usingCParams( + ZSTD_compressionParameters cParams + ) { ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) @@ -2133,7 +2725,12 @@ public static nuint ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParame private static nuint ZSTD_estimateCStreamSize_internal(int compressionLevel) { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); return ZSTD_estimateCStreamSize_usingCParams(cParams); } @@ -2161,7 +2758,11 @@ public static nuint ZSTD_estimateCStreamSize(int compressionLevel) { int level; nuint memBudget = 0; - for (level = compressionLevel < 1 ? compressionLevel : 1; level <= compressionLevel; level++) + for ( + level = compressionLevel < 1 ? 
compressionLevel : 1; + level <= compressionLevel; + level++ + ) { nuint newMB = ZSTD_estimateCStreamSize_internal(level); if (newMB > memBudget) @@ -2214,7 +2815,10 @@ public static nuint ZSTD_toFlushNow(ZSTD_CCtx_s* cctx) } [Conditional("DEBUG")] - private static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, ZSTD_compressionParameters cParams2) + private static void ZSTD_assertEqualCParams( + ZSTD_compressionParameters cParams1, + ZSTD_compressionParameters cParams2 + ) { assert(cParams1.windowLog == cParams2.windowLog); assert(cParams1.chainLog == cParams2.chainLog); @@ -2265,12 +2869,35 @@ private static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix(ms->hashSaltEntropy, 4); } - private static nuint ZSTD_reset_matchState(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_compressionParameters* cParams, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_compResetPolicy_e crp, ZSTD_indexResetPolicy_e forceResetIndex, ZSTD_resetTarget_e forWho) + private static nuint ZSTD_reset_matchState( + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_compressionParameters* cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + ZSTD_compResetPolicy_e crp, + ZSTD_indexResetPolicy_e forceResetIndex, + ZSTD_resetTarget_e forWho + ) { /* disable chain table allocation for fast or row-based strategies */ - nuint chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, ms->dedicatedDictSearch != 0 && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict ? 1U : 0U) != 0 ? (nuint)1 << (int)cParams->chainLog : 0; + nuint chainSize = + ZSTD_allocateChainTable( + cParams->strategy, + useRowMatchFinder, + ms->dedicatedDictSearch != 0 + && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict + ? 1U + : 0U + ) != 0 + ? (nuint)1 << (int)cParams->chainLog + : 0; nuint hSize = (nuint)1 << (int)cParams->hashLog; - uint hashLog3 = forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->minMatch == 3 ? 
17 < cParams->windowLog ? 17 : cParams->windowLog : 0; + uint hashLog3 = + forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->minMatch == 3 + ? 17 < cParams->windowLog + ? 17 + : cParams->windowLog + : 0; nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); if (forceResetIndex == ZSTD_indexResetPolicy_e.ZSTDirp_reset) @@ -2314,20 +2941,41 @@ private static nuint ZSTD_reset_matchState(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws } { - uint rowLog = cParams->searchLog <= 4 ? 4 : cParams->searchLog <= 6 ? cParams->searchLog : 6; + uint rowLog = + cParams->searchLog <= 4 ? 4 + : cParams->searchLog <= 6 ? cParams->searchLog + : 6; assert(cParams->hashLog >= rowLog); ms->rowHashLog = cParams->hashLog - rowLog; } } - if (forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->strategy >= ZSTD_strategy.ZSTD_btopt) + if ( + forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx + && cParams->strategy >= ZSTD_strategy.ZSTD_btopt + ) { ms->opt.litFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (1 << 8) * sizeof(uint)); - ms->opt.litLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (35 + 1) * sizeof(uint)); - ms->opt.matchLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (52 + 1) * sizeof(uint)); - ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (31 + 1) * sizeof(uint)); - ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, (nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))); - ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t))); + ms->opt.litLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (35 + 1) * sizeof(uint) + ); + ms->opt.matchLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (52 + 1) * sizeof(uint) + ); + ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (31 + 1) * sizeof(uint) + ); + ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64( + ws, + 
(nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t)) + ); + ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64( + ws, + (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) + ); } ms->cParams = *cParams; @@ -2341,7 +2989,11 @@ private static nuint ZSTD_reset_matchState(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) { - return (nuint)(w.nextSrc - w.@base) > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) ? 1 : 0; + return + (nuint)(w.nextSrc - w.@base) + > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) + ? 1 + : 0; } /** ZSTD_dictTooBig(): @@ -2351,7 +3003,11 @@ private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) */ private static int ZSTD_dictTooBig(nuint loadedDictSize) { - return loadedDictSize > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) ? 1 : 0; + return + loadedDictSize + > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) + ? 1 + : 0; } /*! ZSTD_resetCCtx_internal() : @@ -2360,7 +3016,14 @@ private static int ZSTD_dictTooBig(nuint loadedDictSize) * dictionary is being attached / copied, then pass 0. * note : `params` are assumed fully validated at this stage. */ - private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, nuint loadedDictSize, ZSTD_compResetPolicy_e crp, ZSTD_buffered_policy_e zbuff) + private static nuint ZSTD_resetCCtx_internal( + ZSTD_CCtx_s* zc, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + nuint loadedDictSize, + ZSTD_compResetPolicy_e crp, + ZSTD_buffered_policy_e zbuff + ) { ZSTD_cwksp* ws = &zc->workspace; assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); @@ -2379,16 +3042,54 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s } { - nuint windowSize = 1 > (nuint)((ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize ? 
(ulong)1 << (int)@params->cParams.windowLog : pledgedSrcSize) ? 1 : (nuint)((ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize ? (ulong)1 << (int)@params->cParams.windowLog : pledgedSrcSize); - nuint blockSize = @params->maxBlockSize < windowSize ? @params->maxBlockSize : windowSize; - nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, @params->cParams.minMatch, ZSTD_hasExtSeqProd(@params)); - nuint buffOutSize = zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered && @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_compressBound(blockSize) + 1 : 0; - nuint buffInSize = zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered && @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? windowSize + blockSize : 0; + nuint windowSize = + 1 + > (nuint)( + (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize + ? (ulong)1 << (int)@params->cParams.windowLog + : pledgedSrcSize + ) + ? 1 + : (nuint)( + (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize + ? (ulong)1 << (int)@params->cParams.windowLog + : pledgedSrcSize + ); + nuint blockSize = + @params->maxBlockSize < windowSize ? @params->maxBlockSize : windowSize; + nuint maxNbSeq = ZSTD_maxNbSeq( + blockSize, + @params->cParams.minMatch, + ZSTD_hasExtSeqProd(@params) + ); + nuint buffOutSize = + zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered + && @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_compressBound(blockSize) + 1 + : 0; + nuint buffInSize = + zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered + && @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? windowSize + blockSize + : 0; nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(@params->ldmParams, blockSize); int indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); int dictTooBig = ZSTD_dictTooBig(loadedDictSize); - ZSTD_indexResetPolicy_e needsIndexReset = indexTooClose != 0 || dictTooBig != 0 || zc->initialized == 0 ? 
ZSTD_indexResetPolicy_e.ZSTDirp_reset : ZSTD_indexResetPolicy_e.ZSTDirp_continue; - nuint neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal(&@params->cParams, &@params->ldmParams, zc->staticSize != 0 ? 1 : 0, @params->useRowMatchFinder, buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(@params), @params->maxBlockSize); + ZSTD_indexResetPolicy_e needsIndexReset = + indexTooClose != 0 || dictTooBig != 0 || zc->initialized == 0 + ? ZSTD_indexResetPolicy_e.ZSTDirp_reset + : ZSTD_indexResetPolicy_e.ZSTDirp_continue; + nuint neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &@params->cParams, + &@params->ldmParams, + zc->staticSize != 0 ? 1 : 0, + @params->useRowMatchFinder, + buffInSize, + buffOutSize, + pledgedSrcSize, + ZSTD_hasExtSeqProd(@params), + @params->maxBlockSize + ); { nuint err_code = neededSpace; if (ERR_isError(err_code)) @@ -2407,7 +3108,9 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s { if (zc->staticSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } needsIndexReset = ZSTD_indexResetPolicy_e.ZSTDirp_reset; @@ -2420,32 +3123,60 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s } } - assert(ZSTD_cwksp_check_available(ws, (nuint)(2 * sizeof(ZSTD_compressedBlockState_t))) != 0); - zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(ws, (nuint)sizeof(ZSTD_compressedBlockState_t)); + assert( + ZSTD_cwksp_check_available( + ws, + (nuint)(2 * sizeof(ZSTD_compressedBlockState_t)) + ) != 0 + ); + zc->blockState.prevCBlock = + (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + ws, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); if (zc->blockState.prevCBlock == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } - zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(ws, (nuint)sizeof(ZSTD_compressedBlockState_t)); + zc->blockState.nextCBlock = + (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + ws, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); if (zc->blockState.nextCBlock == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } - zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208); + zc->tmpWorkspace = ZSTD_cwksp_reserve_object( + ws, + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); if (zc->tmpWorkspace == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } - zc->tmpWkspSize = (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) : 8208; + zc->tmpWkspSize = + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208; } } ZSTD_cwksp_clear(ws); zc->blockState.matchState.cParams = @params->cParams; - zc->blockState.matchState.prefetchCDictTables = @params->prefetchCDictTables == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + zc->blockState.matchState.prefetchCDictTables = + @params->prefetchCDictTables == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 
1 : 0; zc->pledgedSrcSizePlusOne = pledgedSrcSize + 1; zc->consumedSrcSize = 0; zc->producedCSize = 0; @@ -2458,21 +3189,38 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s zc->dictContentSize = 0; ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); { - nuint err_code = ZSTD_reset_matchState(&zc->blockState.matchState, ws, &@params->cParams, @params->useRowMatchFinder, crp, needsIndexReset, ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx); + nuint err_code = ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + &@params->cParams, + @params->useRowMatchFinder, + crp, + needsIndexReset, + ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx + ); if (ERR_isError(err_code)) { return err_code; } } - zc->seqStore.sequencesStart = (SeqDef_s*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * (nuint)sizeof(SeqDef_s)); + zc->seqStore.sequencesStart = (SeqDef_s*)ZSTD_cwksp_reserve_aligned64( + ws, + maxNbSeq * (nuint)sizeof(SeqDef_s) + ); if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { /* TODO: avoid memset? 
*/ nuint ldmHSize = (nuint)1 << (int)@params->ldmParams.hashLog; - zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * (nuint)sizeof(ldmEntry_t)); + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64( + ws, + ldmHSize * (nuint)sizeof(ldmEntry_t) + ); memset(zc->ldmState.hashTable, 0, (uint)(ldmHSize * (nuint)sizeof(ldmEntry_t))); - zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * (nuint)sizeof(rawSeq)); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64( + ws, + maxNbLdmSeq * (nuint)sizeof(rawSeq) + ); zc->maxNbLdmSequences = maxNbLdmSeq; ZSTD_window_init(&zc->ldmState.window); zc->ldmState.loadedDictEnd = 0; @@ -2482,7 +3230,10 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s { nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); zc->extSeqBufCapacity = maxNbExternalSeq; - zc->extSeqBuf = (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence)); + zc->extSeqBuf = (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64( + ws, + maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) + ); } zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + 32); @@ -2495,7 +3246,9 @@ private static nuint ZSTD_resetCCtx_internal(ZSTD_CCtx_s* zc, ZSTD_CCtx_params_s if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { /* TODO: avoid memset? 
*/ - nuint numBuckets = (nuint)1 << (int)(@params->ldmParams.hashLog - @params->ldmParams.bucketSizeLog); + nuint numBuckets = + (nuint)1 + << (int)(@params->ldmParams.hashLog - @params->ldmParams.bucketSizeLog); zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); memset(zc->ldmState.bucketOffsets, 0, (uint)numBuckets); } @@ -2523,15 +3276,50 @@ private static void ZSTD_invalidateRepCodes(ZSTD_CCtx_s* cctx) assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); } - private static readonly nuint* attachDictSizeCutoffs = GetArrayPointer(new nuint[10] { 8 * (1 << 10), 8 * (1 << 10), 16 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 32 * (1 << 10), 8 * (1 << 10), 8 * (1 << 10) }); - private static int ZSTD_shouldAttachDict(ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize) + private static readonly nuint* attachDictSizeCutoffs = GetArrayPointer( + new nuint[10] + { + 8 * (1 << 10), + 8 * (1 << 10), + 16 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 8 * (1 << 10), + 8 * (1 << 10), + } + ); + + private static int ZSTD_shouldAttachDict( + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) { nuint cutoff = attachDictSizeCutoffs[(int)cdict->matchState.cParams.strategy]; int dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; - return dedicatedDictSearch != 0 || (pledgedSrcSize <= cutoff || pledgedSrcSize == unchecked(0UL - 1) || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach) && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy && @params->forceWindow == 0 ? 
1 : 0; - } - - private static nuint ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + return + dedicatedDictSearch != 0 + || ( + pledgedSrcSize <= cutoff + || pledgedSrcSize == unchecked(0UL - 1) + || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach + ) + && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy + && @params->forceWindow == 0 + ? 1 + : 0; + } + + private static nuint ZSTD_resetCCtx_byAttachingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) { { ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; @@ -2542,11 +3330,24 @@ private static nuint ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDi ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); } - @params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, cdict->dictContentSize, ZSTD_CParamMode_e.ZSTD_cpm_attachDict, @params.useRowMatchFinder); + @params.cParams = ZSTD_adjustCParams_internal( + adjusted_cdict_cParams, + pledgedSrcSize, + cdict->dictContentSize, + ZSTD_CParamMode_e.ZSTD_cpm_attachDict, + @params.useRowMatchFinder + ); @params.cParams.windowLog = windowLog; @params.useRowMatchFinder = cdict->useRowMatchFinder; { - nuint err_code = ZSTD_resetCCtx_internal(cctx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, zbuff); + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + &@params, + pledgedSrcSize, + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + zbuff + ); if (ERR_isError(err_code)) { return err_code; @@ -2557,28 +3358,43 @@ private static nuint ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDi } { - uint cdictEnd = (uint)(cdict->matchState.window.nextSrc - cdict->matchState.window.@base); + uint cdictEnd = (uint)( + cdict->matchState.window.nextSrc - 
cdict->matchState.window.@base + ); uint cdictLen = cdictEnd - cdict->matchState.window.dictLimit; if (cdictLen != 0) { cctx->blockState.matchState.dictMatchState = &cdict->matchState; if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { - cctx->blockState.matchState.window.nextSrc = cctx->blockState.matchState.window.@base + cdictEnd; + cctx->blockState.matchState.window.nextSrc = + cctx->blockState.matchState.window.@base + cdictEnd; ZSTD_window_clear(&cctx->blockState.matchState.window); } - cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit; + cctx->blockState.matchState.loadedDictEnd = cctx->blockState + .matchState + .window + .dictLimit; } } cctx->dictID = cdict->dictID; cctx->dictContentSize = cdict->dictContentSize; - memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, (uint)sizeof(ZSTD_compressedBlockState_t)); + memcpy( + cctx->blockState.prevCBlock, + &cdict->cBlockState, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); return 0; } - private static void ZSTD_copyCDictTableIntoCCtx(uint* dst, uint* src, nuint tableSize, ZSTD_compressionParameters* cParams) + private static void ZSTD_copyCDictTableIntoCCtx( + uint* dst, + uint* src, + nuint tableSize, + ZSTD_compressionParameters* cParams + ) { if (ZSTD_CDictIndicesAreTagged(cParams) != 0) { @@ -2598,7 +3414,13 @@ private static void ZSTD_copyCDictTableIntoCCtx(uint* dst, uint* src, nuint tabl } } - private static nuint ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + private static nuint ZSTD_resetCCtx_byCopyingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) { ZSTD_compressionParameters* cdict_cParams = &cdict->matchState.cParams; assert(cdict->matchState.dedicatedDictSearch == 0); @@ -2609,7 +3431,14 @@ private static nuint 
ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict @params.cParams.windowLog = windowLog; @params.useRowMatchFinder = cdict->useRowMatchFinder; { - nuint err_code = ZSTD_resetCCtx_internal(cctx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, zbuff); + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + &@params, + pledgedSrcSize, + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, + zbuff + ); if (ERR_isError(err_code)) { return err_code; @@ -2625,18 +3454,42 @@ private static nuint ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict assert(@params.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); { /* DDS guaranteed disabled */ - nuint chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) != 0 ? (nuint)1 << (int)cdict_cParams->chainLog : 0; + nuint chainSize = + ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) + != 0 + ? (nuint)1 << (int)cdict_cParams->chainLog + : 0; nuint hSize = (nuint)1 << (int)cdict_cParams->hashLog; - ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, hSize, cdict_cParams); - if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0) != 0) - { - ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, cdict->matchState.chainTable, chainSize, cdict_cParams); + ZSTD_copyCDictTableIntoCCtx( + cctx->blockState.matchState.hashTable, + cdict->matchState.hashTable, + hSize, + cdict_cParams + ); + if ( + ZSTD_allocateChainTable( + cctx->appliedParams.cParams.strategy, + cctx->appliedParams.useRowMatchFinder, + 0 + ) != 0 + ) + { + ZSTD_copyCDictTableIntoCCtx( + cctx->blockState.matchState.chainTable, + cdict->matchState.chainTable, + chainSize, + cdict_cParams + ); } if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder) != 0) { nuint tagTableSize = hSize; - memcpy(cctx->blockState.matchState.tagTable, 
cdict->matchState.tagTable, (uint)tagTableSize); + memcpy( + cctx->blockState.matchState.tagTable, + cdict->matchState.tagTable, + (uint)tagTableSize + ); cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; } } @@ -2660,18 +3513,34 @@ private static nuint ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict cctx->dictID = cdict->dictID; cctx->dictContentSize = cdict->dictContentSize; - memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, (uint)sizeof(ZSTD_compressedBlockState_t)); + memcpy( + cctx->blockState.prevCBlock, + &cdict->cBlockState, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); return 0; } /* We have a choice between copying the dictionary context into the working * context, or referencing the dictionary context from the working context * in-place. We decide here which strategy to use. */ - private static nuint ZSTD_resetCCtx_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + private static nuint ZSTD_resetCCtx_usingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) { if (ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) { - return ZSTD_resetCCtx_byAttachingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff); + return ZSTD_resetCCtx_byAttachingCDict( + cctx, + cdict, + *@params, + pledgedSrcSize, + zbuff + ); } else { @@ -2686,7 +3555,13 @@ private static nuint ZSTD_resetCCtx_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* * entropy tables, and dictionary references. * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. 
* @return : 0, or an error code */ - private static nuint ZSTD_copyCCtx_internal(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* srcCCtx, ZSTD_frameParameters fParams, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + private static nuint ZSTD_copyCCtx_internal( + ZSTD_CCtx_s* dstCCtx, + ZSTD_CCtx_s* srcCCtx, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) { if (srcCCtx->stage != ZSTD_compressionStage_e.ZSTDcs_init) { @@ -2699,29 +3574,71 @@ private static nuint ZSTD_copyCCtx_internal(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* s @params.cParams = srcCCtx->appliedParams.cParams; assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert( + srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto + ); @params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; @params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; @params.ldmParams = srcCCtx->appliedParams.ldmParams; @params.fParams = fParams; @params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; - ZSTD_resetCCtx_internal(dstCCtx, &@params, pledgedSrcSize, 0, ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, zbuff); - assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); - assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); - assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); - assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); - assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); + ZSTD_resetCCtx_internal( + dstCCtx, + &@params, + pledgedSrcSize, + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, + zbuff + ); + assert( + 
dstCCtx->appliedParams.cParams.windowLog + == srcCCtx->appliedParams.cParams.windowLog + ); + assert( + dstCCtx->appliedParams.cParams.strategy + == srcCCtx->appliedParams.cParams.strategy + ); + assert( + dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog + ); + assert( + dstCCtx->appliedParams.cParams.chainLog + == srcCCtx->appliedParams.cParams.chainLog + ); + assert( + dstCCtx->blockState.matchState.hashLog3 + == srcCCtx->blockState.matchState.hashLog3 + ); } ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); { - nuint chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy, srcCCtx->appliedParams.useRowMatchFinder, 0) != 0 ? (nuint)1 << (int)srcCCtx->appliedParams.cParams.chainLog : 0; + nuint chainSize = + ZSTD_allocateChainTable( + srcCCtx->appliedParams.cParams.strategy, + srcCCtx->appliedParams.useRowMatchFinder, + 0 + ) != 0 + ? (nuint)1 << (int)srcCCtx->appliedParams.cParams.chainLog + : 0; nuint hSize = (nuint)1 << (int)srcCCtx->appliedParams.cParams.hashLog; uint h3log = srcCCtx->blockState.matchState.hashLog3; nuint h3Size = h3log != 0 ? 
(nuint)1 << (int)h3log : 0; - memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, (uint)(hSize * sizeof(uint))); - memcpy(dstCCtx->blockState.matchState.chainTable, srcCCtx->blockState.matchState.chainTable, (uint)(chainSize * sizeof(uint))); - memcpy(dstCCtx->blockState.matchState.hashTable3, srcCCtx->blockState.matchState.hashTable3, (uint)(h3Size * sizeof(uint))); + memcpy( + dstCCtx->blockState.matchState.hashTable, + srcCCtx->blockState.matchState.hashTable, + (uint)(hSize * sizeof(uint)) + ); + memcpy( + dstCCtx->blockState.matchState.chainTable, + srcCCtx->blockState.matchState.chainTable, + (uint)(chainSize * sizeof(uint)) + ); + memcpy( + dstCCtx->blockState.matchState.hashTable3, + srcCCtx->blockState.matchState.hashTable3, + (uint)(h3Size * sizeof(uint)) + ); } ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); @@ -2735,7 +3652,11 @@ private static nuint ZSTD_copyCCtx_internal(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* s dstCCtx->dictID = srcCCtx->dictID; dstCCtx->dictContentSize = srcCCtx->dictContentSize; - memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, (uint)sizeof(ZSTD_compressedBlockState_t)); + memcpy( + dstCCtx->blockState.prevCBlock, + srcCCtx->blockState.prevCBlock, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); return 0; } @@ -2744,14 +3665,18 @@ private static nuint ZSTD_copyCCtx_internal(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* s * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * pledgedSrcSize==0 means "unknown". 
* @return : 0, or an error code */ - public static nuint ZSTD_copyCCtx(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* srcCCtx, ulong pledgedSrcSize) + public static nuint ZSTD_copyCCtx( + ZSTD_CCtx_s* dstCCtx, + ZSTD_CCtx_s* srcCCtx, + ulong pledgedSrcSize + ) { /*content*/ ZSTD_frameParameters fParams = new ZSTD_frameParameters { contentSizeFlag = 1, checksumFlag = 0, - noDictIDFlag = 0 + noDictIDFlag = 0, }; ZSTD_buffered_policy_e zbuff = srcCCtx->bufferedPolicy; if (pledgedSrcSize == 0) @@ -2767,7 +3692,12 @@ public static nuint ZSTD_copyCCtx(ZSTD_CCtx_s* dstCCtx, ZSTD_CCtx_s* srcCCtx, ul * Presume table size is a multiple of ZSTD_ROWSIZE * to help auto-vectorization */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_reduceTable_internal(uint* table, uint size, uint reducerValue, int preserveMark) + private static void ZSTD_reduceTable_internal( + uint* table, + uint size, + uint reducerValue, + int preserveMark + ) { int nbRows = (int)size / 16; int cellNb = 0; @@ -2813,14 +3743,24 @@ private static void ZSTD_reduceTable_btlazy2(uint* table, uint size, uint reduce /*! 
ZSTD_reduceIndex() : * rescale all indexes to avoid future overflow (indexes are U32) */ - private static void ZSTD_reduceIndex(ZSTD_MatchState_t* ms, ZSTD_CCtx_params_s* @params, uint reducerValue) + private static void ZSTD_reduceIndex( + ZSTD_MatchState_t* ms, + ZSTD_CCtx_params_s* @params, + uint reducerValue + ) { { uint hSize = (uint)1 << (int)@params->cParams.hashLog; ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); } - if (ZSTD_allocateChainTable(@params->cParams.strategy, @params->useRowMatchFinder, (uint)ms->dedicatedDictSearch) != 0) + if ( + ZSTD_allocateChainTable( + @params->cParams.strategy, + @params->useRowMatchFinder, + (uint)ms->dedicatedDictSearch + ) != 0 + ) { uint chainSize = (uint)1 << (int)@params->cParams.chainLog; if (@params->cParams.strategy == ZSTD_strategy.ZSTD_btlazy2) @@ -2894,7 +3834,18 @@ private static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params_s* cctxParams) * * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) */ - private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics(SeqStore_t* seqStorePtr, nuint nbSeq, ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, byte* dst, byte* dstEnd, ZSTD_strategy strategy, uint* countWorkspace, void* entropyWorkspace, nuint entropyWkspSize) + private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics( + SeqStore_t* seqStorePtr, + nuint nbSeq, + ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + byte* dst, + byte* dstEnd, + ZSTD_strategy strategy, + uint* countWorkspace, + void* entropyWorkspace, + nuint entropyWkspSize + ) { byte* ostart = dst; byte* oend = dstEnd; @@ -2914,13 +3865,57 @@ private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics(SeqS { uint max = 35; /* can't fail */ - nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, 
+ &max, + llCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; - stats.LLtype = (uint)ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 9, prevEntropy->litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, strategy); - assert(SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed); - assert(!(stats.LLtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat.FSE_repeat_none)); - { - nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_LitLength, 9, (SymbolEncodingType_e)stats.LLtype, countWorkspace, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, 35, prevEntropy->litlengthCTable, sizeof(uint) * 329, entropyWorkspace, entropyWkspSize); + stats.LLtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->litlength_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 9, + prevEntropy->litlengthCTable, + LL_defaultNorm, + LL_defaultNormLog, + ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, + strategy + ); + assert( + SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed + && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed + ); + assert( + !( + stats.LLtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->litlength_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); + { + nuint countSize = ZSTD_buildCTable( + op, + (nuint)(oend - op), + CTable_LitLength, + 9, + (SymbolEncodingType_e)stats.LLtype, + countWorkspace, + max, + llCodeTable, + nbSeq, + LL_defaultNorm, + LL_defaultNormLog, + 35, + prevEntropy->litlengthCTable, + sizeof(uint) * 329, + entropyWorkspace, + entropyWkspSize + ); if (ERR_isError(countSize)) { stats.size = countSize; @@ -2936,14 +3931,58 @@ private static ZSTD_symbolEncodingTypeStats_t 
ZSTD_buildSequencesStatistics(SeqS { uint max = 31; - nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, + &max, + ofCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ - ZSTD_DefaultPolicy_e defaultPolicy = max <= 28 ? ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed : ZSTD_DefaultPolicy_e.ZSTD_defaultDisallowed; + ZSTD_DefaultPolicy_e defaultPolicy = + max <= 28 + ? ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed + : ZSTD_DefaultPolicy_e.ZSTD_defaultDisallowed; nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; - stats.Offtype = (uint)ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 8, prevEntropy->offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); - assert(!(stats.Offtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat.FSE_repeat_none)); - { - nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_OffsetBits, 8, (SymbolEncodingType_e)stats.Offtype, countWorkspace, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, 28, prevEntropy->offcodeCTable, sizeof(uint) * 193, entropyWorkspace, entropyWkspSize); + stats.Offtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->offcode_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 8, + prevEntropy->offcodeCTable, + OF_defaultNorm, + OF_defaultNormLog, + defaultPolicy, + strategy + ); + assert( + !( + stats.Offtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->offcode_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); + { + nuint countSize = ZSTD_buildCTable( + op, + (nuint)(oend - op), + CTable_OffsetBits, + 8, + (SymbolEncodingType_e)stats.Offtype, + countWorkspace, + max, + ofCodeTable, + nbSeq, + OF_defaultNorm, + 
OF_defaultNormLog, + 28, + prevEntropy->offcodeCTable, + sizeof(uint) * 193, + entropyWorkspace, + entropyWkspSize + ); if (ERR_isError(countSize)) { stats.size = countSize; @@ -2959,12 +3998,53 @@ private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics(SeqS { uint max = 52; - nuint mostFrequent = HIST_countFast_wksp(countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, + &max, + mlCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; - stats.MLtype = (uint)ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, countWorkspace, max, mostFrequent, nbSeq, 9, prevEntropy->matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, strategy); - assert(!(stats.MLtype < (uint)SymbolEncodingType_e.set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat.FSE_repeat_none)); - { - nuint countSize = ZSTD_buildCTable(op, (nuint)(oend - op), CTable_MatchLength, 9, (SymbolEncodingType_e)stats.MLtype, countWorkspace, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, 52, prevEntropy->matchlengthCTable, sizeof(uint) * 363, entropyWorkspace, entropyWkspSize); + stats.MLtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->matchlength_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 9, + prevEntropy->matchlengthCTable, + ML_defaultNorm, + ML_defaultNormLog, + ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, + strategy + ); + assert( + !( + stats.MLtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->matchlength_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); + { + nuint countSize = ZSTD_buildCTable( + op, + (nuint)(oend - op), + CTable_MatchLength, + 9, + (SymbolEncodingType_e)stats.MLtype, + countWorkspace, + max, + mlCodeTable, + nbSeq, + ML_defaultNorm, + ML_defaultNormLog, + 52, + 
prevEntropy->matchlengthCTable, + sizeof(uint) * 363, + entropyWorkspace, + entropyWkspSize + ); if (ERR_isError(countSize)) { stats.size = countSize; @@ -2983,7 +4063,19 @@ private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics(SeqS } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstCapacity, void* literals, nuint litSize, SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* entropyWorkspace, nuint entropyWkspSize, int bmi2) + private static nuint ZSTD_entropyCompressSeqStore_internal( + void* dst, + nuint dstCapacity, + void* literals, + nuint litSize, + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + void* entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) { ZSTD_strategy strategy = cctxParams->cParams.strategy; uint* count = (uint*)entropyWorkspace; @@ -3006,8 +4098,22 @@ private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstC { nuint numSequences = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); /* Base suspicion of uncompressibility on ratio of literals to sequences */ - int suspectUncompressible = numSequences == 0 || litSize / numSequences >= 20 ? 1 : 0; - nuint cSize = ZSTD_compressLiterals(op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, ZSTD_literalsCompressionIsDisabled(cctxParams), suspectUncompressible, bmi2); + int suspectUncompressible = + numSequences == 0 || litSize / numSequences >= 20 ? 
1 : 0; + nuint cSize = ZSTD_compressLiterals( + op, + dstCapacity, + literals, + litSize, + entropyWorkspace, + entropyWkspSize, + &prevEntropy->huf, + &nextEntropy->huf, + cctxParams->cParams.strategy, + ZSTD_literalsCompressionIsDisabled(cctxParams), + suspectUncompressible, + bmi2 + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -3052,7 +4158,18 @@ private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstC { byte* seqHead = op++; /* build stats for sequences */ - ZSTD_symbolEncodingTypeStats_t stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, &prevEntropy->fse, &nextEntropy->fse, op, oend, strategy, count, entropyWorkspace, entropyWkspSize); + ZSTD_symbolEncodingTypeStats_t stats = ZSTD_buildSequencesStatistics( + seqStorePtr, + nbSeq, + &prevEntropy->fse, + &nextEntropy->fse, + op, + oend, + strategy, + count, + entropyWorkspace, + entropyWkspSize + ); { nuint err_code = stats.size; if (ERR_isError(err_code)) @@ -3068,7 +4185,20 @@ private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstC } { - nuint bitstreamSize = ZSTD_encodeSequences(op, (nuint)(oend - op), CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); + nuint bitstreamSize = ZSTD_encodeSequences( + op, + (nuint)(oend - op), + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets, + bmi2 + ); { nuint err_code = bitstreamSize; if (ERR_isError(err_code)) @@ -3089,12 +4219,40 @@ private static nuint ZSTD_entropyCompressSeqStore_internal(void* dst, nuint dstC return (nuint)(op - ostart); } - private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer(void* dst, nuint dstCapacity, void* literals, nuint litSize, nuint blockSize, SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* 
entropyWorkspace, nuint entropyWkspSize, int bmi2) - { - nuint cSize = ZSTD_entropyCompressSeqStore_internal(dst, dstCapacity, literals, litSize, seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2); + private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer( + void* dst, + nuint dstCapacity, + void* literals, + nuint litSize, + nuint blockSize, + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + void* entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) + { + nuint cSize = ZSTD_entropyCompressSeqStore_internal( + dst, + dstCapacity, + literals, + litSize, + seqStorePtr, + prevEntropy, + nextEntropy, + cctxParams, + entropyWorkspace, + entropyWkspSize, + bmi2 + ); if (cSize == 0) return 0; - if (cSize == unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) && blockSize <= dstCapacity) + if ( + cSize == unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + && blockSize <= dstCapacity + ) { return 0; } @@ -3117,104 +4275,137 @@ private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer(void* dst, nuint return cSize; } - private static nuint ZSTD_entropyCompressSeqStore(SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, nuint srcSize, void* entropyWorkspace, nuint entropyWkspSize, int bmi2) - { - return ZSTD_entropyCompressSeqStore_wExtLitBuffer(dst, dstCapacity, seqStorePtr->litStart, (nuint)(seqStorePtr->lit - seqStorePtr->litStart), srcSize, seqStorePtr, prevEntropy, nextEntropy, cctxParams, entropyWorkspace, entropyWkspSize, bmi2); - } - - private static readonly ZSTD_BlockCompressor_f?[][] blockCompressor = new ZSTD_BlockCompressor_f?[4][] - { - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast, - ZSTD_compressBlock_fast, - ZSTD_compressBlock_doubleFast, - 
ZSTD_compressBlock_greedy, - ZSTD_compressBlock_lazy, - ZSTD_compressBlock_lazy2, - ZSTD_compressBlock_btlazy2, - ZSTD_compressBlock_btopt, - ZSTD_compressBlock_btultra, - ZSTD_compressBlock_btultra2 - }, - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_doubleFast_extDict, - ZSTD_compressBlock_greedy_extDict, - ZSTD_compressBlock_lazy_extDict, - ZSTD_compressBlock_lazy2_extDict, - ZSTD_compressBlock_btlazy2_extDict, - ZSTD_compressBlock_btopt_extDict, - ZSTD_compressBlock_btultra_extDict, - ZSTD_compressBlock_btultra_extDict - }, - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_doubleFast_dictMatchState, - ZSTD_compressBlock_greedy_dictMatchState, - ZSTD_compressBlock_lazy_dictMatchState, - ZSTD_compressBlock_lazy2_dictMatchState, - ZSTD_compressBlock_btlazy2_dictMatchState, - ZSTD_compressBlock_btopt_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState - }, - new ZSTD_BlockCompressor_f?[10] - { - null, - null, - null, - ZSTD_compressBlock_greedy_dedicatedDictSearch, - ZSTD_compressBlock_lazy_dedicatedDictSearch, - ZSTD_compressBlock_lazy2_dedicatedDictSearch, - null, - null, - null, - null - } - }; - private static readonly ZSTD_BlockCompressor_f[][] rowBasedBlockCompressors = new ZSTD_BlockCompressor_f[4][] - { - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_row, - ZSTD_compressBlock_lazy_row, - ZSTD_compressBlock_lazy2_row - }, - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_extDict_row, - ZSTD_compressBlock_lazy_extDict_row, - ZSTD_compressBlock_lazy2_extDict_row - }, - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_dictMatchState_row, - ZSTD_compressBlock_lazy_dictMatchState_row, - ZSTD_compressBlock_lazy2_dictMatchState_row - }, - new ZSTD_BlockCompressor_f[3] - { - 
ZSTD_compressBlock_greedy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy2_dedicatedDictSearch_row - } - }; + private static nuint ZSTD_entropyCompressSeqStore( + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + nuint srcSize, + void* entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) + { + return ZSTD_entropyCompressSeqStore_wExtLitBuffer( + dst, + dstCapacity, + seqStorePtr->litStart, + (nuint)(seqStorePtr->lit - seqStorePtr->litStart), + srcSize, + seqStorePtr, + prevEntropy, + nextEntropy, + cctxParams, + entropyWorkspace, + entropyWkspSize, + bmi2 + ); + } + + private static readonly ZSTD_BlockCompressor_f?[][] blockCompressor = + new ZSTD_BlockCompressor_f?[4][] + { + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast, + ZSTD_compressBlock_fast, + ZSTD_compressBlock_doubleFast, + ZSTD_compressBlock_greedy, + ZSTD_compressBlock_lazy, + ZSTD_compressBlock_lazy2, + ZSTD_compressBlock_btlazy2, + ZSTD_compressBlock_btopt, + ZSTD_compressBlock_btultra, + ZSTD_compressBlock_btultra2, + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_doubleFast_extDict, + ZSTD_compressBlock_greedy_extDict, + ZSTD_compressBlock_lazy_extDict, + ZSTD_compressBlock_lazy2_extDict, + ZSTD_compressBlock_btlazy2_extDict, + ZSTD_compressBlock_btopt_extDict, + ZSTD_compressBlock_btultra_extDict, + ZSTD_compressBlock_btultra_extDict, + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_doubleFast_dictMatchState, + ZSTD_compressBlock_greedy_dictMatchState, + ZSTD_compressBlock_lazy_dictMatchState, + ZSTD_compressBlock_lazy2_dictMatchState, + ZSTD_compressBlock_btlazy2_dictMatchState, + ZSTD_compressBlock_btopt_dictMatchState, + 
ZSTD_compressBlock_btultra_dictMatchState, + ZSTD_compressBlock_btultra_dictMatchState, + }, + new ZSTD_BlockCompressor_f?[10] + { + null, + null, + null, + ZSTD_compressBlock_greedy_dedicatedDictSearch, + ZSTD_compressBlock_lazy_dedicatedDictSearch, + ZSTD_compressBlock_lazy2_dedicatedDictSearch, + null, + null, + null, + null, + }, + }; + private static readonly ZSTD_BlockCompressor_f[][] rowBasedBlockCompressors = + new ZSTD_BlockCompressor_f[4][] + { + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_row, + ZSTD_compressBlock_lazy_row, + ZSTD_compressBlock_lazy2_row, + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_extDict_row, + ZSTD_compressBlock_lazy_extDict_row, + ZSTD_compressBlock_lazy2_extDict_row, + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_dictMatchState_row, + ZSTD_compressBlock_lazy_dictMatchState_row, + ZSTD_compressBlock_lazy2_dictMatchState_row, + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_dedicatedDictSearch_row, + ZSTD_compressBlock_lazy_dedicatedDictSearch_row, + ZSTD_compressBlock_lazy2_dedicatedDictSearch_row, + }, + }; + /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ - private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) + private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor( + ZSTD_strategy strat, + ZSTD_paramSwitch_e useRowMatchFinder, + ZSTD_dictMode_e dictMode + ) { ZSTD_BlockCompressor_f? 
selectedCompressor; assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder) != 0) { assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_strategy.ZSTD_greedy]; + selectedCompressor = rowBasedBlockCompressors[(int)dictMode][ + (int)strat - (int)ZSTD_strategy.ZSTD_greedy + ]; } else { @@ -3225,7 +4416,11 @@ private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy s return selectedCompressor.NotNull(); } - private static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, byte* anchor, nuint lastLLSize) + private static void ZSTD_storeLastLiterals( + SeqStore_t* seqStorePtr, + byte* anchor, + nuint lastLLSize + ) { memcpy(seqStorePtr->lit, anchor, (uint)lastLLSize); seqStorePtr->lit += lastLLSize; @@ -3244,7 +4439,12 @@ private static void ZSTD_resetSeqStore(SeqStore_t* ssPtr) * - Appends a block delimiter to outSeqs if one is not already present. * See zstd.h for context regarding block delimiters. * Returns the number of sequences after post-processing, or an error code. 
*/ - private static nuint ZSTD_postProcessSequenceProducerResult(ZSTD_Sequence* outSeqs, nuint nbExternalSeqs, nuint outSeqsCapacity, nuint srcSize) + private static nuint ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, + nuint nbExternalSeqs, + nuint outSeqsCapacity, + nuint srcSize + ) { if (nbExternalSeqs > outSeqsCapacity) { @@ -3271,7 +4471,9 @@ private static nuint ZSTD_postProcessSequenceProducerResult(ZSTD_Sequence* outSe if (nbExternalSeqs == outSeqsCapacity) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) + ); } outSeqs[nbExternalSeqs] = new ZSTD_Sequence(); @@ -3287,7 +4489,9 @@ private static nuint ZSTD_postProcessSequenceProducerResult(ZSTD_Sequence* outSe * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */ private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seqBufSize) { - nuint matchLenSum, litLenSum, i; + nuint matchLenSum, + litLenSum, + i; matchLenSum = 0; litLenSum = 0; for (i = 0; i < seqBufSize; i++) @@ -3302,9 +4506,10 @@ private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seq /** * Function to validate sequences produced by a block compressor. 
*/ - private static void ZSTD_validateSeqStore(SeqStore_t* seqStore, ZSTD_compressionParameters* cParams) - { - } + private static void ZSTD_validateSeqStore( + SeqStore_t* seqStore, + ZSTD_compressionParameters* cParams + ) { } private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSize) { @@ -3319,7 +4524,11 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz } else { - ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch); + ZSTD_ldm_skipSequences( + &zc->externSeqStore, + srcSize, + zc->appliedParams.cParams.minMatch + ); } return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress; @@ -3338,7 +4547,13 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz assert(istart - @base < unchecked((nint)(uint)-1)); #endif if (curr > ms->nextToUpdate + 384) - ms->nextToUpdate = curr - (192 < curr - ms->nextToUpdate - 384 ? 192 : curr - ms->nextToUpdate - 384); + ms->nextToUpdate = + curr + - ( + 192 < curr - ms->nextToUpdate - 384 + ? 
192 + : curr - ms->nextToUpdate - 384 + ); } { @@ -3352,13 +4567,27 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable); + assert( + zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable + ); if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported + ) + ); } - lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, zc->appliedParams.useRowMatchFinder, src, srcSize); + lastLLSize = ZSTD_ldm_blockCompress( + &zc->externSeqStore, + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, + src, + srcSize + ); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) @@ -3366,21 +4595,39 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz RawSeqStore_t ldmSeqStore = kNullRawSeqStore; if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported + ) + ); } ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; { /* Updates ldmSeqStore.size */ - nuint err_code = ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, &zc->appliedParams.ldmParams, src, srcSize); + nuint err_code = ZSTD_ldm_generateSequences( + &zc->ldmState, + &ldmSeqStore, + &zc->appliedParams.ldmParams, + src, + srcSize + ); if (ERR_isError(err_code)) { return err_code; } } - lastLLSize = 
ZSTD_ldm_blockCompress(&ldmSeqStore, ms, &zc->seqStore, zc->blockState.nextCBlock->rep, zc->appliedParams.useRowMatchFinder, src, srcSize); + lastLLSize = ZSTD_ldm_blockCompress( + &ldmSeqStore, + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, + src, + srcSize + ); assert(ldmSeqStore.pos == ldmSeqStore.size); } else if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) @@ -3389,24 +4636,67 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz assert(zc->appliedParams.extSeqProdFunc != null); { uint windowSize = (uint)1 << (int)zc->appliedParams.cParams.windowLog; - nuint nbExternalSeqs = ((delegate* managed)zc->appliedParams.extSeqProdFunc)(zc->appliedParams.extSeqProdState, zc->extSeqBuf, zc->extSeqBufCapacity, src, srcSize, null, 0, zc->appliedParams.compressionLevel, windowSize); - nuint nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(zc->extSeqBuf, nbExternalSeqs, zc->extSeqBufCapacity, srcSize); + nuint nbExternalSeqs = ( + (delegate* managed< + void*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + void*, + nuint, + int, + nuint, + nuint>) + zc->appliedParams.extSeqProdFunc + )( + zc->appliedParams.extSeqProdState, + zc->extSeqBuf, + zc->extSeqBufCapacity, + src, + srcSize, + null, + 0, + zc->appliedParams.compressionLevel, + windowSize + ); + nuint nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->extSeqBuf, + nbExternalSeqs, + zc->extSeqBufCapacity, + srcSize + ); if (!ERR_isError(nbPostProcessedSeqs)) { ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition { idx = 0, posInSequence = 0, - posInSrc = 0 + posInSrc = 0, }; - nuint seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); + nuint seqLenSum = ZSTD_fastSequenceLengthSum( + zc->extSeqBuf, + nbPostProcessedSeqs + ); if (seqLenSum > srcSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)( + 
-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid + ) + ); } { - nuint err_code = ZSTD_transferSequences_wBlockDelim(zc, &seqPos, zc->extSeqBuf, nbPostProcessedSeqs, src, srcSize, zc->appliedParams.searchForExternalRepcodes); + nuint err_code = ZSTD_transferSequences_wBlockDelim( + zc, + &seqPos, + zc->extSeqBuf, + nbPostProcessedSeqs, + src, + srcSize, + zc->appliedParams.searchForExternalRepcodes + ); if (ERR_isError(err_code)) { return err_code; @@ -3423,17 +4713,37 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz } { - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode + ); ms->ldmSeqStore = null; - lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + lastLLSize = blockCompressor( + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + src, + srcSize + ); } } } else { - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode + ); ms->ldmSeqStore = null; - lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + lastLLSize = blockCompressor( + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + src, + srcSize + ); } { @@ -3446,12 +4756,19 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; } - private static nuint ZSTD_copyBlockSequences(SeqCollector* seqCollector, SeqStore_t* seqStore, uint* prevRepcodes) + private static nuint 
ZSTD_copyBlockSequences( + SeqCollector* seqCollector, + SeqStore_t* seqStore, + uint* prevRepcodes + ) { SeqDef_s* inSeqs = seqStore->sequencesStart; nuint nbInSequences = (nuint)(seqStore->sequences - inSeqs); nuint nbInLiterals = (nuint)(seqStore->lit - seqStore->litStart); - ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; + ZSTD_Sequence* outSeqs = + seqCollector->seqIndex == 0 + ? seqCollector->seqStart + : seqCollector->seqStart + seqCollector->seqIndex; nuint nbOutSequences = nbInSequences + 1; nuint nbOutLiterals = 0; repcodes_s repcodes; @@ -3571,7 +4888,13 @@ public static nuint ZSTD_sequenceBound(nuint srcSize) * ZSTD_sequenceBound(srcSize), or an error code that can be checked * with ZSTD_isError(). */ - public static nuint ZSTD_generateSequences(ZSTD_CCtx_s* zc, ZSTD_Sequence* outSeqs, nuint outSeqsSize, void* src, nuint srcSize) + public static nuint ZSTD_generateSequences( + ZSTD_CCtx_s* zc, + ZSTD_Sequence* outSeqs, + nuint outSeqsSize, + void* src, + nuint srcSize + ) { nuint dstCapacity = ZSTD_compressBound(srcSize); /* Make C90 happy. 
*/ @@ -3580,7 +4903,11 @@ public static nuint ZSTD_generateSequences(ZSTD_CCtx_s* zc, ZSTD_Sequence* outSe { int targetCBlockSize; { - nuint err_code = ZSTD_CCtx_getParameter(zc, ZSTD_cParameter.ZSTD_c_targetCBlockSize, &targetCBlockSize); + nuint err_code = ZSTD_CCtx_getParameter( + zc, + ZSTD_cParameter.ZSTD_c_targetCBlockSize, + &targetCBlockSize + ); if (ERR_isError(err_code)) { return err_code; @@ -3589,14 +4916,20 @@ public static nuint ZSTD_generateSequences(ZSTD_CCtx_s* zc, ZSTD_Sequence* outSe if (targetCBlockSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } } { int nbWorkers; { - nuint err_code = ZSTD_CCtx_getParameter(zc, ZSTD_cParameter.ZSTD_c_nbWorkers, &nbWorkers); + nuint err_code = ZSTD_CCtx_getParameter( + zc, + ZSTD_cParameter.ZSTD_c_nbWorkers, + &nbWorkers + ); if (ERR_isError(err_code)) { return err_code; @@ -3605,7 +4938,9 @@ public static nuint ZSTD_generateSequences(ZSTD_CCtx_s* zc, ZSTD_Sequence* outSe if (nbWorkers != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } } @@ -3723,7 +5058,10 @@ private static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockSt /* Writes the block header */ private static void writeBlockHeader(void* op, nuint cSize, nuint blockSize, uint lastBlock) { - uint cBlockHeader = cSize == 1 ? lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + uint cBlockHeader = + cSize == 1 + ? 
lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) + : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); MEM_writeLE24(op, cBlockHeader); } @@ -3734,7 +5072,17 @@ private static void writeBlockHeader(void* op, nuint cSize, nuint blockSize, uin * Requires ENTROPY_WORKSPACE_SIZE workspace * @return : size of huffman description table, or an error code */ - private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSize, ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_hufCTablesMetadata_t* hufMetadata, int literalsCompressionIsDisabled, void* workspace, nuint wkspSize, int hufFlags) + private static nuint ZSTD_buildBlockEntropyStats_literals( + void* src, + nuint srcSize, + ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + int literalsCompressionIsDisabled, + void* workspace, + nuint wkspSize, + int hufFlags + ) { byte* wkspStart = (byte*)workspace; byte* wkspEnd = wkspStart + wkspSize; @@ -3754,7 +5102,9 @@ private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSi } { - nuint minLitSize = (nuint)(prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 6 : 63); + nuint minLitSize = (nuint)( + prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 
6 : 63 + ); if (srcSize <= minLitSize) { hufMetadata->hType = SymbolEncodingType_e.set_basic; @@ -3763,7 +5113,14 @@ private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSi } { - nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, (byte*)src, srcSize, workspace, wkspSize); + nuint largest = HIST_count_wksp( + countWksp, + &maxSymbolValue, + (byte*)src, + srcSize, + workspace, + wkspSize + ); { nuint err_code = largest; if (ERR_isError(err_code)) @@ -3785,16 +5142,35 @@ private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSi } } - if (repeat == HUF_repeat.HUF_repeat_check && HUF_validateCTable(&prevHuf->CTable.e0, countWksp, maxSymbolValue) == 0) + if ( + repeat == HUF_repeat.HUF_repeat_check + && HUF_validateCTable(&prevHuf->CTable.e0, countWksp, maxSymbolValue) == 0 + ) { repeat = HUF_repeat.HUF_repeat_none; } memset(&nextHuf->CTable.e0, 0, sizeof(ulong) * 257); - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, &nextHuf->CTable.e0, countWksp, hufFlags); + huffLog = HUF_optimalTableLog( + huffLog, + srcSize, + maxSymbolValue, + nodeWksp, + nodeWkspSize, + &nextHuf->CTable.e0, + countWksp, + hufFlags + ); assert(huffLog <= 11); { - nuint maxBits = HUF_buildCTable_wksp(&nextHuf->CTable.e0, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); + nuint maxBits = HUF_buildCTable_wksp( + &nextHuf->CTable.e0, + countWksp, + maxSymbolValue, + huffLog, + nodeWksp, + nodeWkspSize + ); { nuint err_code = maxBits; if (ERR_isError(err_code)) @@ -3807,12 +5183,31 @@ private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSi } { - nuint newCSize = HUF_estimateCompressedSize(&nextHuf->CTable.e0, countWksp, maxSymbolValue); - nuint hSize = HUF_writeCTable_wksp(hufMetadata->hufDesBuffer, sizeof(byte) * 128, &nextHuf->CTable.e0, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); + nuint newCSize = HUF_estimateCompressedSize( + &nextHuf->CTable.e0, + countWksp, 
+ maxSymbolValue + ); + nuint hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, + sizeof(byte) * 128, + &nextHuf->CTable.e0, + maxSymbolValue, + huffLog, + nodeWksp, + nodeWkspSize + ); if (repeat != HUF_repeat.HUF_repeat_none) { - nuint oldCSize = HUF_estimateCompressedSize(&prevHuf->CTable.e0, countWksp, maxSymbolValue); - if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) + nuint oldCSize = HUF_estimateCompressedSize( + &prevHuf->CTable.e0, + countWksp, + maxSymbolValue + ); + if ( + oldCSize < srcSize + && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize) + ) { memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); hufMetadata->hType = SymbolEncodingType_e.set_repeat; @@ -3837,7 +5232,9 @@ private static nuint ZSTD_buildBlockEntropyStats_literals(void* src, nuint srcSi * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, * and updates nextEntropy to the appropriate repeatMode. */ - private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) + private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics( + ZSTD_fseCTables_t* nextEntropy + ) { ZSTD_symbolEncodingTypeStats_t stats = new ZSTD_symbolEncodingTypeStats_t { @@ -3846,7 +5243,7 @@ private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics MLtype = (uint)SymbolEncodingType_e.set_basic, size = 0, lastCountSize = 0, - longOffsets = 0 + longOffsets = 0, }; nextEntropy->litlength_repeatMode = FSE_repeat.FSE_repeat_none; nextEntropy->offcode_repeatMode = FSE_repeat.FSE_repeat_none; @@ -3859,7 +5256,15 @@ private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics * Stores symbol compression modes and fse table to fseMetadata. * Requires ENTROPY_WORKSPACE_SIZE wksp. 
* @return : size of fse tables or error code */ - private static nuint ZSTD_buildBlockEntropyStats_sequences(SeqStore_t* seqStorePtr, ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize) + private static nuint ZSTD_buildBlockEntropyStats_sequences( + SeqStore_t* seqStorePtr, + ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize + ) { ZSTD_strategy strategy = cctxParams->cParams.strategy; nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); @@ -3870,7 +5275,21 @@ private static nuint ZSTD_buildBlockEntropyStats_sequences(SeqStore_t* seqStoreP uint* entropyWorkspace = countWorkspace + (52 + 1); nuint entropyWorkspaceSize = wkspSize - (52 + 1) * sizeof(uint); ZSTD_symbolEncodingTypeStats_t stats; - stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq, prevEntropy, nextEntropy, op, oend, strategy, countWorkspace, entropyWorkspace, entropyWorkspaceSize) : ZSTD_buildDummySequencesStatistics(nextEntropy); + stats = + nbSeq != 0 + ? 
ZSTD_buildSequencesStatistics( + seqStorePtr, + nbSeq, + prevEntropy, + nextEntropy, + op, + oend, + strategy, + countWorkspace, + entropyWorkspace, + entropyWorkspaceSize + ) + : ZSTD_buildDummySequencesStatistics(nextEntropy); { nuint err_code = stats.size; if (ERR_isError(err_code)) @@ -3892,12 +5311,31 @@ private static nuint ZSTD_buildBlockEntropyStats_sequences(SeqStore_t* seqStoreP * @return : 0 on success, or an error code * Note : also employed in superblock */ - private static nuint ZSTD_buildBlockEntropyStats(SeqStore_t* seqStorePtr, ZSTD_entropyCTables_t* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params_s* cctxParams, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize) + private static nuint ZSTD_buildBlockEntropyStats( + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize + ) { nuint litSize = (nuint)(seqStorePtr->lit - seqStorePtr->litStart); - int huf_useOptDepth = cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; + int huf_useOptDepth = + cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; int hufFlags = huf_useOptDepth != 0 ? 
(int)HUF_flags_e.HUF_flags_optimalDepth : 0; - entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_literalsCompressionIsDisabled(cctxParams), workspace, wkspSize, hufFlags); + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals( + seqStorePtr->litStart, + litSize, + &prevEntropy->huf, + &nextEntropy->huf, + &entropyMetadata->hufMetadata, + ZSTD_literalsCompressionIsDisabled(cctxParams), + workspace, + wkspSize, + hufFlags + ); { nuint err_code = entropyMetadata->hufMetadata.hufDesSize; if (ERR_isError(err_code)) @@ -3906,7 +5344,15 @@ private static nuint ZSTD_buildBlockEntropyStats(SeqStore_t* seqStorePtr, ZSTD_e } } - entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, &prevEntropy->fse, &nextEntropy->fse, cctxParams, &entropyMetadata->fseMetadata, workspace, wkspSize); + entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences( + seqStorePtr, + &prevEntropy->fse, + &nextEntropy->fse, + cctxParams, + &entropyMetadata->fseMetadata, + workspace, + wkspSize + ); { nuint err_code = entropyMetadata->fseMetadata.fseTablesSize; if (ERR_isError(err_code)) @@ -3919,23 +5365,47 @@ private static nuint ZSTD_buildBlockEntropyStats(SeqStore_t* seqStorePtr, ZSTD_e } /* Returns the size estimate for the literals section (header + content) of a block */ - private static nuint ZSTD_estimateBlockSize_literal(byte* literals, nuint litSize, ZSTD_hufCTables_t* huf, ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, nuint wkspSize, int writeEntropy) + private static nuint ZSTD_estimateBlockSize_literal( + byte* literals, + nuint litSize, + ZSTD_hufCTables_t* huf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) { uint* countWksp = (uint*)workspace; uint maxSymbolValue = 255; - nuint 
literalSectionHeaderSize = (nuint)(3 + (litSize >= 1 * (1 << 10) ? 1 : 0) + (litSize >= 16 * (1 << 10) ? 1 : 0)); + nuint literalSectionHeaderSize = (nuint)( + 3 + (litSize >= 1 * (1 << 10) ? 1 : 0) + (litSize >= 16 * (1 << 10) ? 1 : 0) + ); uint singleStream = litSize < 256 ? 1U : 0U; if (hufMetadata->hType == SymbolEncodingType_e.set_basic) return litSize; else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) return 1; - else if (hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat) - { - nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, literals, litSize, workspace, wkspSize); + else if ( + hufMetadata->hType == SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat + ) + { + nuint largest = HIST_count_wksp( + countWksp, + &maxSymbolValue, + literals, + litSize, + workspace, + wkspSize + ); if (ERR_isError(largest)) return litSize; { - nuint cLitSizeEstimate = HUF_estimateCompressedSize(&huf->CTable.e0, countWksp, maxSymbolValue); + nuint cLitSizeEstimate = HUF_estimateCompressedSize( + &huf->CTable.e0, + countWksp, + maxSymbolValue + ); if (writeEntropy != 0) cLitSizeEstimate += hufMetadata->hufDesSize; if (singleStream == 0) @@ -3949,7 +5419,19 @@ private static nuint ZSTD_estimateBlockSize_literal(byte* literals, nuint litSiz } /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ - private static nuint ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, byte* codeTable, nuint nbSeq, uint maxCode, uint* fseCTable, byte* additionalBits, short* defaultNorm, uint defaultNormLog, uint defaultMax, void* workspace, nuint wkspSize) + private static nuint ZSTD_estimateBlockSize_symbolType( + SymbolEncodingType_e type, + byte* codeTable, + nuint nbSeq, + uint maxCode, + uint* fseCTable, + byte* additionalBits, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + void* workspace, + nuint wkspSize + 
) { uint* countWksp = (uint*)workspace; byte* ctp = codeTable; @@ -3961,13 +5443,21 @@ private static nuint ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type if (type == SymbolEncodingType_e.set_basic) { assert(max <= defaultMax); - cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); + cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost( + defaultNorm, + defaultNormLog, + countWksp, + max + ); } else if (type == SymbolEncodingType_e.set_rle) { cSymbolTypeSizeEstimateInBits = 0; } - else if (type == SymbolEncodingType_e.set_compressed || type == SymbolEncodingType_e.set_repeat) + else if ( + type == SymbolEncodingType_e.set_compressed + || type == SymbolEncodingType_e.set_repeat + ) { cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } @@ -3990,24 +5480,103 @@ private static nuint ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type } /* Returns the size estimate for the sequences section (header + content) of a block */ - private static nuint ZSTD_estimateBlockSize_sequences(byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize, int writeEntropy) + private static nuint ZSTD_estimateBlockSize_sequences( + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) { /* seqHead */ - nuint sequencesSectionHeaderSize = (nuint)(1 + 1 + (nbSeq >= 128 ? 1 : 0) + (nbSeq >= 0x7F00 ? 1 : 0)); + nuint sequencesSectionHeaderSize = (nuint)( + 1 + 1 + (nbSeq >= 128 ? 1 : 0) + (nbSeq >= 0x7F00 ? 
1 : 0) + ); nuint cSeqSizeEstimate = 0; - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, 31, fseTables->offcodeCTable, null, OF_defaultNorm, OF_defaultNormLog, 28, workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, 35, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, 35, workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, 52, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, 52, workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->ofType, + ofCodeTable, + nbSeq, + 31, + fseTables->offcodeCTable, + null, + OF_defaultNorm, + OF_defaultNormLog, + 28, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->llType, + llCodeTable, + nbSeq, + 35, + fseTables->litlengthCTable, + LL_bits, + LL_defaultNorm, + LL_defaultNormLog, + 35, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->mlType, + mlCodeTable, + nbSeq, + 52, + fseTables->matchlengthCTable, + ML_bits, + ML_defaultNorm, + ML_defaultNormLog, + 52, + workspace, + wkspSize + ); if (writeEntropy != 0) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } /* Returns the size estimate for a given stream of literals, of, ll, ml */ - private static nuint ZSTD_estimateBlockSize(byte* literals, nuint litSize, byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize, int writeLitEntropy, int writeSeqEntropy) - { - nuint literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); - nuint seqSize = 
ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); + private static nuint ZSTD_estimateBlockSize( + byte* literals, + nuint litSize, + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize, + int writeLitEntropy, + int writeSeqEntropy + ) + { + nuint literalsSize = ZSTD_estimateBlockSize_literal( + literals, + litSize, + &entropy->huf, + &entropyMetadata->hufMetadata, + workspace, + wkspSize, + writeLitEntropy + ); + nuint seqSize = ZSTD_estimateBlockSize_sequences( + ofCodeTable, + llCodeTable, + mlCodeTable, + nbSeq, + &entropy->fse, + &entropyMetadata->fseMetadata, + workspace, + wkspSize, + writeSeqEntropy + ); return seqSize + literalsSize + ZSTD_blockHeaderSize; } @@ -4015,18 +5584,42 @@ private static nuint ZSTD_estimateBlockSize(byte* literals, nuint litSize, byte* * * @return: estimated compressed size of the seqStore, or a zstd error. 
*/ - private static nuint ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx_s* zc) + private static nuint ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + SeqStore_t* seqStore, + ZSTD_CCtx_s* zc + ) { ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; { - nuint err_code = ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize); + nuint err_code = ZSTD_buildBlockEntropyStats( + seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + entropyMetadata, + zc->tmpWorkspace, + zc->tmpWkspSize + ); if (ERR_isError(err_code)) { return err_code; } } - return ZSTD_estimateBlockSize(seqStore->litStart, (nuint)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (nuint)(seqStore->sequences - seqStore->sequencesStart), &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize, entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0, 1); + return ZSTD_estimateBlockSize( + seqStore->litStart, + (nuint)(seqStore->lit - seqStore->litStart), + seqStore->ofCode, + seqStore->llCode, + seqStore->mlCode, + (nuint)(seqStore->sequences - seqStore->sequencesStart), + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->tmpWorkspace, + zc->tmpWkspSize, + entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 
1 : 0, + 1 + ); } /* Returns literals bytes represented in a seqStore */ @@ -4039,7 +5632,10 @@ private static nuint ZSTD_countSeqStoreLiteralsBytes(SeqStore_t* seqStore) { SeqDef_s seq = seqStore->sequencesStart[i]; literalsBytes += seq.litLength; - if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) + if ( + i == seqStore->longLengthPos + && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength + ) { literalsBytes += 0x10000; } @@ -4058,7 +5654,10 @@ private static nuint ZSTD_countSeqStoreMatchBytes(SeqStore_t* seqStore) { SeqDef_s seq = seqStore->sequencesStart[i]; matchBytes += (nuint)(seq.mlBase + 3); - if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) + if ( + i == seqStore->longLengthPos + && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength + ) { matchBytes += 0x10000; } @@ -4070,7 +5669,12 @@ private static nuint ZSTD_countSeqStoreMatchBytes(SeqStore_t* seqStore) /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). * Stores the result in resultSeqStore. 
*/ - private static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, SeqStore_t* originalSeqStore, nuint startIdx, nuint endIdx) + private static void ZSTD_deriveSeqStoreChunk( + SeqStore_t* resultSeqStore, + SeqStore_t* originalSeqStore, + nuint startIdx, + nuint endIdx + ) { *resultSeqStore = *originalSeqStore; if (startIdx > 0) @@ -4081,7 +5685,10 @@ private static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, SeqStor if (originalSeqStore->longLengthType != ZSTD_longLengthType_e.ZSTD_llt_none) { - if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) + if ( + originalSeqStore->longLengthPos < startIdx + || originalSeqStore->longLengthPos > endIdx + ) { resultSeqStore->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; } @@ -4140,10 +5747,18 @@ private static uint ZSTD_resolveRepcodeToRawOffset(uint* rep, uint offBase, uint * 1-3 : repcode 1-3 * 4+ : real_offset+3 */ - private static void ZSTD_seqStore_resolveOffCodes(repcodes_s* dRepcodes, repcodes_s* cRepcodes, SeqStore_t* seqStore, uint nbSeq) + private static void ZSTD_seqStore_resolveOffCodes( + repcodes_s* dRepcodes, + repcodes_s* cRepcodes, + SeqStore_t* seqStore, + uint nbSeq + ) { uint idx = 0; - uint longLitLenIdx = seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq; + uint longLitLenIdx = + seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength + ? seqStore->longLengthPos + : nbSeq; for (; idx < nbSeq; ++idx) { SeqDef_s* seq = seqStore->sequencesStart + idx; @@ -4171,7 +5786,18 @@ private static void ZSTD_seqStore_resolveOffCodes(repcodes_s* dRepcodes, repcode * * Returns the total size of that block (including header) or a ZSTD error code. 
*/ - private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore_t* seqStore, repcodes_s* dRep, repcodes_s* cRep, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock, uint isPartition) + private static nuint ZSTD_compressSeqStore_singleBlock( + ZSTD_CCtx_s* zc, + SeqStore_t* seqStore, + repcodes_s* dRep, + repcodes_s* cRep, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock, + uint isPartition + ) { const uint rleMaxLength = 25; byte* op = (byte*)dst; @@ -4181,13 +5807,29 @@ private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ repcodes_s dRepOriginal = *dRep; if (isPartition != 0) - ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (uint)(seqStore->sequences - seqStore->sequencesStart)); + ZSTD_seqStore_resolveOffCodes( + dRep, + cRep, + seqStore, + (uint)(seqStore->sequences - seqStore->sequencesStart) + ); if (dstCapacity < ZSTD_blockHeaderSize) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, srcSize, zc->tmpWorkspace, zc->tmpWkspSize, zc->bmi2); + cSeqsSize = ZSTD_entropyCompressSeqStore( + seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + srcSize, + zc->tmpWorkspace, + zc->tmpWkspSize, + zc->bmi2 + ); { nuint err_code = cSeqsSize; if (ERR_isError(err_code)) @@ -4196,7 +5838,11 @@ private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore } } - if (zc->isFirstBlock == 0 && cSeqsSize < rleMaxLength && ZSTD_isRLE((byte*)src, srcSize) != 0) + if ( + zc->isFirstBlock == 0 + && cSeqsSize 
< rleMaxLength + && ZSTD_isRLE((byte*)src, srcSize) != 0 + ) { cSeqsSize = 1; } @@ -4204,7 +5850,11 @@ private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore if (zc->seqCollector.collectSequences != 0) { { - nuint err_code = ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep); + nuint err_code = ZSTD_copyBlockSequences( + &zc->seqCollector, + seqStore, + dRepOriginal.rep + ); if (ERR_isError(err_code)) { return err_code; @@ -4248,8 +5898,12 @@ private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore cSize = ZSTD_blockHeaderSize + cSeqsSize; } - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; return cSize; } @@ -4267,7 +5921,13 @@ private static nuint ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx_s* zc, SeqStore * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize * maximum of 128 KB, this value is actually impossible to reach. 
*/ - private static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, nuint startIdx, nuint endIdx, ZSTD_CCtx_s* zc, SeqStore_t* origSeqStore) + private static void ZSTD_deriveBlockSplitsHelper( + seqStoreSplits* splits, + nuint startIdx, + nuint endIdx, + ZSTD_CCtx_s* zc, + SeqStore_t* origSeqStore + ) { SeqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; SeqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; @@ -4285,10 +5945,23 @@ private static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, nuint s ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); - estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); - estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); - estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); - if (ERR_isError(estimatedOriginalSize) || ERR_isError(estimatedFirstHalfSize) || ERR_isError(estimatedSecondHalfSize)) + estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + fullSeqStoreChunk, + zc + ); + estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + firstHalfSeqStore, + zc + ); + estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + secondHalfSeqStore, + zc + ); + if ( + ERR_isError(estimatedOriginalSize) + || ERR_isError(estimatedFirstHalfSize) + || ERR_isError(estimatedSecondHalfSize) + ) { return; } @@ -4327,7 +6000,15 @@ private static nuint ZSTD_deriveBlockSplits(ZSTD_CCtx_s* zc, uint* partitions, u * * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
*/ - private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint blockSize, uint lastBlock, uint nbSeq) + private static nuint ZSTD_compressBlock_splitBlock_internal( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint blockSize, + uint lastBlock, + uint nbSeq + ) { nuint cSize = 0; byte* ip = (byte*)src; @@ -4360,7 +6041,18 @@ private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, voi *nextSeqStore = new SeqStore_t(); if (numSplits == 0) { - nuint cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, &dRep, &cRep, op, dstCapacity, ip, blockSize, lastBlock, 0); + nuint cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock( + zc, + &zc->seqStore, + &dRep, + &cRep, + op, + dstCapacity, + ip, + blockSize, + lastBlock, + 0 + ); { nuint err_code = cSizeSingleBlock; if (ERR_isError(err_code)) @@ -4380,7 +6072,9 @@ private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, voi nuint cSizeChunk; uint lastPartition = i == numSplits ? 
1U : 0U; uint lastBlockEntireSrc = 0; - nuint srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); + nuint srcBytes = + ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + + ZSTD_countSeqStoreMatchBytes(currSeqStore); srcBytesTotal += srcBytes; if (lastPartition != 0) { @@ -4389,10 +6083,26 @@ private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, voi } else { - ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i + 1]); - } - - cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, &dRep, &cRep, op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1); + ZSTD_deriveSeqStoreChunk( + nextSeqStore, + &zc->seqStore, + partitions[i], + partitions[i + 1] + ); + } + + cSizeChunk = ZSTD_compressSeqStore_singleBlock( + zc, + currSeqStore, + &dRep, + &cRep, + op, + dstCapacity, + ip, + srcBytes, + lastBlockEntireSrc, + 1 + ); { nuint err_code = cSizeChunk; if (ERR_isError(err_code)) @@ -4413,7 +6123,14 @@ private static nuint ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx_s* zc, voi return cSize; } - private static nuint ZSTD_compressBlock_splitBlock(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + private static nuint ZSTD_compressBlock_splitBlock( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) { uint nbSeq; nuint cSize; @@ -4430,11 +6147,17 @@ private static nuint ZSTD_compressBlock_splitBlock(ZSTD_CCtx_s* zc, void* dst, n if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) { - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; if 
(zc->seqCollector.collectSequences != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) + ); } cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); @@ -4452,7 +6175,15 @@ private static nuint ZSTD_compressBlock_splitBlock(ZSTD_CCtx_s* zc, void* dst, n nbSeq = (uint)(zc->seqStore.sequences - zc->seqStore.sequencesStart); } - cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); + cSize = ZSTD_compressBlock_splitBlock_internal( + zc, + dst, + dstCapacity, + src, + srcSize, + lastBlock, + nbSeq + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4464,7 +6195,14 @@ private static nuint ZSTD_compressBlock_splitBlock(ZSTD_CCtx_s* zc, void* dst, n return cSize; } - private static nuint ZSTD_compressBlock_internal(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint frame) + private static nuint ZSTD_compressBlock_internal( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint frame + ) { /* This is an estimated upper bound for the length of an rle block. * This isn't the actual upper bound. 
@@ -4488,7 +6226,9 @@ private static nuint ZSTD_compressBlock_internal(ZSTD_CCtx_s* zc, void* dst, nui { if (zc->seqCollector.collectSequences != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) + ); } cSize = 0; @@ -4499,7 +6239,11 @@ private static nuint ZSTD_compressBlock_internal(ZSTD_CCtx_s* zc, void* dst, nui if (zc->seqCollector.collectSequences != 0) { { - nuint err_code = ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep); + nuint err_code = ZSTD_copyBlockSequences( + &zc->seqCollector, + ZSTD_getSeqStore(zc), + zc->blockState.prevCBlock->rep + ); if (ERR_isError(err_code)) { return err_code; @@ -4510,38 +6254,81 @@ private static nuint ZSTD_compressBlock_internal(ZSTD_CCtx_s* zc, void* dst, nui return 0; } - cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, dst, dstCapacity, srcSize, zc->tmpWorkspace, zc->tmpWkspSize, zc->bmi2); - if (frame != 0 && zc->isFirstBlock == 0 && cSize < rleMaxLength && ZSTD_isRLE(ip, srcSize) != 0) + cSize = ZSTD_entropyCompressSeqStore( + &zc->seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + dst, + dstCapacity, + srcSize, + zc->tmpWorkspace, + zc->tmpWkspSize, + zc->bmi2 + ); + if ( + frame != 0 + && zc->isFirstBlock == 0 + && cSize < rleMaxLength + && ZSTD_isRLE(ip, srcSize) != 0 + ) { cSize = 1; op[0] = ip[0]; } - @out: + @out: if (!ERR_isError(cSize) && cSize > 1) { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); } - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == 
FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; return cSize; } - private static nuint ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, nuint bss, uint lastBlock) + private static nuint ZSTD_compressBlock_targetCBlockSize_body( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + nuint bss, + uint lastBlock + ) { if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress) { - if (zc->isFirstBlock == 0 && ZSTD_maybeRLE(&zc->seqStore) != 0 && ZSTD_isRLE((byte*)src, srcSize) != 0) + if ( + zc->isFirstBlock == 0 + && ZSTD_maybeRLE(&zc->seqStore) != 0 + && ZSTD_isRLE((byte*)src, srcSize) != 0 + ) { return ZSTD_rleCompressBlock(dst, dstCapacity, *(byte*)src, srcSize, lastBlock); } { - nuint cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); - if (cSize != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))) + nuint cSize = ZSTD_compressSuperBlock( + zc, + dst, + dstCapacity, + src, + srcSize, + lastBlock + ); + if ( + cSize + != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + ) { - nuint maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + nuint maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4562,7 +6349,14 @@ private static nuint ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx_s* zc, v return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); } - private static nuint ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + private static nuint ZSTD_compressBlock_targetCBlockSize( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) { nuint cSize = 0; nuint bss = 
ZSTD_buildSeqStore(zc, src, srcSize); @@ -4574,7 +6368,15 @@ private static nuint ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx_s* zc, void* } } - cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); + cSize = ZSTD_compressBlock_targetCBlockSize_body( + zc, + dst, + dstCapacity, + src, + srcSize, + bss, + lastBlock + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4583,16 +6385,35 @@ private static nuint ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx_s* zc, void* } } - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; return cSize; } - private static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* ip, void* iend) + private static void ZSTD_overflowCorrectIfNeeded( + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* ip, + void* iend + ) { uint cycleLog = ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy); uint maxDist = (uint)1 << (int)@params->cParams.windowLog; - if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend) != 0) + if ( + ZSTD_window_needOverflowCorrection( + ms->window, + cycleLog, + maxDist, + ms->loadedDictEnd, + ip, + iend + ) != 0 + ) { uint correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); ZSTD_cwksp_mark_tables_dirty(ws); @@ -4608,25 +6429,29 @@ private static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, ZSTD_cwk } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_splitLevels => new int[10] - { - 0, - 0, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 4 - }; - private static int* 
splitLevels => (int*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_splitLevels)); + private static ReadOnlySpan Span_splitLevels => + new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; + private static int* splitLevels => + (int*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_splitLevels) + ); #else - private static readonly int* splitLevels = GetArrayPointer(new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }); + private static readonly int* splitLevels = GetArrayPointer( + new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 } + ); #endif - private static nuint ZSTD_optimalBlockSize(ZSTD_CCtx_s* cctx, void* src, nuint srcSize, nuint blockSizeMax, int splitLevel, ZSTD_strategy strat, long savings) + + private static nuint ZSTD_optimalBlockSize( + ZSTD_CCtx_s* cctx, + void* src, + nuint srcSize, + nuint blockSizeMax, + int splitLevel, + ZSTD_strategy strat, + long savings + ) { if (srcSize < 128 * (1 << 10) || blockSizeMax < 128 * (1 << 10)) return srcSize < blockSizeMax ? srcSize : blockSizeMax; @@ -4648,7 +6473,13 @@ private static nuint ZSTD_optimalBlockSize(ZSTD_CCtx_s* cctx, void* src, nuint s splitLevel -= 2; } - return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); + return ZSTD_splitBlock( + src, + blockSizeMax, + splitLevel, + cctx->tmpWorkspace, + cctx->tmpWkspSize + ); } /*! 
ZSTD_compress_frameChunk() : @@ -4658,7 +6489,14 @@ private static nuint ZSTD_optimalBlockSize(ZSTD_CCtx_s* cctx, void* src, nuint s * Frame is supposed already started (header already produced) * @return : compressed size, or an error code */ - private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastFrameChunk) + private static nuint ZSTD_compress_frameChunk( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastFrameChunk + ) { nuint blockSizeMax = cctx->blockSizeMax; nuint remaining = srcSize; @@ -4673,7 +6511,15 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin while (remaining != 0) { ZSTD_MatchState_t* ms = &cctx->blockState.matchState; - nuint blockSize = ZSTD_optimalBlockSize(cctx, ip, remaining, blockSizeMax, cctx->appliedParams.preBlockSplitter_level, cctx->appliedParams.cParams.strategy, savings); + nuint blockSize = ZSTD_optimalBlockSize( + cctx, + ip, + remaining, + blockSizeMax, + cctx->appliedParams.preBlockSplitter_level, + cctx->appliedParams.cParams.strategy, + savings + ); uint lastBlock = lastFrameChunk & (uint)(blockSize == remaining ? 
1 : 0); assert(blockSize <= remaining); if (dstCapacity < ZSTD_blockHeaderSize + (nuint)(1 + 1) + 1) @@ -4681,16 +6527,41 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); - ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); - ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + ZSTD_overflowCorrectIfNeeded( + ms, + &cctx->workspace, + &cctx->appliedParams, + ip, + ip + blockSize + ); + ZSTD_checkDictValidity( + &ms->window, + ip + blockSize, + maxDist, + &ms->loadedDictEnd, + &ms->dictMatchState + ); + ZSTD_window_enforceMaxDist( + &ms->window, + ip, + maxDist, + &ms->loadedDictEnd, + &ms->dictMatchState + ); if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; { nuint cSize; if (ZSTD_useTargetCBlockSize(&cctx->appliedParams) != 0) { - cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); + cSize = ZSTD_compressBlock_targetCBlockSize( + cctx, + op, + dstCapacity, + ip, + blockSize, + lastBlock + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4704,7 +6575,14 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams) != 0) { - cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock); + cSize = ZSTD_compressBlock_splitBlock( + cctx, + op, + dstCapacity, + ip, + blockSize, + lastBlock + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4717,7 +6595,14 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin } else { - cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize, 
1); + cSize = ZSTD_compressBlock_internal( + cctx, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + ip, + blockSize, + 1 + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4739,7 +6624,14 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin } else { - uint cBlockHeader = cSize == 1 ? lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + uint cBlockHeader = + cSize == 1 + ? lastBlock + + ((uint)blockType_e.bt_rle << 1) + + (uint)(blockSize << 3) + : lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(cSize << 3); MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } @@ -4761,21 +6653,40 @@ private static nuint ZSTD_compress_frameChunk(ZSTD_CCtx_s* cctx, void* dst, nuin return (nuint)(op - ostart); } - private static nuint ZSTD_writeFrameHeader(void* dst, nuint dstCapacity, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, uint dictID) + private static nuint ZSTD_writeFrameHeader( + void* dst, + nuint dstCapacity, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + uint dictID + ) { byte* op = (byte*)dst; /* 0-3 */ - uint dictIDSizeCodeLength = (uint)((dictID > 0 ? 1 : 0) + (dictID >= 256 ? 1 : 0) + (dictID >= 65536 ? 1 : 0)); + uint dictIDSizeCodeLength = (uint)( + (dictID > 0 ? 1 : 0) + (dictID >= 256 ? 1 : 0) + (dictID >= 65536 ? 1 : 0) + ); /* 0-3 */ uint dictIDSizeCode = @params->fParams.noDictIDFlag != 0 ? 0 : dictIDSizeCodeLength; uint checksumFlag = @params->fParams.checksumFlag > 0 ? 1U : 0U; uint windowSize = (uint)1 << (int)@params->cParams.windowLog; - uint singleSegment = @params->fParams.contentSizeFlag != 0 && windowSize >= pledgedSrcSize ? 1U : 0U; + uint singleSegment = + @params->fParams.contentSizeFlag != 0 && windowSize >= pledgedSrcSize ? 
1U : 0U; byte windowLogByte = (byte)(@params->cParams.windowLog - 10 << 3); - uint fcsCode = (uint)(@params->fParams.contentSizeFlag != 0 ? (pledgedSrcSize >= 256 ? 1 : 0) + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) + (pledgedSrcSize >= 0xFFFFFFFFU ? 1 : 0) : 0); - byte frameHeaderDescriptionByte = (byte)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6)); + uint fcsCode = (uint)( + @params->fParams.contentSizeFlag != 0 + ? (pledgedSrcSize >= 256 ? 1 : 0) + + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) + + (pledgedSrcSize >= 0xFFFFFFFFU ? 1 : 0) + : 0 + ); + byte frameHeaderDescriptionByte = (byte)( + dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6) + ); nuint pos = 0; - assert(!(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1))); + assert( + !(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1)) + ); if (dstCapacity < 18) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -4843,7 +6754,13 @@ private static nuint ZSTD_writeFrameHeader(void* dst, nuint dstCapacity, ZSTD_CC * * Returns the total number of bytes written, or a ZSTD error code. */ - public static nuint ZSTD_writeSkippableFrame(void* dst, nuint dstCapacity, void* src, nuint srcSize, uint magicVariant) + public static nuint ZSTD_writeSkippableFrame( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint magicVariant + ) { byte* op = (byte*)dst; if (dstCapacity < srcSize + 8) @@ -4896,10 +6813,17 @@ private static nuint ZSTD_writeLastEmptyBlock(void* dst, nuint dstCapacity) * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory * access and data corruption. 
*/ - private static void ZSTD_referenceExternalSequences(ZSTD_CCtx_s* cctx, rawSeq* seq, nuint nbSeq) + private static void ZSTD_referenceExternalSequences( + ZSTD_CCtx_s* cctx, + rawSeq* seq, + nuint nbSeq + ) { assert(cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init); - assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable); + assert( + nbSeq == 0 + || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable + ); cctx->externSeqStore.seq = seq; cctx->externSeqStore.size = nbSeq; cctx->externSeqStore.capacity = nbSeq; @@ -4907,7 +6831,15 @@ private static void ZSTD_referenceExternalSequences(ZSTD_CCtx_s* cctx, rawSeq* s cctx->externSeqStore.posInSequence = 0; } - private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint frame, uint lastFrameChunk) + private static nuint ZSTD_compressContinue_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint frame, + uint lastFrameChunk + ) { ZSTD_MatchState_t* ms = &cctx->blockState.matchState; nuint fhSize = 0; @@ -4918,7 +6850,13 @@ private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst if (frame != 0 && cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) { - fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, cctx->pledgedSrcSizePlusOne - 1, cctx->dictID); + fhSize = ZSTD_writeFrameHeader( + dst, + dstCapacity, + &cctx->appliedParams, + cctx->pledgedSrcSizePlusOne - 1, + cctx->dictID + ); { nuint err_code = fhSize; if (ERR_isError(err_code)) @@ -4948,11 +6886,27 @@ private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst if (frame == 0) { - ZSTD_overflowCorrectIfNeeded(ms, &cctx->workspace, &cctx->appliedParams, src, (byte*)src + srcSize); + ZSTD_overflowCorrectIfNeeded( + ms, + &cctx->workspace, + &cctx->appliedParams, + src, + (byte*)src + srcSize + ); } { - nuint cSize 
= frame != 0 ? ZSTD_compress_frameChunk(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); + nuint cSize = + frame != 0 + ? ZSTD_compress_frameChunk( + cctx, + dst, + dstCapacity, + src, + srcSize, + lastFrameChunk + ) + : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4963,7 +6917,12 @@ private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst cctx->consumedSrcSize += srcSize; cctx->producedCSize += cSize + fhSize; - assert(!(cctx->appliedParams.fParams.contentSizeFlag != 0 && cctx->pledgedSrcSizePlusOne == 0)); + assert( + !( + cctx->appliedParams.fParams.contentSizeFlag != 0 + && cctx->pledgedSrcSizePlusOne == 0 + ) + ); if (cctx->pledgedSrcSizePlusOne != 0) { if (cctx->consumedSrcSize + 1 > cctx->pledgedSrcSizePlusOne) @@ -4976,13 +6935,25 @@ private static nuint ZSTD_compressContinue_internal(ZSTD_CCtx_s* cctx, void* dst } } - private static nuint ZSTD_compressContinue_public(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_compressContinue_public( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); } /* NOTE: Must just wrap ZSTD_compressContinue_public() */ - public static nuint ZSTD_compressContinue(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_compressContinue( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); } @@ -4991,7 +6962,9 @@ private static nuint ZSTD_getBlockSize_deprecated(ZSTD_CCtx_s* cctx) { ZSTD_compressionParameters cParams = cctx->appliedParams.cParams; assert(ZSTD_checkCParams(cParams) == 0); - return 
cctx->appliedParams.maxBlockSize < (nuint)1 << (int)cParams.windowLog ? cctx->appliedParams.maxBlockSize : (nuint)1 << (int)cParams.windowLog; + return cctx->appliedParams.maxBlockSize < (nuint)1 << (int)cParams.windowLog + ? cctx->appliedParams.maxBlockSize + : (nuint)1 << (int)cParams.windowLog; } /* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ @@ -5001,7 +6974,13 @@ public static nuint ZSTD_getBlockSize(ZSTD_CCtx_s* cctx) } /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ - private static nuint ZSTD_compressBlock_deprecated(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_deprecated( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { { nuint blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); @@ -5015,7 +6994,13 @@ private static nuint ZSTD_compressBlock_deprecated(ZSTD_CCtx_s* cctx, void* dst, } /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ - public static nuint ZSTD_compressBlock(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_compressBlock( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); } @@ -5023,11 +7008,23 @@ public static nuint ZSTD_compressBlock(ZSTD_CCtx_s* cctx, void* dst, nuint dstCa /*! 
ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ - private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* src, nuint srcSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + private static nuint ZSTD_loadDictionaryContent( + ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* src, + nuint srcSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) { byte* ip = (byte*)src; byte* iend = ip + srcSize; - int loadLdmDict = @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null ? 1 : 0; + int loadLdmDict = + @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null + ? 1 + : 0; ZSTD_assertEqualCParams(@params->cParams, ms->cParams); { /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. @@ -5044,7 +7041,8 @@ private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_ * When short cache is used, we need to truncate the dictionary * so that its indices don't overlap with the tag. */ const uint shortCacheMaxDictSize = (1U << 32 - 8) - 2; - maxDictSize = maxDictSize < shortCacheMaxDictSize ? maxDictSize : shortCacheMaxDictSize; + maxDictSize = + maxDictSize < shortCacheMaxDictSize ? maxDictSize : shortCacheMaxDictSize; assert(loadLdmDict == 0); } @@ -5056,7 +7054,10 @@ private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_ } } - if (srcSize > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20))) + if ( + srcSize + > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) + ) { assert(ZSTD_window_isEmpty(ms->window) != 0); #if DEBUG @@ -5074,7 +7075,19 @@ private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_ } { - uint maxDictSize = 1U << (int)((@params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 ? 
@params->cParams.hashLog + 3 : @params->cParams.chainLog + 1) < 31 ? @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 ? @params->cParams.hashLog + 3 : @params->cParams.chainLog + 1 : 31); + uint maxDictSize = + 1U + << (int)( + ( + @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 + ? @params->cParams.hashLog + 3 + : @params->cParams.chainLog + 1 + ) < 31 + ? @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 + ? @params->cParams.hashLog + 3 + : @params->cParams.chainLog + 1 + : 31 + ); if (srcSize > maxDictSize) { ip = iend - maxDictSize; @@ -5142,7 +7155,11 @@ private static nuint ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ldmState_ * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check * and only dictionaries with 100% valid symbols can be assumed valid. */ - private static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, uint dictMaxSymbolValue, uint maxSymbolValue) + private static FSE_repeat ZSTD_dictNCountRepeat( + short* normalizedCounter, + uint dictMaxSymbolValue, + uint maxSymbolValue + ) { uint s; if (dictMaxSymbolValue < maxSymbolValue) @@ -5166,7 +7183,12 @@ private static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, uint d * return : size of dictionary header (size of magic number + dict ID + entropy tables) * assumptions : magic number supposed already checked * and dictSize >= 8 */ - private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, void* dict, nuint dictSize) + private static nuint ZSTD_loadCEntropy( + ZSTD_compressedBlockState_t* bs, + void* workspace, + void* dict, + nuint dictSize + ) { short* offcodeNCount = stackalloc short[32]; uint offcodeMaxValue = 31; @@ -5178,7 +7200,13 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo { uint maxSymbolValue = 255; uint hasZeroWeights = 1; - nuint hufHeaderSize = HUF_readCTable(&bs->entropy.huf.CTable.e0, &maxSymbolValue, dictPtr, 
(nuint)(dictEnd - dictPtr), &hasZeroWeights); + nuint hufHeaderSize = HUF_readCTable( + &bs->entropy.huf.CTable.e0, + &maxSymbolValue, + dictPtr, + (nuint)(dictEnd - dictPtr), + &hasZeroWeights + ); if (hasZeroWeights == 0 && maxSymbolValue == 255) bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_valid; if (ERR_isError(hufHeaderSize)) @@ -5191,7 +7219,13 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo { uint offcodeLog; - nuint offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (nuint)(dictEnd - dictPtr)); + nuint offcodeHeaderSize = FSE_readNCount( + offcodeNCount, + &offcodeMaxValue, + &offcodeLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if (ERR_isError(offcodeHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -5202,7 +7236,18 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, 31, offcodeLog, workspace, (8 << 10) + 512))) + if ( + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.offcodeCTable, + offcodeNCount, + 31, + offcodeLog, + workspace, + (8 << 10) + 512 + ) + ) + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } @@ -5212,8 +7257,15 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo { short* matchlengthNCount = stackalloc short[53]; - uint matchlengthMaxValue = 52, matchlengthLog; - nuint matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + uint matchlengthMaxValue = 52, + matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount( + matchlengthNCount, + &matchlengthMaxValue, + &matchlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if 
(ERR_isError(matchlengthHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -5224,19 +7276,41 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, (8 << 10) + 512))) + if ( + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.matchlengthCTable, + matchlengthNCount, + matchlengthMaxValue, + matchlengthLog, + workspace, + (8 << 10) + 512 + ) + ) + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, 52); + bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat( + matchlengthNCount, + matchlengthMaxValue, + 52 + ); dictPtr += matchlengthHeaderSize; } { short* litlengthNCount = stackalloc short[36]; - uint litlengthMaxValue = 35, litlengthLog; - nuint litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + uint litlengthMaxValue = 35, + litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount( + litlengthNCount, + &litlengthMaxValue, + &litlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if (ERR_isError(litlengthHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -5247,12 +7321,27 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - if (ERR_isError(FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, (8 << 10) + 512))) + if ( + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.litlengthCTable, + litlengthNCount, + 
litlengthMaxValue, + litlengthLog, + workspace, + (8 << 10) + 512 + ) + ) + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, 35); + bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat( + litlengthNCount, + litlengthMaxValue, + 35 + ); dictPtr += litlengthHeaderSize; } @@ -5275,19 +7364,27 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo offcodeMax = ZSTD_highbit32(maxOffset); } - bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, offcodeMax < 31 ? offcodeMax : 31); + bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat( + offcodeNCount, + offcodeMaxValue, + offcodeMax < 31 ? offcodeMax : 31 + ); { uint u; for (u = 0; u < 3; u++) { if (bs->rep[u] == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); } if (bs->rep[u] > dictContentSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); } } } @@ -5305,7 +7402,17 @@ private static nuint ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* wo * assumptions : magic number supposed already checked * dictSize supposed >= 8 */ - private static nuint ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_MatchState_t* ms, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* dict, nuint dictSize, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void* workspace) + private static nuint ZSTD_loadZstdDictionary( + ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* dict, + nuint dictSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace + ) { 
byte* dictPtr = (byte*)dict; byte* dictEnd = dictPtr + dictSize; @@ -5327,7 +7434,16 @@ private static nuint ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZS { nuint dictContentSize = (nuint)(dictEnd - dictPtr); { - nuint err_code = ZSTD_loadDictionaryContent(ms, null, ws, @params, dictPtr, dictContentSize, dtlm, tfp); + nuint err_code = ZSTD_loadDictionaryContent( + ms, + null, + ws, + @params, + dictPtr, + dictContentSize, + dtlm, + tfp + ); if (ERR_isError(err_code)) { return err_code; @@ -5340,7 +7456,19 @@ private static nuint ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZS /** ZSTD_compress_insertDictionary() : * @return : dictID, or an error code */ - private static nuint ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_MatchState_t* ms, ldmState_t* ls, ZSTD_cwksp* ws, ZSTD_CCtx_params_s* @params, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp, void* workspace) + private static nuint ZSTD_compress_insertDictionary( + ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace + ) { if (dict == null || dictSize < 8) { @@ -5359,7 +7487,16 @@ private static nuint ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* { if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_auto) { - return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); + return ZSTD_loadDictionaryContent( + ms, + ls, + ws, + @params, + dict, + dictSize, + dtlm, + tfp + ); } if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) @@ -5370,24 +7507,61 @@ private static nuint ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* assert(0 != 0); } - return ZSTD_loadZstdDictionary(bs, ms, ws, @params, dict, 
dictSize, dtlm, tfp, workspace); + return ZSTD_loadZstdDictionary( + bs, + ms, + ws, + @params, + dict, + dictSize, + dtlm, + tfp, + workspace + ); } /*! ZSTD_compressBegin_internal() : * Assumption : either @dict OR @cdict (or none) is non-NULL, never both * @return : 0, or an error code */ - private static nuint ZSTD_compressBegin_internal(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize, ZSTD_buffered_policy_e zbuff) + private static nuint ZSTD_compressBegin_internal( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) { nuint dictContentSize = cdict != null ? cdict->dictContentSize : dictSize; assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); assert(!(dict != null && cdict != null)); - if (cdict != null && cdict->dictContentSize > 0 && (pledgedSrcSize < 128 * (1 << 10) || pledgedSrcSize < cdict->dictContentSize * 6UL || pledgedSrcSize == unchecked(0UL - 1) || cdict->compressionLevel == 0) && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceLoad) + if ( + cdict != null + && cdict->dictContentSize > 0 + && ( + pledgedSrcSize < 128 * (1 << 10) + || pledgedSrcSize < cdict->dictContentSize * 6UL + || pledgedSrcSize == unchecked(0UL - 1) + || cdict->compressionLevel == 0 + ) + && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceLoad + ) { return ZSTD_resetCCtx_usingCDict(cctx, cdict, @params, pledgedSrcSize, zbuff); } { - nuint err_code = ZSTD_resetCCtx_internal(cctx, @params, pledgedSrcSize, dictContentSize, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, zbuff); + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + @params, + pledgedSrcSize, + dictContentSize, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + 
zbuff + ); if (ERR_isError(err_code)) { return err_code; @@ -5395,7 +7569,34 @@ private static nuint ZSTD_compressBegin_internal(ZSTD_CCtx_s* cctx, void* dict, } { - nuint dictID = cdict != null ? ZSTD_compress_insertDictionary(cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, cctx->tmpWorkspace) : ZSTD_compress_insertDictionary(cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, dictContentType, dtlm, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, cctx->tmpWorkspace); + nuint dictID = + cdict != null + ? ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, + &cctx->blockState.matchState, + &cctx->ldmState, + &cctx->workspace, + &cctx->appliedParams, + cdict->dictContent, + cdict->dictContentSize, + cdict->dictContentType, + dtlm, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, + cctx->tmpWorkspace + ) + : ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, + &cctx->blockState.matchState, + &cctx->ldmState, + &cctx->workspace, + &cctx->appliedParams, + dict, + dictSize, + dictContentType, + dtlm, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, + cctx->tmpWorkspace + ); { nuint err_code = dictID; if (ERR_isError(err_code)) @@ -5414,7 +7615,16 @@ private static nuint ZSTD_compressBegin_internal(ZSTD_CCtx_s* cctx, void* dict, /* ZSTD_compressBegin_advanced_internal() : * Private use only. To be called from zstdmt_compress.c. 
*/ - private static nuint ZSTD_compressBegin_advanced_internal(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize) + private static nuint ZSTD_compressBegin_advanced_internal( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) { { /* compression parameters verification and optimization */ @@ -5425,30 +7635,84 @@ private static nuint ZSTD_compressBegin_advanced_internal(ZSTD_CCtx_s* cctx, voi } } - return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictContentType, dtlm, cdict, @params, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered); + return ZSTD_compressBegin_internal( + cctx, + dict, + dictSize, + dictContentType, + dtlm, + cdict, + @params, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); } /*! 
ZSTD_compressBegin_advanced() : * @return : 0, or an error code */ - public static nuint ZSTD_compressBegin_advanced(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, ZSTD_parameters @params, ulong pledgedSrcSize) + public static nuint ZSTD_compressBegin_advanced( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_parameters @params, + ulong pledgedSrcSize + ) { ZSTD_CCtx_params_s cctxParams; ZSTD_CCtxParams_init_internal(&cctxParams, &@params, 0); - return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &cctxParams, pledgedSrcSize); + return ZSTD_compressBegin_advanced_internal( + cctx, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + &cctxParams, + pledgedSrcSize + ); } - private static nuint ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, int compressionLevel) + private static nuint ZSTD_compressBegin_usingDict_deprecated( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + int compressionLevel + ) { ZSTD_CCtx_params_s cctxParams; { - ZSTD_parameters @params = ZSTD_getParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); - ZSTD_CCtxParams_init_internal(&cctxParams, &@params, compressionLevel == 0 ? 3 : compressionLevel); - } - - return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &cctxParams, unchecked(0UL - 1), ZSTD_buffered_policy_e.ZSTDb_not_buffered); + ZSTD_parameters @params = ZSTD_getParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + ZSTD_CCtxParams_init_internal( + &cctxParams, + &@params, + compressionLevel == 0 ? 
3 : compressionLevel + ); + } + + return ZSTD_compressBegin_internal( + cctx, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + &cctxParams, + unchecked(0UL - 1), + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); } - public static nuint ZSTD_compressBegin_usingDict(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize, int compressionLevel) + public static nuint ZSTD_compressBegin_usingDict( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + int compressionLevel + ) { return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); } @@ -5520,14 +7784,26 @@ private static nuint ZSTD_writeEpilogue(ZSTD_CCtx_s* cctx, void* dst, nuint dstC /** ZSTD_CCtx_trace() : * Trace the end of a compression call. */ - private static void ZSTD_CCtx_trace(ZSTD_CCtx_s* cctx, nuint extraCSize) - { - } + private static void ZSTD_CCtx_trace(ZSTD_CCtx_s* cctx, nuint extraCSize) { } - private static nuint ZSTD_compressEnd_public(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_compressEnd_public( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { nuint endResult; - nuint cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); + nuint cSize = ZSTD_compressContinue_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + 1, + 1 + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -5545,7 +7821,12 @@ private static nuint ZSTD_compressEnd_public(ZSTD_CCtx_s* cctx, void* dst, nuint } } - assert(!(cctx->appliedParams.fParams.contentSizeFlag != 0 && cctx->pledgedSrcSizePlusOne == 0)); + assert( + !( + cctx->appliedParams.fParams.contentSizeFlag != 0 + && cctx->pledgedSrcSizePlusOne == 0 + ) + ); if (cctx->pledgedSrcSizePlusOne != 0) { if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize + 1) @@ -5559,7 +7840,13 @@ private static nuint ZSTD_compressEnd_public(ZSTD_CCtx_s* cctx, 
void* dst, nuint } /* NOTE: Must just wrap ZSTD_compressEnd_public() */ - public static nuint ZSTD_compressEnd(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_compressEnd( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); } @@ -5568,7 +7855,16 @@ public static nuint ZSTD_compressEnd(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapa * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. * This prototype will generate compilation warnings. */ - public static nuint ZSTD_compress_advanced(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_parameters @params) + public static nuint ZSTD_compress_advanced( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_parameters @params + ) { { nuint err_code = ZSTD_checkCParams(@params.cParams); @@ -5579,14 +7875,42 @@ public static nuint ZSTD_compress_advanced(ZSTD_CCtx_s* cctx, void* dst, nuint d } ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, 0); - return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); + return ZSTD_compress_advanced_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + &cctx->simpleApiParams + ); } /* Internal */ - private static nuint ZSTD_compress_advanced_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_CCtx_params_s* @params) - { - { - nuint err_code = ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, @params, srcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered); + 
private static nuint ZSTD_compress_advanced_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_CCtx_params_s* @params + ) + { + { + nuint err_code = ZSTD_compressBegin_internal( + cctx, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + @params, + srcSize, + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); if (ERR_isError(err_code)) { return err_code; @@ -5606,15 +7930,42 @@ private static nuint ZSTD_compress_advanced_internal(ZSTD_CCtx_s* cctx, void* ds * Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ - public static nuint ZSTD_compress_usingDict(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, int compressionLevel) - { - { - ZSTD_parameters @params = ZSTD_getParams_internal(compressionLevel, srcSize, dict != null ? dictSize : 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + public static nuint ZSTD_compress_usingDict( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + int compressionLevel + ) + { + { + ZSTD_parameters @params = ZSTD_getParams_internal( + compressionLevel, + srcSize, + dict != null ? dictSize : 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); assert(@params.fParams.contentSizeFlag == 1); - ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, compressionLevel == 0 ? 3 : compressionLevel); + ZSTD_CCtxParams_init_internal( + &cctx->simpleApiParams, + &@params, + compressionLevel == 0 ? 
3 : compressionLevel + ); } - return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams); + return ZSTD_compress_advanced_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + &cctx->simpleApiParams + ); } /*! ZSTD_compressCCtx() : @@ -5625,10 +7976,26 @@ public static nuint ZSTD_compress_usingDict(ZSTD_CCtx_s* cctx, void* dst, nuint * If any advanced parameter was set using the advanced API, * they will all be reset. Only @compressionLevel remains. */ - public static nuint ZSTD_compressCCtx(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, int compressionLevel) + public static nuint ZSTD_compressCCtx( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int compressionLevel + ) { assert(cctx != null); - return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, null, 0, compressionLevel); + return ZSTD_compress_usingDict( + cctx, + dst, + dstCapacity, + src, + srcSize, + null, + 0, + compressionLevel + ); } /*************************************** @@ -5640,7 +8007,13 @@ public static nuint ZSTD_compressCCtx(ZSTD_CCtx_s* cctx, void* dst, nuint dstCap * enough space to successfully compress the data. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_compress(void* dst, nuint dstCapacity, void* src, nuint srcSize, int compressionLevel) + public static nuint ZSTD_compress( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int compressionLevel + ) { nuint result; ZSTD_CCtx_s ctxBody; @@ -5652,9 +8025,25 @@ public static nuint ZSTD_compress(void* dst, nuint dstCapacity, void* src, nuint /*! 
ZSTD_estimateCDictSize_advanced() : * Estimate amount of memory that will be needed to create a dictionary with following arguments */ - public static nuint ZSTD_estimateCDictSize_advanced(nuint dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod) - { - return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), 1, 0) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))); + public static nuint ZSTD_estimateCDictSize_advanced( + nuint dictSize, + ZSTD_compressionParameters cParams, + ZSTD_dictLoadMethod_e dictLoadMethod + ) + { + return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + ZSTD_sizeof_matchState( + &cParams, + ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), + 1, + 0 + ) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) + ); } /*! 
ZSTD_estimate?DictSize() : @@ -5664,29 +8053,53 @@ public static nuint ZSTD_estimateCDictSize_advanced(nuint dictSize, ZSTD_compres */ public static nuint ZSTD_estimateCDictSize(nuint dictSize, int compressionLevel) { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); - return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy); + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + return ZSTD_estimateCDictSize_advanced( + dictSize, + cParams, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy + ); } public static nuint ZSTD_sizeof_CDict(ZSTD_CDict_s* cdict) { if (cdict == null) return 0; - return (nuint)(cdict->workspace.workspace == cdict ? 0 : sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_sizeof(&cdict->workspace); + return (nuint)(cdict->workspace.workspace == cdict ? 
0 : sizeof(ZSTD_CDict_s)) + + ZSTD_cwksp_sizeof(&cdict->workspace); } - private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuffer, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params_s @params) + private static nuint ZSTD_initCDict_internal( + ZSTD_CDict_s* cdict, + void* dictBuffer, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_CCtx_params_s @params + ) { assert(ZSTD_checkCParams(@params.cParams) == 0); cdict->matchState.cParams = @params.cParams; cdict->matchState.dedicatedDictSearch = @params.enableDedicatedDictSearch; - if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef || dictBuffer == null || dictSize == 0) + if ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + || dictBuffer == null + || dictSize == 0 + ) { cdict->dictContent = dictBuffer; } else { - void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))); + void* internalBuffer = ZSTD_cwksp_reserve_object( + &cdict->workspace, + ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) + ); if (internalBuffer == null) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); @@ -5698,10 +8111,21 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff cdict->dictContentSize = dictSize; cdict->dictContentType = dictContentType; - cdict->entropyWorkspace = (uint*)ZSTD_cwksp_reserve_object(&cdict->workspace, (8 << 10) + 512); + cdict->entropyWorkspace = (uint*)ZSTD_cwksp_reserve_object( + &cdict->workspace, + (8 << 10) + 512 + ); ZSTD_reset_compressedBlockState(&cdict->cBlockState); { - nuint err_code = ZSTD_reset_matchState(&cdict->matchState, &cdict->workspace, &@params.cParams, @params.useRowMatchFinder, ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, ZSTD_indexResetPolicy_e.ZSTDirp_reset, ZSTD_resetTarget_e.ZSTD_resetTarget_CDict); + nuint err_code = 
ZSTD_reset_matchState( + &cdict->matchState, + &cdict->workspace, + &@params.cParams, + @params.useRowMatchFinder, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + ZSTD_indexResetPolicy_e.ZSTDirp_reset, + ZSTD_resetTarget_e.ZSTD_resetTarget_CDict + ); if (ERR_isError(err_code)) { return err_code; @@ -5712,7 +8136,19 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff @params.compressionLevel = 3; @params.fParams.contentSizeFlag = 1; { - nuint dictID = ZSTD_compress_insertDictionary(&cdict->cBlockState, &cdict->matchState, null, &cdict->workspace, &@params, cdict->dictContent, cdict->dictContentSize, dictContentType, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict, cdict->entropyWorkspace); + nuint dictID = ZSTD_compress_insertDictionary( + &cdict->cBlockState, + &cdict->matchState, + null, + &cdict->workspace, + &@params, + cdict->dictContent, + cdict->dictContentSize, + dictContentType, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict, + cdict->entropyWorkspace + ); { nuint err_code = dictID; if (ERR_isError(err_code)) @@ -5729,12 +8165,37 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff return 0; } - private static ZSTD_CDict_s* ZSTD_createCDict_advanced_internal(nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_compressionParameters cParams, ZSTD_paramSwitch_e useRowMatchFinder, int enableDedicatedDictSearch, ZSTD_customMem customMem) + private static ZSTD_CDict_s* ZSTD_createCDict_advanced_internal( + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_compressionParameters cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + ZSTD_customMem customMem + ) { - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) + != 0 + ) return null; { - nuint workspaceSize = ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, 0) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))); + nuint workspaceSize = + ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + ZSTD_sizeof_matchState( + &cParams, + useRowMatchFinder, + enableDedicatedDictSearch, + 0 + ) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 0 + : ZSTD_cwksp_alloc_size( + ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) + ) + ); void* workspace = ZSTD_customMalloc(workspaceSize, customMem); ZSTD_cwksp ws; ZSTD_CDict_s* cdict; @@ -5744,7 +8205,12 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff return null; } - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc); + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc + ); cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); assert(cdict != null); ZSTD_cwksp_move(&cdict->workspace, &ws); @@ -5755,46 +8221,105 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff } } - public static ZSTD_CDict_s* ZSTD_createCDict_advanced(void* dictBuffer, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) + public static ZSTD_CDict_s* ZSTD_createCDict_advanced( + void* dictBuffer, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams, + ZSTD_customMem customMem + ) { ZSTD_CCtx_params_s cctxParams; cctxParams = new ZSTD_CCtx_params_s(); 
ZSTD_CCtxParams_init(&cctxParams, 0); cctxParams.cParams = cParams; cctxParams.customMem = customMem; - return ZSTD_createCDict_advanced2(dictBuffer, dictSize, dictLoadMethod, dictContentType, &cctxParams, customMem); + return ZSTD_createCDict_advanced2( + dictBuffer, + dictSize, + dictLoadMethod, + dictContentType, + &cctxParams, + customMem + ); } /* * This API is temporary and is expected to change or disappear in the future! */ - public static ZSTD_CDict_s* ZSTD_createCDict_advanced2(void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_CCtx_params_s* originalCctxParams, ZSTD_customMem customMem) + public static ZSTD_CDict_s* ZSTD_createCDict_advanced2( + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_CCtx_params_s* originalCctxParams, + ZSTD_customMem customMem + ) { ZSTD_CCtx_params_s cctxParams = *originalCctxParams; ZSTD_compressionParameters cParams; ZSTD_CDict_s* cdict; - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) + != 0 + ) return null; if (cctxParams.enableDedicatedDictSearch != 0) { - cParams = ZSTD_dedicatedDictSearch_getCParams(cctxParams.compressionLevel, dictSize); + cParams = ZSTD_dedicatedDictSearch_getCParams( + cctxParams.compressionLevel, + dictSize + ); ZSTD_overrideCParams(&cParams, &cctxParams.cParams); } else { - cParams = ZSTD_getCParamsFromCCtxParams(&cctxParams, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + cParams = ZSTD_getCParamsFromCCtxParams( + &cctxParams, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); } if (ZSTD_dedicatedDictSearch_isSupported(&cParams) == 0) { cctxParams.enableDedicatedDictSearch = 0; - cParams = ZSTD_getCParamsFromCCtxParams(&cctxParams, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + cParams = ZSTD_getCParamsFromCCtxParams( + &cctxParams, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); } cctxParams.cParams = cParams; - cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); - cdict = ZSTD_createCDict_advanced_internal(dictSize, dictLoadMethod, cctxParams.cParams, cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch, customMem); - if (cdict == null || ERR_isError(ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, cctxParams))) + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams.useRowMatchFinder, + &cParams + ); + cdict = ZSTD_createCDict_advanced_internal( + dictSize, + dictLoadMethod, + cctxParams.cParams, + cctxParams.useRowMatchFinder, + cctxParams.enableDedicatedDictSearch, + customMem + ); + if ( + cdict == null + || ERR_isError( + ZSTD_initCDict_internal( + cdict, + dict, + dictSize, + dictLoadMethod, + dictContentType, + cctxParams + ) + ) + ) { ZSTD_freeCDict(cdict); return null; @@ -5815,10 +8340,26 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff * 
in which case the only thing that it transports is the @compressionLevel. * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ - public static ZSTD_CDict_s* ZSTD_createCDict(void* dict, nuint dictSize, int compressionLevel) - { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); - ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto, cParams, ZSTD_defaultCMem); + public static ZSTD_CDict_s* ZSTD_createCDict( + void* dict, + nuint dictSize, + int compressionLevel + ) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto, + cParams, + ZSTD_defaultCMem + ); if (cdict != null) cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; return cdict; @@ -5830,10 +8371,26 @@ private static nuint ZSTD_initCDict_internal(ZSTD_CDict_s* cdict, void* dictBuff * As a consequence, `dictBuffer` **must** outlive CDict, * and its content must remain unmodified throughout the lifetime of CDict. 
* note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ - public static ZSTD_CDict_s* ZSTD_createCDict_byReference(void* dict, nuint dictSize, int compressionLevel) - { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); - ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto, cParams, ZSTD_defaultCMem); + public static ZSTD_CDict_s* ZSTD_createCDict_byReference( + void* dict, + nuint dictSize, + int compressionLevel + ) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto, + cParams, + ZSTD_defaultCMem + ); if (cdict != null) cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; return cdict; @@ -5872,19 +8429,43 @@ public static nuint ZSTD_freeCDict(ZSTD_CDict_s* cdict) * Note : there is no corresponding "free" function. * Since workspace was allocated externally, it must be freed externally. 
*/ - public static ZSTD_CDict_s* ZSTD_initStaticCDict(void* workspace, nuint workspaceSize, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) - { - ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams); + public static ZSTD_CDict_s* ZSTD_initStaticCDict( + void* workspace, + nuint workspaceSize, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams + ) + { + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + ZSTD_paramSwitch_e.ZSTD_ps_auto, + &cParams + ); /* enableDedicatedDictSearch */ nuint matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, 1, 0); - nuint neededSize = ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)))) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + matchStateSize; + nuint neededSize = + ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 
0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) + ) + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + matchStateSize; ZSTD_CDict_s* cdict; ZSTD_CCtx_params_s @params; if (((nuint)workspace & 7) != 0) return null; { ZSTD_cwksp ws; - ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc); + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc + ); cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); if (cdict == null) return null; @@ -5898,7 +8479,18 @@ public static nuint ZSTD_freeCDict(ZSTD_CDict_s* cdict) @params.useRowMatchFinder = useRowMatchFinder; cdict->useRowMatchFinder = useRowMatchFinder; cdict->compressionLevel = 0; - if (ERR_isError(ZSTD_initCDict_internal(cdict, dict, dictSize, dictLoadMethod, dictContentType, @params))) + if ( + ERR_isError( + ZSTD_initCDict_internal( + cdict, + dict, + dictSize, + dictLoadMethod, + dictContentType, + @params + ) + ) + ) return null; return cdict; } @@ -5925,7 +8517,12 @@ public static uint ZSTD_getDictID_fromCDict(ZSTD_CDict_s* cdict) /* ZSTD_compressBegin_usingCDict_internal() : * Implementation of various ZSTD_compressBegin_usingCDict* functions. */ - private static nuint ZSTD_compressBegin_usingCDict_internal(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize) + private static nuint ZSTD_compressBegin_usingCDict_internal( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) { ZSTD_CCtx_params_s cctxParams; if (cdict == null) @@ -5936,38 +8533,70 @@ private static nuint ZSTD_compressBegin_usingCDict_internal(ZSTD_CCtx_s* cctx, Z { ZSTD_parameters @params; @params.fParams = fParams; - @params.cParams = pledgedSrcSize < 128 * (1 << 10) || pledgedSrcSize < cdict->dictContentSize * 6UL || pledgedSrcSize == unchecked(0UL - 1) || cdict->compressionLevel == 0 ? 
ZSTD_getCParamsFromCDict(cdict) : ZSTD_getCParams(cdict->compressionLevel, pledgedSrcSize, cdict->dictContentSize); + @params.cParams = + pledgedSrcSize < 128 * (1 << 10) + || pledgedSrcSize < cdict->dictContentSize * 6UL + || pledgedSrcSize == unchecked(0UL - 1) + || cdict->compressionLevel == 0 + ? ZSTD_getCParamsFromCDict(cdict) + : ZSTD_getCParams( + cdict->compressionLevel, + pledgedSrcSize, + cdict->dictContentSize + ); ZSTD_CCtxParams_init_internal(&cctxParams, &@params, cdict->compressionLevel); } if (pledgedSrcSize != unchecked(0UL - 1)) { uint limitedSrcSize = (uint)(pledgedSrcSize < 1U << 19 ? pledgedSrcSize : 1U << 19); - uint limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; - cctxParams.cParams.windowLog = cctxParams.cParams.windowLog > limitedSrcLog ? cctxParams.cParams.windowLog : limitedSrcLog; + uint limitedSrcLog = + limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; + cctxParams.cParams.windowLog = + cctxParams.cParams.windowLog > limitedSrcLog + ? cctxParams.cParams.windowLog + : limitedSrcLog; } - return ZSTD_compressBegin_internal(cctx, null, 0, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, cdict, &cctxParams, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_not_buffered); + return ZSTD_compressBegin_internal( + cctx, + null, + 0, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + cdict, + &cctxParams, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); } /* ZSTD_compressBegin_usingCDict_advanced() : * This function is DEPRECATED. 
* cdict must be != NULL */ - public static nuint ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize) + public static nuint ZSTD_compressBegin_usingCDict_advanced( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) { return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); } /* ZSTD_compressBegin_usingCDict() : * cdict must be != NULL */ - private static nuint ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) + private static nuint ZSTD_compressBegin_usingCDict_deprecated( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict + ) { /*content*/ ZSTD_frameParameters fParams = new ZSTD_frameParameters { contentSizeFlag = 0, checksumFlag = 0, - noDictIDFlag = 0 + noDictIDFlag = 0, }; return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, unchecked(0UL - 1)); } @@ -5980,11 +8609,24 @@ public static nuint ZSTD_compressBegin_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_ /*! ZSTD_compress_usingCDict_internal(): * Implementation of various ZSTD_compress_usingCDict* functions. */ - private static nuint ZSTD_compress_usingCDict_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams) + private static nuint ZSTD_compress_usingCDict_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams + ) { { /* will check if cdict != NULL */ - nuint err_code = ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize); + nuint err_code = ZSTD_compressBegin_usingCDict_internal( + cctx, + cdict, + fParams, + srcSize + ); if (ERR_isError(err_code)) { return err_code; @@ -5997,9 +8639,25 @@ private static nuint ZSTD_compress_usingCDict_internal(ZSTD_CCtx_s* cctx, void* /*! 
ZSTD_compress_usingCDict_advanced(): * This function is DEPRECATED. */ - public static nuint ZSTD_compress_usingCDict_advanced(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams) - { - return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); + public static nuint ZSTD_compress_usingCDict_advanced( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams + ) + { + return ZSTD_compress_usingCDict_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + cdict, + fParams + ); } /*! ZSTD_compress_usingCDict() : @@ -6007,16 +8665,31 @@ public static nuint ZSTD_compress_usingCDict_advanced(ZSTD_CCtx_s* cctx, void* d * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. * Note that compression parameters are decided at CDict creation time * while frame parameters are hardcoded */ - public static nuint ZSTD_compress_usingCDict(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_CDict_s* cdict) + public static nuint ZSTD_compress_usingCDict( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict + ) { /*content*/ ZSTD_frameParameters fParams = new ZSTD_frameParameters { contentSizeFlag = 1, checksumFlag = 0, - noDictIDFlag = 0 + noDictIDFlag = 0, }; - return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); + return ZSTD_compress_usingCDict_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + cdict, + fParams + ); } /* ****************************************************************** @@ -6053,7 +8726,11 @@ public static nuint ZSTD_CStreamOutSize() return ZSTD_compressBound(1 << 17) + ZSTD_blockHeaderSize + 4; } - private static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* 
@params, ulong pledgedSrcSize) + private static ZSTD_CParamMode_e ZSTD_getCParamMode( + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) { if (cdict != null && ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) return ZSTD_CParamMode_e.ZSTD_cpm_attachDict; @@ -6093,7 +8770,14 @@ public static nuint ZSTD_resetCStream(ZSTD_CCtx_s* zcs, ulong pss) * Note : for lib/compress only. Used by zstdmt_compress.c. * Assumption 1 : params are valid * Assumption 2 : either dict, or cdict, is defined, not both */ - private static nuint ZSTD_initCStream_internal(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s* @params, ulong pledgedSrcSize) + private static nuint ZSTD_initCStream_internal( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) { { nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); @@ -6137,7 +8821,12 @@ private static nuint ZSTD_initCStream_internal(ZSTD_CCtx_s* zcs, void* dict, nui /* ZSTD_initCStream_usingCDict_advanced() : * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ - public static nuint ZSTD_initCStream_usingCDict_advanced(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* cdict, ZSTD_frameParameters fParams, ulong pledgedSrcSize) + public static nuint ZSTD_initCStream_usingCDict_advanced( + ZSTD_CCtx_s* zcs, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) { { nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); @@ -6193,13 +8882,20 @@ public static nuint ZSTD_initCStream_usingCDict(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* * pledgedSrcSize must be exact. * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. 
*/ - public static nuint ZSTD_initCStream_advanced(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, ZSTD_parameters @params, ulong pss) + public static nuint ZSTD_initCStream_advanced( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + ZSTD_parameters @params, + ulong pss + ) { /* for compatibility with older programs relying on this behavior. * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. * This line will be removed in the future. */ - ulong pledgedSrcSize = pss == 0 && @params.fParams.contentSizeFlag == 0 ? unchecked(0UL - 1) : pss; + ulong pledgedSrcSize = + pss == 0 && @params.fParams.contentSizeFlag == 0 ? unchecked(0UL - 1) : pss; { nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); if (ERR_isError(err_code)) @@ -6248,7 +8944,12 @@ public static nuint ZSTD_initCStream_advanced(ZSTD_CCtx_s* zcs, void* dict, nuin * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. * This prototype will generate compilation warnings. */ - public static nuint ZSTD_initCStream_usingDict(ZSTD_CCtx_s* zcs, void* dict, nuint dictSize, int compressionLevel) + public static nuint ZSTD_initCStream_usingDict( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + int compressionLevel + ) { { nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); @@ -6259,7 +8960,11 @@ public static nuint ZSTD_initCStream_usingDict(ZSTD_CCtx_s* zcs, void* dict, nui } { - nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel + ); if (ERR_isError(err_code)) { return err_code; @@ -6289,7 +8994,11 @@ public static nuint ZSTD_initCStream_usingDict(ZSTD_CCtx_s* zcs, void* dict, nui * "0" also disables frame content size field. It may be enabled in the future. * This prototype will generate compilation warnings. 
*/ - public static nuint ZSTD_initCStream_srcSize(ZSTD_CCtx_s* zcs, int compressionLevel, ulong pss) + public static nuint ZSTD_initCStream_srcSize( + ZSTD_CCtx_s* zcs, + int compressionLevel, + ulong pss + ) { /* temporary : 0 interpreted as "unknown" during transition period. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. @@ -6313,7 +9022,11 @@ public static nuint ZSTD_initCStream_srcSize(ZSTD_CCtx_s* zcs, int compressionLe } { - nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel + ); if (ERR_isError(err_code)) { return err_code; @@ -6360,7 +9073,11 @@ public static nuint ZSTD_initCStream(ZSTD_CCtx_s* zcs, int compressionLevel) } { - nuint err_code = ZSTD_CCtx_setParameter(zcs, ZSTD_cParameter.ZSTD_c_compressionLevel, compressionLevel); + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel + ); if (ERR_isError(err_code)) { return err_code; @@ -6390,7 +9107,12 @@ private static nuint ZSTD_nextInputSizeHint(ZSTD_CCtx_s* cctx) /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants * @return : hint size for next input to complete ongoing block */ - private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective flushMode) + private static nuint ZSTD_compressStream_generic( + ZSTD_CCtx_s* zcs, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective flushMode + ) { assert(input != null); sbyte* istart = (sbyte*)input->src; @@ -6445,10 +9167,24 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe case ZSTD_cStreamStage.zcss_init: return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_init_missing)); case ZSTD_cStreamStage.zcss_load: - if (flushMode == 
ZSTD_EndDirective.ZSTD_e_end && ((nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) && zcs->inBuffPos == 0) + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_end + && ( + (nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) + || zcs->appliedParams.outBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_stable + ) + && zcs->inBuffPos == 0 + ) { /* shortcut to compression pass directly into output buffer */ - nuint cSize = ZSTD_compressEnd_public(zcs, op, (nuint)(oend - op), ip, (nuint)(iend - ip)); + nuint cSize = ZSTD_compressEnd_public( + zcs, + op, + (nuint)(oend - op), + ip, + (nuint)(iend - ip) + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -6468,17 +9204,28 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) { nuint toLoad = zcs->inBuffTarget - zcs->inBuffPos; - nuint loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, (nuint)(iend - ip)); + nuint loaded = ZSTD_limitCopy( + zcs->inBuff + zcs->inBuffPos, + toLoad, + ip, + (nuint)(iend - ip) + ); zcs->inBuffPos += loaded; if (ip != null) ip += loaded; - if (flushMode == ZSTD_EndDirective.ZSTD_e_continue && zcs->inBuffPos < zcs->inBuffTarget) + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_continue + && zcs->inBuffPos < zcs->inBuffTarget + ) { someMoreWork = 0; break; } - if (flushMode == ZSTD_EndDirective.ZSTD_e_flush && zcs->inBuffPos == zcs->inToCompress) + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_flush + && zcs->inBuffPos == zcs->inToCompress + ) { someMoreWork = 0; break; @@ -6486,8 +9233,13 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe } else { - assert(zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); - if (flushMode == ZSTD_EndDirective.ZSTD_e_continue && (nuint)(iend - ip) < zcs->blockSizeMax) + assert( + zcs->appliedParams.inBufferMode 
== ZSTD_bufferMode_e.ZSTD_bm_stable + ); + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_continue + && (nuint)(iend - ip) < zcs->blockSizeMax + ) { zcs->stableIn_notConsumed = (nuint)(iend - ip); ip = iend; @@ -6503,12 +9255,23 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe } { - int inputBuffered = zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? 1 : 0; + int inputBuffered = + zcs->appliedParams.inBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? 1 + : 0; void* cDst; nuint cSize; nuint oSize = (nuint)(oend - op); - nuint iSize = inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) : zcs->blockSizeMax; - if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + nuint iSize = + inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress + : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) + : zcs->blockSizeMax; + if ( + oSize >= ZSTD_compressBound(iSize) + || zcs->appliedParams.outBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_stable + ) cDst = op; else { @@ -6518,8 +9281,26 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe if (inputBuffered != 0) { - uint lastBlock = flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend ? 1U : 0U; - cSize = lastBlock != 0 ? ZSTD_compressEnd_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend + ? 1U + : 0U; + cSize = + lastBlock != 0 + ? 
ZSTD_compressEnd_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ) + : ZSTD_compressContinue_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -6544,8 +9325,14 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe } else { - uint lastBlock = flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend ? 1U : 0U; - cSize = lastBlock != 0 ? ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend + ? 1U + : 0U; + cSize = + lastBlock != 0 + ? ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) + : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); if (ip != null) ip += iSize; { @@ -6569,7 +9356,10 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe if (zcs->frameEnded != 0) { someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + ZSTD_CCtx_reset( + zcs, + ZSTD_ResetDirective.ZSTD_reset_session_only + ); } break; @@ -6582,10 +9372,18 @@ private static nuint ZSTD_compressStream_generic(ZSTD_CCtx_s* zcs, ZSTD_outBuffe goto case ZSTD_cStreamStage.zcss_flush; case ZSTD_cStreamStage.zcss_flush: - assert(zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); + assert( + zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ); + { nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - nuint flushed = ZSTD_limitCopy(op, (nuint)(oend - op), zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + nuint flushed = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zcs->outBuff + zcs->outBuffFlushedSize, + toFlush + ); if (flushed != 0) op += flushed; zcs->outBuffFlushedSize += flushed; @@ -6638,10 +9436,19 @@ private static nuint ZSTD_nextInputSizeHint_MTorST(ZSTD_CCtx_s* cctx) * 
the next read size (if non-zero and not an error). ZSTD_compressStream2() * returns the minimum nb of bytes left to flush (if non-zero and not an error). */ - public static nuint ZSTD_compressStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) - { - { - nuint err_code = ZSTD_compressStream2(zcs, output, input, ZSTD_EndDirective.ZSTD_e_continue); + public static nuint ZSTD_compressStream( + ZSTD_CCtx_s* zcs, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) + { + { + nuint err_code = ZSTD_compressStream2( + zcs, + output, + input, + ZSTD_EndDirective.ZSTD_e_continue + ); if (ERR_isError(err_code)) { return err_code; @@ -6654,7 +9461,11 @@ public static nuint ZSTD_compressStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* outp /* After a compression call set the expected input/output buffer. * This is validated at the start of the next compression call. */ - private static void ZSTD_setBufferExpectations(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) + private static void ZSTD_setBufferExpectations( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) { if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) { @@ -6670,20 +9481,29 @@ private static void ZSTD_setBufferExpectations(ZSTD_CCtx_s* cctx, ZSTD_outBuffer /* Validate that the input/output buffers match the expectations set by * ZSTD_setBufferExpectations. 
*/ - private static nuint ZSTD_checkBufferStability(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + private static nuint ZSTD_checkBufferStability( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) { if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) { ZSTD_inBuffer_s expect = cctx->expectedInBuffer; if (expect.src != input->src || expect.pos != input->pos) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) + ); } if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) { nuint outBufferSize = output->size - output->pos; if (cctx->expectedOutBufferSize != outBufferSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) + ); } return 0; @@ -6694,7 +9514,11 @@ private static nuint ZSTD_checkBufferStability(ZSTD_CCtx_s* cctx, ZSTD_outBuffer * Otherwise, it's ignored. * @return: 0 on success, or a ZSTD_error code otherwise. */ - private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndDirective endOp, nuint inSize) + private static nuint ZSTD_CCtx_init_compressStream2( + ZSTD_CCtx_s* cctx, + ZSTD_EndDirective endOp, + nuint inSize + ) { ZSTD_CCtx_params_s @params = cctx->requestedParams; ZSTD_prefixDict_s prefixDict = cctx->prefixDict; @@ -6717,20 +9541,48 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD if (endOp == ZSTD_EndDirective.ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; { - nuint dictSize = prefixDict.dict != null ? prefixDict.dictSize : cctx->cdict != null ? 
cctx->cdict->dictContentSize : 0; - ZSTD_CParamMode_e mode = ZSTD_getCParamMode(cctx->cdict, &@params, cctx->pledgedSrcSizePlusOne - 1); - @params.cParams = ZSTD_getCParamsFromCCtxParams(&@params, cctx->pledgedSrcSizePlusOne - 1, dictSize, mode); - } - - @params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(@params.postBlockSplitter, &@params.cParams); - @params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(@params.ldmParams.enableLdm, &@params.cParams); - @params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(@params.useRowMatchFinder, &@params.cParams); - @params.validateSequences = ZSTD_resolveExternalSequenceValidation(@params.validateSequences); + nuint dictSize = + prefixDict.dict != null ? prefixDict.dictSize + : cctx->cdict != null ? cctx->cdict->dictContentSize + : 0; + ZSTD_CParamMode_e mode = ZSTD_getCParamMode( + cctx->cdict, + &@params, + cctx->pledgedSrcSizePlusOne - 1 + ); + @params.cParams = ZSTD_getCParamsFromCCtxParams( + &@params, + cctx->pledgedSrcSizePlusOne - 1, + dictSize, + mode + ); + } + + @params.postBlockSplitter = ZSTD_resolveBlockSplitterMode( + @params.postBlockSplitter, + &@params.cParams + ); + @params.ldmParams.enableLdm = ZSTD_resolveEnableLdm( + @params.ldmParams.enableLdm, + &@params.cParams + ); + @params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + @params.useRowMatchFinder, + &@params.cParams + ); + @params.validateSequences = ZSTD_resolveExternalSequenceValidation( + @params.validateSequences + ); @params.maxBlockSize = ZSTD_resolveMaxBlockSize(@params.maxBlockSize); - @params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(@params.searchForExternalRepcodes, @params.compressionLevel); + @params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + @params.searchForExternalRepcodes, + @params.compressionLevel + ); if (ZSTD_hasExtSeqProd(&@params) != 0 && @params.nbWorkers >= 1) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported)); + return 
unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported) + ); } if (cctx->pledgedSrcSizePlusOne - 1 <= 512 * (1 << 10)) @@ -6742,15 +9594,29 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD { if (cctx->mtctx == null) { - cctx->mtctx = ZSTDMT_createCCtx_advanced((uint)@params.nbWorkers, cctx->customMem, cctx->pool); + cctx->mtctx = ZSTDMT_createCCtx_advanced( + (uint)@params.nbWorkers, + cctx->customMem, + cctx->pool + ); if (cctx->mtctx == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } } { - nuint err_code = ZSTDMT_initCStream_internal(cctx->mtctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, cctx->cdict, @params, cctx->pledgedSrcSizePlusOne - 1); + nuint err_code = ZSTDMT_initCStream_internal( + cctx->mtctx, + prefixDict.dict, + prefixDict.dictSize, + prefixDict.dictContentType, + cctx->cdict, + @params, + cctx->pledgedSrcSizePlusOne - 1 + ); if (ERR_isError(err_code)) { return err_code; @@ -6758,7 +9624,8 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD } cctx->dictID = cctx->cdict != null ? cctx->cdict->dictID : 0; - cctx->dictContentSize = cctx->cdict != null ? cctx->cdict->dictContentSize : prefixDict.dictSize; + cctx->dictContentSize = + cctx->cdict != null ? 
cctx->cdict->dictContentSize : prefixDict.dictSize; cctx->consumedSrcSize = 0; cctx->producedCSize = 0; cctx->streamStage = ZSTD_cStreamStage.zcss_load; @@ -6769,7 +9636,17 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD ulong pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); { - nuint err_code = ZSTD_compressBegin_internal(cctx, prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, cctx->cdict, &@params, pledgedSrcSize, ZSTD_buffered_policy_e.ZSTDb_buffered); + nuint err_code = ZSTD_compressBegin_internal( + cctx, + prefixDict.dict, + prefixDict.dictSize, + prefixDict.dictContentType, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + cctx->cdict, + &@params, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_buffered + ); if (ERR_isError(err_code)) { return err_code; @@ -6781,7 +9658,8 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD cctx->inBuffPos = 0; if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) { - cctx->inBuffTarget = cctx->blockSizeMax + (nuint)(cctx->blockSizeMax == pledgedSrcSize ? 1 : 0); + cctx->inBuffTarget = + cctx->blockSizeMax + (nuint)(cctx->blockSizeMax == pledgedSrcSize ? 
1 : 0); } else { @@ -6798,7 +9676,12 @@ private static nuint ZSTD_CCtx_init_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_EndD /* @return provides a minimum amount of data remaining to be flushed from internal buffers */ - public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + public static nuint ZSTD_compressStream2( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) { if (output->pos > output->size) { @@ -6821,25 +9704,39 @@ public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* ou /* no obligation to start from pos==0 */ nuint inputSize = input->size - input->pos; nuint totalInputSize = inputSize + cctx->stableIn_notConsumed; - if (cctx->requestedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable && endOp == ZSTD_EndDirective.ZSTD_e_continue && totalInputSize < 1 << 17) + if ( + cctx->requestedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + && endOp == ZSTD_EndDirective.ZSTD_e_continue + && totalInputSize < 1 << 17 + ) { if (cctx->stableIn_notConsumed != 0) { if (input->src != cctx->expectedInBuffer.src) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected + ) + ); } if (input->pos != cctx->expectedInBuffer.size) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected + ) + ); } } input->pos = input->size; cctx->expectedInBuffer = *input; cctx->stableIn_notConsumed += inputSize; - return (nuint)(cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2); + return (nuint)( + cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 
6 : 2 + ); } { @@ -6886,7 +9783,10 @@ public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* ou flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); cctx->consumedSrcSize += input->pos - ipos; cctx->producedCSize += output->pos - opos; - if (ERR_isError(flushMin) || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0) + if ( + ERR_isError(flushMin) + || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0 + ) { if (flushMin == 0) ZSTD_CCtx_trace(cctx, 0); @@ -6903,18 +9803,30 @@ public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* ou if (endOp == ZSTD_EndDirective.ZSTD_e_continue) { - if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size) + if ( + input->pos != ipos + || output->pos != opos + || input->pos == input->size + || output->pos == output->size + ) break; } else { - assert(endOp == ZSTD_EndDirective.ZSTD_e_flush || endOp == ZSTD_EndDirective.ZSTD_e_end); + assert( + endOp == ZSTD_EndDirective.ZSTD_e_flush + || endOp == ZSTD_EndDirective.ZSTD_e_end + ); if (flushMin == 0 || output->pos == output->size) break; } } - assert(endOp == ZSTD_EndDirective.ZSTD_e_continue || flushMin == 0 || output->pos == output->size); + assert( + endOp == ZSTD_EndDirective.ZSTD_e_continue + || flushMin == 0 + || output->pos == output->size + ); ZSTD_setBufferExpectations(cctx, output, input); return flushMin; } @@ -6937,7 +9849,16 @@ public static nuint ZSTD_compressStream2(ZSTD_CCtx_s* cctx, ZSTD_outBuffer_s* ou * This variant might be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. 
*/ - public static nuint ZSTD_compressStream2_simpleArgs(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, nuint* dstPos, void* src, nuint srcSize, nuint* srcPos, ZSTD_EndDirective endOp) + public static nuint ZSTD_compressStream2_simpleArgs( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + nuint* dstPos, + void* src, + nuint srcSize, + nuint* srcPos, + ZSTD_EndDirective endOp + ) { ZSTD_outBuffer_s output; ZSTD_inBuffer_s input; @@ -6967,7 +9888,13 @@ public static nuint ZSTD_compressStream2_simpleArgs(ZSTD_CCtx_s* cctx, void* dst * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_compress2(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_compress2( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { ZSTD_bufferMode_e originalInBufferMode = cctx->requestedParams.inBufferMode; ZSTD_bufferMode_e originalOutBufferMode = cctx->requestedParams.outBufferMode; @@ -6977,7 +9904,16 @@ public static nuint ZSTD_compress2(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapaci { nuint oPos = 0; nuint iPos = 0; - nuint result = ZSTD_compressStream2_simpleArgs(cctx, dst, dstCapacity, &oPos, src, srcSize, &iPos, ZSTD_EndDirective.ZSTD_e_end); + nuint result = ZSTD_compressStream2_simpleArgs( + cctx, + dst, + dstCapacity, + &oPos, + src, + srcSize, + &iPos, + ZSTD_EndDirective.ZSTD_e_end + ); cctx->requestedParams.inBufferMode = originalInBufferMode; cctx->requestedParams.outBufferMode = originalOutBufferMode; { @@ -7003,7 +9939,15 @@ public static nuint ZSTD_compress2(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapaci * @offBase : must use the format required by ZSTD_storeSeq() * @returns a ZSTD error code if sequence is not valid */ - private static nuint ZSTD_validateSequence(uint offBase, uint matchLength, uint minMatch, nuint posInSrc, uint windowLog, nuint dictSize, int 
useSequenceProducer) + private static nuint ZSTD_validateSequence( + uint offBase, + uint matchLength, + uint minMatch, + nuint posInSrc, + uint windowLog, + nuint dictSize, + int useSequenceProducer + ) { uint windowSize = 1U << (int)windowLog; /* posInSrc represents the amount of data the decoder would decode up to this point. @@ -7017,13 +9961,17 @@ private static nuint ZSTD_validateSequence(uint offBase, uint matchLength, uint assert(offsetBound > 0); if (offBase > offsetBound + 3) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } } if (matchLength < matchLenLowerBound) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } return 0; @@ -7068,7 +10016,15 @@ private static uint ZSTD_finalizeOffBase(uint rawOffset, uint* rep, uint ll0) * @blockSize must be == sum(sequence_lengths). * @returns @blockSize on success, and a ZSTD_error otherwise. 
*/ - private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_SequencePosition* seqPos, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint blockSize, ZSTD_paramSwitch_e externalRepSearch) + private static nuint ZSTD_transferSequences_wBlockDelim( + ZSTD_CCtx_s* cctx, + ZSTD_SequencePosition* seqPos, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint blockSize, + ZSTD_paramSwitch_e externalRepSearch + ) { uint idx = seqPos->idx; uint startIdx = idx; @@ -7090,7 +10046,11 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ } memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) + for ( + ; + idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); + ++idx + ) { uint litLength = inSeqs[idx].litLength; uint matchLength = inSeqs[idx].matchLength; @@ -7111,7 +10071,15 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ { seqPos->posInSrc += litLength + matchLength; { - nuint err_code = ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)); + nuint err_code = ZSTD_validateSequence( + offBase, + matchLength, + cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, + dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams) + ); if (ERR_isError(err_code)) { return err_code; @@ -7121,7 +10089,9 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } ZSTD_storeSeq(&cctx->seqStore, 
litLength, ip, iend, offBase, matchLength); @@ -7130,7 +10100,9 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ if (idx == inSeqsSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } assert(externalRepSearch != ZSTD_paramSwitch_e.ZSTD_ps_auto); @@ -7171,7 +10143,9 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ if (ip != iend) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } seqPos->idx = idx + 1; @@ -7189,7 +10163,15 @@ private static nuint ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx_s* cctx, ZSTD_ * @returns the number of bytes consumed from @src, necessarily <= @blockSize. * Otherwise, it may return a ZSTD error if something went wrong. */ - private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_SequencePosition* seqPos, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint blockSize, ZSTD_paramSwitch_e externalRepSearch) + private static nuint ZSTD_transferSequences_noDelim( + ZSTD_CCtx_s* cctx, + ZSTD_SequencePosition* seqPos, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint blockSize, + ZSTD_paramSwitch_e externalRepSearch + ) { uint idx = seqPos->idx; uint startPosInSequence = seqPos->posInSequence; @@ -7244,16 +10226,23 @@ private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_Sequ if (endPosInSequence > litLength) { uint firstHalfMatchLength; - litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence; + litLength = + startPosInSequence >= litLength ? 
0 : litLength - startPosInSequence; firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; - if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) + if ( + matchLength > blockSize + && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch + ) { /* Only ever split the match if it is larger than the block size */ - uint secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence; + uint secondHalfMatchLength = + currSeq.matchLength + currSeq.litLength - endPosInSequence; if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { - endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; - bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + endPosInSequence -= + cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + bytesAdjustment = + cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; firstHalfMatchLength -= bytesAdjustment; } @@ -7283,7 +10272,15 @@ private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_Sequ { seqPos->posInSrc += litLength + matchLength; { - nuint err_code = ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)); + nuint err_code = ZSTD_validateSequence( + offBase, + matchLength, + cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, + dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams) + ); if (ERR_isError(err_code)) { return err_code; @@ -7293,7 +10290,9 @@ private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_Sequ if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } 
ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); @@ -7302,7 +10301,10 @@ private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_Sequ idx++; } - assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); + assert( + idx == inSeqsSize + || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength + ); seqPos->idx = idx; seqPos->posInSequence = endPosInSequence; memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); @@ -7321,21 +10323,43 @@ private static nuint ZSTD_transferSequences_noDelim(ZSTD_CCtx_s* cctx, ZSTD_Sequ private static void* ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { - assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, (int)mode) != 0); + assert( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, (int)mode) != 0 + ); if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters) { - return (delegate* managed)(&ZSTD_transferSequences_wBlockDelim); + return (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)(&ZSTD_transferSequences_wBlockDelim); } assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters); - return (delegate* managed)(&ZSTD_transferSequences_noDelim); + return (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)(&ZSTD_transferSequences_noDelim); } /* Discover the size of next block by searching for the delimiter. * Note that a block delimiter **must** exist in this mode, * otherwise it's an input error. 
* The block size retrieved will be later compared to ensure it remains within bounds */ - private static nuint blockSize_explicitDelimiter(ZSTD_Sequence* inSeqs, nuint inSeqsSize, ZSTD_SequencePosition seqPos) + private static nuint blockSize_explicitDelimiter( + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + ZSTD_SequencePosition seqPos + ) { int end = 0; nuint blockSize = 0; @@ -7348,7 +10372,9 @@ private static nuint blockSize_explicitDelimiter(ZSTD_Sequence* inSeqs, nuint in if (end != 0) { if (inSeqs[spos].matchLength != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); break; } @@ -7356,11 +10382,20 @@ private static nuint blockSize_explicitDelimiter(ZSTD_Sequence* inSeqs, nuint in } if (end == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); return blockSize; } - private static nuint determine_blockSize(ZSTD_sequenceFormat_e mode, nuint blockSize, nuint remaining, ZSTD_Sequence* inSeqs, nuint inSeqsSize, ZSTD_SequencePosition seqPos) + private static nuint determine_blockSize( + ZSTD_sequenceFormat_e mode, + nuint blockSize, + nuint remaining, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + ZSTD_SequencePosition seqPos + ) { if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters) { @@ -7379,9 +10414,13 @@ private static nuint determine_blockSize(ZSTD_sequenceFormat_e mode, nuint block } if (explicitBlockSize > blockSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); if (explicitBlockSize > remaining) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); return explicitBlockSize; } } @@ -7391,7 +10430,15 @@ private static nuint determine_blockSize(ZSTD_sequenceFormat_e mode, nuint block * Returns the cumulative size of all compressed blocks (including their headers), * otherwise a ZSTD error. */ - private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint srcSize) + private static nuint ZSTD_compressSequences_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint srcSize + ) { nuint cSize = 0; nuint remaining = srcSize; @@ -7399,7 +10446,7 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds { idx = 0, posInSequence = 0, - posInSrc = 0 + posInSrc = 0, }; byte* ip = (byte*)src; byte* op = (byte*)dst; @@ -7423,7 +10470,14 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds { nuint compressedSeqsSize; nuint cBlockSize; - nuint blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, cctx->blockSizeMax, remaining, inSeqs, inSeqsSize, seqPos); + nuint blockSize = determine_blockSize( + cctx->appliedParams.blockDelimiters, + cctx->blockSizeMax, + remaining, + inSeqs, + inSeqsSize, + seqPos + ); uint lastBlock = blockSize == remaining ? 
1U : 0U; { nuint err_code = blockSize; @@ -7435,7 +10489,25 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds assert(blockSize <= remaining); ZSTD_resetSeqStore(&cctx->seqStore); - blockSize = ((delegate* managed)sequenceCopier)(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes); + blockSize = ( + (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)sequenceCopier + )( + cctx, + &seqPos, + inSeqs, + inSeqsSize, + ip, + blockSize, + cctx->appliedParams.searchForExternalRepcodes + ); { nuint err_code = blockSize; if (ERR_isError(err_code)) @@ -7468,7 +10540,18 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, blockSize, cctx->tmpWorkspace, cctx->tmpWkspSize, cctx->bmi2); + compressedSeqsSize = ZSTD_entropyCompressSeqStore( + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, + &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + blockSize, + cctx->tmpWorkspace, + cctx->tmpWkspSize, + cctx->bmi2 + ); { nuint err_code = compressedSeqsSize; if (ERR_isError(err_code)) @@ -7477,7 +10560,11 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds } } - if (cctx->isFirstBlock == 0 && ZSTD_maybeRLE(&cctx->seqStore) != 0 && ZSTD_isRLE(ip, blockSize) != 0) + if ( + cctx->isFirstBlock == 0 + && ZSTD_maybeRLE(&cctx->seqStore) != 0 + && ZSTD_isRLE(ip, blockSize) != 0 + ) { compressedSeqsSize = 1; } @@ -7508,9 +10595,16 @@ private static nuint 
ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds { uint cBlockHeader; ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); - if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; - cBlockHeader = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(compressedSeqsSize << 3); + if ( + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + cBlockHeader = + lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(compressedSeqsSize << 3); MEM_writeLE24(op, cBlockHeader); cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; } @@ -7569,13 +10663,25 @@ private static nuint ZSTD_compressSequences_internal(ZSTD_CCtx_s* cctx, void* ds * and cannot emit an RLE block that disagrees with the repcode history. * @return : final compressed size, or a ZSTD error code. 
*/ - public static nuint ZSTD_compressSequences(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* src, nuint srcSize) + public static nuint ZSTD_compressSequences( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint srcSize + ) { byte* op = (byte*)dst; nuint cSize = 0; assert(cctx != null); { - nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, ZSTD_EndDirective.ZSTD_e_end, srcSize); + nuint err_code = ZSTD_CCtx_init_compressStream2( + cctx, + ZSTD_EndDirective.ZSTD_e_end, + srcSize + ); if (ERR_isError(err_code)) { return err_code; @@ -7583,7 +10689,13 @@ public static nuint ZSTD_compressSequences(ZSTD_CCtx_s* cctx, void* dst, nuint d } { - nuint frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID); + nuint frameHeaderSize = ZSTD_writeFrameHeader( + op, + dstCapacity, + &cctx->appliedParams, + srcSize, + cctx->dictID + ); op += frameHeaderSize; assert(frameHeaderSize <= dstCapacity); dstCapacity -= frameHeaderSize; @@ -7596,7 +10708,15 @@ public static nuint ZSTD_compressSequences(ZSTD_CCtx_s* cctx, void* dst, nuint d } { - nuint cBlocksSize = ZSTD_compressSequences_internal(cctx, op, dstCapacity, inSeqs, inSeqsSize, src, srcSize); + nuint cBlocksSize = ZSTD_compressSequences_internal( + cctx, + op, + dstCapacity, + inSeqs, + inSeqsSize, + src, + srcSize + ); { nuint err_code = cBlocksSize; if (ERR_isError(err_code)) @@ -7625,7 +10745,11 @@ public static nuint ZSTD_compressSequences(ZSTD_CCtx_s* cctx, void* dst, nuint d return cSize; } - private static nuint convertSequences_noRepcodes(SeqDef_s* dstSeqs, ZSTD_Sequence* inSeqs, nuint nbSequences) + private static nuint convertSequences_noRepcodes( + SeqDef_s* dstSeqs, + ZSTD_Sequence* inSeqs, + nuint nbSequences + ) { nuint longLen = 0; nuint n; @@ -7658,13 +10782,20 @@ private static nuint convertSequences_noRepcodes(SeqDef_s* dstSeqs, ZSTD_Sequenc * 
This is helpful to generate a lean main pipeline, improving performance. * It may be re-inserted later. */ - private static nuint ZSTD_convertBlockSequences(ZSTD_CCtx_s* cctx, ZSTD_Sequence* inSeqs, nuint nbSequences, int repcodeResolution) + private static nuint ZSTD_convertBlockSequences( + ZSTD_CCtx_s* cctx, + ZSTD_Sequence* inSeqs, + nuint nbSequences, + int repcodeResolution + ) { repcodes_s updatedRepcodes; nuint seqNb = 0; if (nbSequences >= cctx->seqStore.maxNbSeq) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); @@ -7673,7 +10804,11 @@ private static nuint ZSTD_convertBlockSequences(ZSTD_CCtx_s* cctx, ZSTD_Sequence assert(inSeqs[nbSequences - 1].offset == 0); if (repcodeResolution == 0) { - nuint longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences - 1); + nuint longl = convertSequences_noRepcodes( + cctx->seqStore.sequencesStart, + inSeqs, + nbSequences - 1 + ); cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences - 1; if (longl != 0) { @@ -7686,7 +10821,8 @@ private static nuint ZSTD_convertBlockSequences(ZSTD_CCtx_s* cctx, ZSTD_Sequence else { assert(longl <= 2 * (nbSequences - 1)); - cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; + cctx->seqStore.longLengthType = + ZSTD_longLengthType_e.ZSTD_llt_literalLength; cctx->seqStore.longLengthPos = (uint)(longl - (nbSequences - 1) - 1); } } @@ -7698,7 +10834,11 @@ private static nuint ZSTD_convertBlockSequences(ZSTD_CCtx_s* cctx, ZSTD_Sequence uint litLength = inSeqs[seqNb].litLength; uint matchLength = inSeqs[seqNb].matchLength; uint ll0 = litLength == 0 ? 
1U : 0U; - uint offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); + uint offBase = ZSTD_finalizeOffBase( + inSeqs[seqNb].offset, + updatedRepcodes.rep, + ll0 + ); ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } @@ -7755,7 +10895,9 @@ private static BlockSummary ZSTD_get1BlockSummary(ZSTD_Sequence* seqs, nuint nbS { BlockSummary bs; System.Runtime.CompilerServices.Unsafe.SkipInit(out bs); - bs.nbSequences = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + bs.nbSequences = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); return bs; } @@ -7768,16 +10910,32 @@ private static BlockSummary ZSTD_get1BlockSummary(ZSTD_Sequence* seqs, nuint nbS } } - private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint nbSequences, void* literals, nuint litSize, nuint srcSize) + private static nuint ZSTD_compressSequencesAndLiterals_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint nbSequences, + void* literals, + nuint litSize, + nuint srcSize + ) { nuint remaining = srcSize; nuint cSize = 0; byte* op = (byte*)dst; - int repcodeResolution = cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; - assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto); + int repcodeResolution = + cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 
1 + : 0; + assert( + cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto + ); if (nbSequences == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } if (nbSequences == 1 && inSeqs[0].litLength == 0) @@ -7797,7 +10955,9 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct while (nbSequences != 0) { - nuint compressedSeqsSize, cBlockSize, conversionStatus; + nuint compressedSeqsSize, + cBlockSize, + conversionStatus; BlockSummary block = ZSTD_get1BlockSummary(inSeqs, nbSequences); uint lastBlock = block.nbSequences == nbSequences ? 1U : 0U; { @@ -7811,11 +10971,18 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct assert(block.nbSequences <= nbSequences); if (block.litSize > litSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } ZSTD_resetSeqStore(&cctx->seqStore); - conversionStatus = ZSTD_convertBlockSequences(cctx, inSeqs, block.nbSequences, repcodeResolution); + conversionStatus = ZSTD_convertBlockSequences( + cctx, + inSeqs, + block.nbSequences, + repcodeResolution + ); { nuint err_code = conversionStatus; if (ERR_isError(err_code)) @@ -7832,7 +10999,19 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, literals, block.litSize, &cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, cctx->tmpWorkspace, cctx->tmpWkspSize, cctx->bmi2); + compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( + op + 
ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + literals, + block.litSize, + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, + &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + cctx->tmpWorkspace, + cctx->tmpWkspSize, + cctx->bmi2 + ); { nuint err_code = compressedSeqsSize; if (ERR_isError(err_code)) @@ -7847,16 +11026,25 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct literals = (sbyte*)literals + block.litSize; if (compressedSeqsSize == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock) + ); } else { uint cBlockHeader; assert(compressedSeqsSize > 1); ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); - if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid) - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; - cBlockHeader = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(compressedSeqsSize << 3); + if ( + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + cBlockHeader = + lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(compressedSeqsSize << 3); MEM_writeLE24(op, cBlockHeader); cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; } @@ -7874,12 +11062,16 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct if (litSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } if (remaining != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } return cSize; @@ -7903,7 +11095,17 @@ private static nuint ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx_s* cct * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. * @return : final compressed size, or a ZSTD error code. */ - public static nuint ZSTD_compressSequencesAndLiterals(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity, ZSTD_Sequence* inSeqs, nuint inSeqsSize, void* literals, nuint litSize, nuint litCapacity, nuint decompressedSize) + public static nuint ZSTD_compressSequencesAndLiterals( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* literals, + nuint litSize, + nuint litCapacity, + nuint decompressedSize + ) { byte* op = (byte*)dst; nuint cSize = 0; @@ -7914,16 +11116,25 @@ public static nuint ZSTD_compressSequencesAndLiterals(ZSTD_CCtx_s* cctx, void* d } { - nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, ZSTD_EndDirective.ZSTD_e_end, decompressedSize); + nuint err_code = ZSTD_CCtx_init_compressStream2( + cctx, + ZSTD_EndDirective.ZSTD_e_end, + decompressedSize + ); if (ERR_isError(err_code)) { return err_code; } } - if (cctx->appliedParams.blockDelimiters == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters) + if ( + cctx->appliedParams.blockDelimiters + == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } if (cctx->appliedParams.validateSequences != 0) @@ -7933,11 +11144,19 @@ public static nuint ZSTD_compressSequencesAndLiterals(ZSTD_CCtx_s* cctx, void* d if (cctx->appliedParams.fParams.checksumFlag != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } { - nuint frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, decompressedSize, cctx->dictID); + nuint frameHeaderSize = ZSTD_writeFrameHeader( + op, + dstCapacity, + &cctx->appliedParams, + decompressedSize, + cctx->dictID + ); op += frameHeaderSize; assert(frameHeaderSize <= dstCapacity); dstCapacity -= frameHeaderSize; @@ -7945,7 +11164,16 @@ public static nuint ZSTD_compressSequencesAndLiterals(ZSTD_CCtx_s* cctx, void* d } { - nuint cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, op, dstCapacity, inSeqs, inSeqsSize, literals, litSize, decompressedSize); + nuint cBlocksSize = ZSTD_compressSequencesAndLiterals_internal( + cctx, + op, + dstCapacity, + inSeqs, + inSeqsSize, + literals, + litSize, + decompressedSize + ); { nuint err_code = cBlocksSize; if (ERR_isError(err_code)) @@ -7969,9 +11197,10 @@ private static ZSTD_inBuffer_s inBuffer_forEndFlush(ZSTD_CCtx_s* zcs) { src = null, size = 0, - pos = 0 + pos = 0, }; - int stableInput = zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ? 1 : 0; + int stableInput = + zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ? 1 : 0; return stableInput != 0 ? 
zcs->expectedInBuffer : nullInput; } @@ -7988,7 +11217,12 @@ public static nuint ZSTD_flushStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) public static nuint ZSTD_endStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) { ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs); - nuint remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_EndDirective.ZSTD_e_end); + nuint remainingToFlush = ZSTD_compressStream2( + zcs, + output, + &input, + ZSTD_EndDirective.ZSTD_e_end + ); { nuint err_code = remainingToFlush; if (ERR_isError(err_code)) @@ -8001,7 +11235,9 @@ public static nuint ZSTD_endStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) return remainingToFlush; { nuint lastBlockSize = (nuint)(zcs->frameEnded != 0 ? 0 : 3); - nuint checksumSize = (nuint)(zcs->frameEnded != 0 ? 0 : zcs->appliedParams.fParams.checksumFlag * 4); + nuint checksumSize = (nuint)( + zcs->frameEnded != 0 ? 0 : zcs->appliedParams.fParams.checksumFlag * 4 + ); nuint toFlush = remainingToFlush + lastBlockSize + checksumSize; return toFlush; } @@ -8022,9 +11258,17 @@ public static int ZSTD_defaultCLevel() return 3; } - private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int compressionLevel, nuint dictSize) + private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( + int compressionLevel, + nuint dictSize + ) { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict); + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + 0, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); switch (cParams.strategy) { case ZSTD_strategy.ZSTD_fast: @@ -8047,7 +11291,13 @@ private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(in private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParameters* cParams) { - return cParams->strategy >= ZSTD_strategy.ZSTD_greedy && cParams->strategy <= 
ZSTD_strategy.ZSTD_lazy2 && cParams->hashLog > cParams->chainLog && cParams->chainLog <= 24 ? 1 : 0; + return + cParams->strategy >= ZSTD_strategy.ZSTD_greedy + && cParams->strategy <= ZSTD_strategy.ZSTD_lazy2 + && cParams->hashLog > cParams->chainLog + && cParams->chainLog <= 24 + ? 1 + : 0; } /** @@ -8055,7 +11305,9 @@ private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParamete * search. This is used to recover the params set to be used in the working * context. (Otherwise, those tables would also grow.) */ - private static void ZSTD_dedicatedDictSearch_revertCParams(ZSTD_compressionParameters* cParams) + private static void ZSTD_dedicatedDictSearch_revertCParams( + ZSTD_compressionParameters* cParams + ) { switch (cParams->strategy) { @@ -8080,7 +11332,11 @@ private static void ZSTD_dedicatedDictSearch_revertCParams(ZSTD_compressionParam } } - private static ulong ZSTD_getCParamRowSize(ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + private static ulong ZSTD_getCParamRowSize( + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) { switch (mode) { @@ -8099,7 +11355,9 @@ private static ulong ZSTD_getCParamRowSize(ulong srcSizeHint, nuint dictSize, ZS { int unknown = srcSizeHint == unchecked(0UL - 1) ? 1 : 0; nuint addedSize = (nuint)(unknown != 0 && dictSize > 0 ? 500 : 0); - return unknown != 0 && dictSize == 0 ? unchecked(0UL - 1) : srcSizeHint + dictSize + addedSize; + return unknown != 0 && dictSize == 0 + ? unchecked(0UL - 1) + : srcSizeHint + dictSize + addedSize; } } @@ -8108,10 +11366,19 @@ private static ulong ZSTD_getCParamRowSize(ulong srcSizeHint, nuint dictSize, ZS * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. * Use dictSize == 0 for unknown or unused. * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. 
*/ - private static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + private static ZSTD_compressionParameters ZSTD_getCParams_internal( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) { ulong rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); - uint tableID = (uint)((rSize <= 256 * (1 << 10) ? 1 : 0) + (rSize <= 128 * (1 << 10) ? 1 : 0) + (rSize <= 16 * (1 << 10) ? 1 : 0)); + uint tableID = (uint)( + (rSize <= 256 * (1 << 10) ? 1 : 0) + + (rSize <= 128 * (1 << 10) ? 1 : 0) + + (rSize <= 16 * (1 << 10) ? 1 : 0) + ); int row; if (compressionLevel == 0) row = 3; @@ -8125,36 +11392,59 @@ private static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressi ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; if (compressionLevel < 0) { - int clampedCompressionLevel = ZSTD_minCLevel() > compressionLevel ? ZSTD_minCLevel() : compressionLevel; + int clampedCompressionLevel = + ZSTD_minCLevel() > compressionLevel ? ZSTD_minCLevel() : compressionLevel; cp.targetLength = (uint)-clampedCompressionLevel; } - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_paramSwitch_e.ZSTD_ps_auto); + return ZSTD_adjustCParams_internal( + cp, + srcSizeHint, + dictSize, + mode, + ZSTD_paramSwitch_e.ZSTD_ps_auto + ); } } /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. 
* Size values are optional, provide 0 if not known or unused */ - public static ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, ulong srcSizeHint, nuint dictSize) + public static ZSTD_compressionParameters ZSTD_getCParams( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize + ) { if (srcSizeHint == 0) srcSizeHint = unchecked(0UL - 1); - return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown); + return ZSTD_getCParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown + ); } /*! ZSTD_getParams() : * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). * Fields of `ZSTD_frameParameters` are set to default values */ - private static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, ulong srcSizeHint, nuint dictSize, ZSTD_CParamMode_e mode) + private static ZSTD_parameters ZSTD_getParams_internal( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) { ZSTD_parameters @params; - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); - @params = new ZSTD_parameters - { - cParams = cParams - }; + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + mode + ); + @params = new ZSTD_parameters { cParams = cParams }; @params.fParams.contentSizeFlag = 1; return @params; } @@ -8163,11 +11453,20 @@ private static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, ulo * same idea as ZSTD_getCParams() * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
* Fields of `ZSTD_frameParameters` are set to default values */ - public static ZSTD_parameters ZSTD_getParams(int compressionLevel, ulong srcSizeHint, nuint dictSize) + public static ZSTD_parameters ZSTD_getParams( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize + ) { if (srcSizeHint == 0) srcSizeHint = unchecked(0UL - 1); - return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_CParamMode_e.ZSTD_cpm_unknown); + return ZSTD_getParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown + ); } /*! ZSTD_registerSequenceProducer() : @@ -8189,10 +11488,18 @@ public static ZSTD_parameters ZSTD_getParams(int compressionLevel, ulong srcSize * * The user is strongly encouraged to read the full API documentation (above) before * calling this function. */ - public static void ZSTD_registerSequenceProducer(ZSTD_CCtx_s* zc, void* extSeqProdState, void* extSeqProdFunc) + public static void ZSTD_registerSequenceProducer( + ZSTD_CCtx_s* zc, + void* extSeqProdState, + void* extSeqProdFunc + ) { assert(zc != null); - ZSTD_CCtxParams_registerSequenceProducer(&zc->requestedParams, extSeqProdState, extSeqProdFunc); + ZSTD_CCtxParams_registerSequenceProducer( + &zc->requestedParams, + extSeqProdState, + extSeqProdFunc + ); } /*! ZSTD_CCtxParams_registerSequenceProducer() : @@ -8204,7 +11511,11 @@ public static void ZSTD_registerSequenceProducer(ZSTD_CCtx_s* zc, void* extSeqPr * is required, then this function is for you. Otherwise, you probably don't need it. * * See tests/zstreamtest.c for example usage. 
*/ - public static void ZSTD_CCtxParams_registerSequenceProducer(ZSTD_CCtx_params_s* @params, void* extSeqProdState, void* extSeqProdFunc) + public static void ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params_s* @params, + void* extSeqProdState, + void* extSeqProdFunc + ) { assert(@params != null); if (extSeqProdFunc != null) diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs index 03da49072..c3026112a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs @@ -1,7 +1,7 @@ -using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; using System; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -12,7 +12,10 @@ public static unsafe partial class Methods * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t* seqStore, SeqDef_s* seq) + private static ZSTD_SequenceLength ZSTD_getSequenceLength( + SeqStore_t* seqStore, + SeqDef_s* seq + ) { ZSTD_SequenceLength seqLen; seqLen.litLength = seq->litLength; @@ -33,80 +36,160 @@ private static ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t* seqStore, return seqLen; } - private static readonly RawSeqStore_t kNullRawSeqStore = new RawSeqStore_t(seq: null, pos: 0, posInSequence: 0, size: 0, capacity: 0); + private static readonly RawSeqStore_t kNullRawSeqStore = new RawSeqStore_t( + seq: null, + pos: 0, + posInSequence: 0, + size: 0, + capacity: 0 + ); #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_Code => new byte[64] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 16, - 17, - 17, - 18, - 18, - 19, - 19, - 20, - 20, - 20, - 20, - 21, - 21, - 21, - 21, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24 - }; - private static byte* LL_Code => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_Code)); + private static ReadOnlySpan Span_LL_Code => + new byte[64] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + }; + private static byte* LL_Code => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_Code) + ); #else - private static readonly byte* LL_Code = GetArrayPointer(new byte[64] { 
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24 }); + private static readonly byte* LL_Code = GetArrayPointer( + new byte[64] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + } + ); #endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint ZSTD_LLcode(uint litLength) { @@ -115,141 +198,278 @@ private static uint ZSTD_LLcode(uint litLength) } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_Code => new byte[128] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 32, - 33, - 33, - 34, - 34, - 35, - 35, - 36, - 36, - 36, - 36, - 37, - 37, - 37, - 37, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42 - }; - private static byte* ML_Code => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_Code)); + private static ReadOnlySpan Span_ML_Code => + new byte[128] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 
10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 32, + 33, + 33, + 34, + 34, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + }; + private static byte* ML_Code => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_Code) + ); #else - private static readonly byte* ML_Code = GetArrayPointer(new byte[128] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }); + private static readonly byte* ML_Code = GetArrayPointer( + new byte[128] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 32, + 33, + 33, + 34, + 34, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 41, 
+ 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + } + ); #endif /* ZSTD_MLcode() : * note : mlBase = matchLength - MINMATCH; @@ -281,7 +501,12 @@ private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) * @return index >= lowLimit ? candidate : backup, * tries to force branchless codegen. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_selectAddr(uint index, uint lowLimit, byte* candidate, byte* backup) + private static byte* ZSTD_selectAddr( + uint index, + uint lowLimit, + byte* candidate, + byte* backup + ) { return index >= lowLimit ? candidate : backup; } @@ -290,9 +515,16 @@ private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) * Writes uncompressed block to dst buffer from given src. 
* Returns the size of the block */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_noCompressBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + private static nuint ZSTD_noCompressBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) { - uint cBlockHeader24 = lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); + uint cBlockHeader24 = + lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); if (srcSize + ZSTD_blockHeaderSize > dstCapacity) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -304,7 +536,13 @@ private static nuint ZSTD_noCompressBlock(void* dst, nuint dstCapacity, void* sr } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_rleCompressBlock(void* dst, nuint dstCapacity, byte src, nuint srcSize, uint lastBlock) + private static nuint ZSTD_rleCompressBlock( + void* dst, + nuint dstCapacity, + byte src, + nuint srcSize, + uint lastBlock + ) { byte* op = (byte*)dst; uint cBlockHeader = lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(srcSize << 3); @@ -343,7 +581,11 @@ private static int ZSTD_literalsCompressionIsDisabled(ZSTD_CCtx_params_s* cctxPa assert(0 != 0); goto case ZSTD_paramSwitch_e.ZSTD_ps_auto; case ZSTD_paramSwitch_e.ZSTD_ps_auto: - return cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast && cctxParams->cParams.targetLength > 0 ? 1 : 0; + return + cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast + && cctxParams->cParams.targetLength > 0 + ? 
1 + : 0; } } @@ -373,15 +615,25 @@ private static void ZSTD_safecopyLiterals(byte* op, byte* ip, byte* iend, byte* * @matchLength : must be >= MINMATCH */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, nuint litLength, uint offBase, nuint matchLength) + private static void ZSTD_storeSeqOnly( + SeqStore_t* seqStorePtr, + nuint litLength, + uint offBase, + nuint matchLength + ) { - assert((nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + assert( + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) + < seqStorePtr->maxNbSeq + ); assert(litLength <= 1 << 17); if (litLength > 0xFFFF) { assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; - seqStorePtr->longLengthPos = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + seqStorePtr->longLengthPos = (uint)( + seqStorePtr->sequences - seqStorePtr->sequencesStart + ); } seqStorePtr->sequences[0].litLength = (ushort)litLength; @@ -394,7 +646,9 @@ private static void ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, nuint litLength, { assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; - seqStorePtr->longLengthPos = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + seqStorePtr->longLengthPos = (uint)( + seqStorePtr->sequences - seqStorePtr->sequencesStart + ); } seqStorePtr->sequences[0].mlBase = (ushort)mlBase; @@ -410,11 +664,21 @@ private static void ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, nuint litLength, * Allowed to over-read literals up to litLimit. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_storeSeq(SeqStore_t* seqStorePtr, nuint litLength, byte* literals, byte* litLimit, uint offBase, nuint matchLength) + private static void ZSTD_storeSeq( + SeqStore_t* seqStorePtr, + nuint litLength, + byte* literals, + byte* litLimit, + uint offBase, + nuint matchLength + ) { byte* litLimit_w = litLimit - 32; byte* litEnd = literals + litLength; - assert((nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + assert( + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) + < seqStorePtr->maxNbSeq + ); assert(seqStorePtr->maxNbLit <= 128 * (1 << 10)); assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); assert(literals + litLength <= litLimit); @@ -423,7 +687,12 @@ private static void ZSTD_storeSeq(SeqStore_t* seqStorePtr, nuint litLength, byte ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { - ZSTD_wildcopy(seqStorePtr->lit + 16, literals + 16, (nint)litLength - 16, ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + seqStorePtr->lit + 16, + literals + 16, + (nint)litLength - 16, + ZSTD_overlap_e.ZSTD_no_overlap + ); } } else @@ -527,7 +796,13 @@ private static nuint ZSTD_count(byte* pIn, byte* pMatch, byte* pInLimit) * convention : on reaching mEnd, match count continue starting from iStart */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_count_2segments(byte* ip, byte* match, byte* iEnd, byte* mEnd, byte* iStart) + private static nuint ZSTD_count_2segments( + byte* ip, + byte* match, + byte* iEnd, + byte* mEnd, + byte* iStart + ) { byte* vEnd = ip + (mEnd - match) < iEnd ? 
ip + (mEnd - match) : iEnd; nuint matchLength = ZSTD_count(ip, match, vEnd); @@ -537,6 +812,7 @@ private static nuint ZSTD_count_2segments(byte* ip, byte* match, byte* iEnd, byt } private const uint prime3bytes = 506832829U; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint ZSTD_hash3(uint u, uint h, uint s) { @@ -557,6 +833,7 @@ private static nuint ZSTD_hash3PtrS(void* ptr, uint h, uint s) } private const uint prime4bytes = 2654435761U; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint ZSTD_hash4(uint u, uint h, uint s) { @@ -577,6 +854,7 @@ private static nuint ZSTD_hash4PtrS(void* ptr, uint h, uint s) } private const ulong prime5bytes = 889523592379UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ZSTD_hash5(ulong u, uint h, ulong s) { @@ -597,6 +875,7 @@ private static nuint ZSTD_hash5PtrS(void* p, uint h, ulong s) } private const ulong prime6bytes = 227718039650203UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ZSTD_hash6(ulong u, uint h, ulong s) { @@ -617,6 +896,7 @@ private static nuint ZSTD_hash6PtrS(void* p, uint h, ulong s) } private const ulong prime7bytes = 58295818150454627UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ZSTD_hash7(ulong u, uint h, ulong s) { @@ -637,6 +917,7 @@ private static nuint ZSTD_hash7PtrS(void* p, uint h, ulong s) } private const ulong prime8bytes = 0xCF1BBCDCB7A56463UL; + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ZSTD_hash8(ulong u, uint h, ulong s) { @@ -742,7 +1023,12 @@ private static ulong ZSTD_rollingHash_primePower(uint length) * Rotate the rolling hash by one byte. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_rollingHash_rotate(ulong hash, byte toRemove, byte toAdd, ulong primePower) + private static ulong ZSTD_rollingHash_rotate( + ulong hash, + byte toRemove, + byte toAdd, + ulong primePower + ) { hash -= (ulong)(toRemove + 10) * primePower; hash *= prime8bytes; @@ -766,7 +1052,10 @@ private static void ZSTD_window_clear(ZSTD_window_t* window) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint ZSTD_window_isEmpty(ZSTD_window_t window) { - return window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 ? 1U : 0U; + return + window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 + ? 1U + : 0U; } /** @@ -787,7 +1076,12 @@ private static uint ZSTD_window_hasExtDict(ZSTD_window_t window) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static ZSTD_dictMode_e ZSTD_matchState_dictMode(ZSTD_MatchState_t* ms) { - return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict : ms->dictMatchState != null ? ms->dictMatchState->dedicatedDictSearch != 0 ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch : ZSTD_dictMode_e.ZSTD_dictMatchState : ZSTD_dictMode_e.ZSTD_noDict; + return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict + : ms->dictMatchState != null + ? ms->dictMatchState->dedicatedDictSearch != 0 + ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + : ZSTD_dictMode_e.ZSTD_dictMatchState + : ZSTD_dictMode_e.ZSTD_noDict; } /** @@ -796,18 +1090,28 @@ private static ZSTD_dictMode_e ZSTD_matchState_dictMode(ZSTD_MatchState_t* ms) * to work correctly without impacting compression ratio. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_canOverflowCorrect(ZSTD_window_t window, uint cycleLog, uint maxDist, uint loadedDictEnd, void* src) + private static uint ZSTD_window_canOverflowCorrect( + ZSTD_window_t window, + uint cycleLog, + uint maxDist, + uint loadedDictEnd, + void* src + ) { uint cycleSize = 1U << (int)cycleLog; uint curr = (uint)((byte*)src - window.@base); - uint minIndexToOverflowCorrect = cycleSize + (maxDist > cycleSize ? maxDist : cycleSize) + 2; + uint minIndexToOverflowCorrect = + cycleSize + (maxDist > cycleSize ? maxDist : cycleSize) + 2; /* Adjust the min index to backoff the overflow correction frequency, * so we don't waste too much CPU in overflow correction. If this * computation overflows we don't really care, we just need to make * sure it is at least minIndexToOverflowCorrect. */ uint adjustment = window.nbOverflowCorrections + 1; - uint adjustedIndex = minIndexToOverflowCorrect * adjustment > minIndexToOverflowCorrect ? minIndexToOverflowCorrect * adjustment : minIndexToOverflowCorrect; + uint adjustedIndex = + minIndexToOverflowCorrect * adjustment > minIndexToOverflowCorrect + ? minIndexToOverflowCorrect * adjustment + : minIndexToOverflowCorrect; uint indexLargeEnough = curr > adjustedIndex ? 1U : 0U; /* Only overflow correct early if the dictionary is invalidated already, * so we don't hurt compression ratio. @@ -822,7 +1126,14 @@ private static uint ZSTD_window_canOverflowCorrect(ZSTD_window_t window, uint cy * protection. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_needOverflowCorrection(ZSTD_window_t window, uint cycleLog, uint maxDist, uint loadedDictEnd, void* src, void* srcEnd) + private static uint ZSTD_window_needOverflowCorrection( + ZSTD_window_t window, + uint cycleLog, + uint maxDist, + uint loadedDictEnd, + void* src, + void* srcEnd + ) { uint curr = (uint)((byte*)srcEnd - window.@base); return curr > (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) ? 1U : 0U; @@ -838,7 +1149,12 @@ private static uint ZSTD_window_needOverflowCorrection(ZSTD_window_t window, uin * which may be 0. Every index up to maxDist in the past must be valid. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_correctOverflow(ZSTD_window_t* window, uint cycleLog, uint maxDist, void* src) + private static uint ZSTD_window_correctOverflow( + ZSTD_window_t* window, + uint cycleLog, + uint maxDist, + void* src + ) { /* preemptive overflow correction: * 1. correction is large enough: @@ -864,8 +1180,14 @@ private static uint ZSTD_window_correctOverflow(ZSTD_window_t* window, uint cycl uint curr = (uint)((byte*)src - window->@base); uint currentCycle = curr & cycleMask; /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ - uint currentCycleCorrection = currentCycle < 2 ? cycleSize > 2 ? cycleSize : 2 : 0; - uint newCurrent = currentCycle + currentCycleCorrection + (maxDist > cycleSize ? maxDist : cycleSize); + uint currentCycleCorrection = + currentCycle < 2 + ? cycleSize > 2 + ? cycleSize + : 2 + : 0; + uint newCurrent = + currentCycle + currentCycleCorrection + (maxDist > cycleSize ? maxDist : cycleSize); uint correction = curr - newCurrent; assert((maxDist & maxDist - 1) == 0); assert((curr & cycleMask) == (newCurrent & cycleMask)); @@ -926,7 +1248,13 @@ private static uint ZSTD_window_correctOverflow(ZSTD_window_t* window, uint cycl * forceWindow and dictMatchState are therefore incompatible. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, void* blockEnd, uint maxDist, uint* loadedDictEndPtr, ZSTD_MatchState_t** dictMatchStatePtr) + private static void ZSTD_window_enforceMaxDist( + ZSTD_window_t* window, + void* blockEnd, + uint maxDist, + uint* loadedDictEndPtr, + ZSTD_MatchState_t** dictMatchStatePtr + ) { uint blockEndIdx = (uint)((byte*)blockEnd - window->@base); uint loadedDictEnd = loadedDictEndPtr != null ? *loadedDictEndPtr : 0; @@ -954,7 +1282,13 @@ private static void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, void* bloc * loadedDictEnd uses same referential as window->base * maxDist is the window size */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_checkDictValidity(ZSTD_window_t* window, void* blockEnd, uint maxDist, uint* loadedDictEndPtr, ZSTD_MatchState_t** dictMatchStatePtr) + private static void ZSTD_checkDictValidity( + ZSTD_window_t* window, + void* blockEnd, + uint maxDist, + uint* loadedDictEndPtr, + ZSTD_MatchState_t** dictMatchStatePtr + ) { assert(loadedDictEndPtr != null); assert(dictMatchStatePtr != null); @@ -971,16 +1305,17 @@ private static void ZSTD_checkDictValidity(ZSTD_window_t* window, void* blockEnd } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] - { - 32, - 0 - }; - private static byte* stringToByte_20_00 => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_stringToByte_20_00)); + private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] { 32, 0 }; + private static byte* stringToByte_20_00 => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_stringToByte_20_00) + ); #else private static readonly byte* stringToByte_20_00 = GetArrayPointer(new byte[] { 32, 0 }); #endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void 
ZSTD_window_init(ZSTD_window_t* window) { @@ -991,7 +1326,7 @@ private static void ZSTD_window_init(ZSTD_window_t* window) dictLimit = 2, lowLimit = 2, nextSrc = stringToByte_20_00 + 2, - nbOverflowCorrections = 0 + nbOverflowCorrections = 0, }; } @@ -1003,7 +1338,12 @@ private static void ZSTD_window_init(ZSTD_window_t* window) * Returns non-zero if the segment is contiguous. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_update(ZSTD_window_t* window, void* src, nuint srcSize, int forceNonContiguous) + private static uint ZSTD_window_update( + ZSTD_window_t* window, + void* src, + nuint srcSize, + int forceNonContiguous + ) { byte* ip = (byte*)src; uint contiguous = 1; @@ -1026,10 +1366,14 @@ private static uint ZSTD_window_update(ZSTD_window_t* window, void* src, nuint s } window->nextSrc = ip + srcSize; - if (ip + srcSize > window->dictBase + window->lowLimit && ip < window->dictBase + window->dictLimit) + if ( + ip + srcSize > window->dictBase + window->lowLimit + && ip < window->dictBase + window->dictLimit + ) { nuint highInputIdx = (nuint)(ip + srcSize - window->dictBase); - uint lowLimitMax = highInputIdx > window->dictLimit ? window->dictLimit : (uint)highInputIdx; + uint lowLimitMax = + highInputIdx > window->dictLimit ? window->dictLimit : (uint)highInputIdx; assert(highInputIdx < 0xffffffff); window->lowLimit = lowLimitMax; } @@ -1041,7 +1385,11 @@ private static uint ZSTD_window_update(ZSTD_window_t* window, void* src, nuint s * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestMatchIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog) + private static uint ZSTD_getLowestMatchIndex( + ZSTD_MatchState_t* ms, + uint curr, + uint windowLog + ) { uint maxDistance = 1U << (int)windowLog; uint lowestValid = ms->window.lowLimit; @@ -1059,7 +1407,11 @@ private static uint ZSTD_getLowestMatchIndex(ZSTD_MatchState_t* ms, uint curr, u * Returns the lowest allowed match index in the prefix. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestPrefixIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog) + private static uint ZSTD_getLowestPrefixIndex( + ZSTD_MatchState_t* ms, + uint curr, + uint windowLog + ) { uint maxDistance = 1U << (int)windowLog; uint lowestValid = ms->window.dictLimit; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs index 3d50a52c5..803f67493 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs @@ -7,7 +7,12 @@ public static unsafe partial class Methods /* ************************************************************** * Literals compression - special cases ****************************************************************/ - private static nuint ZSTD_noCompressLiterals(void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_noCompressLiterals( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { byte* ostart = (byte*)dst; uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 
1 : 0)); @@ -22,10 +27,16 @@ private static nuint ZSTD_noCompressLiterals(void* dst, nuint dstCapacity, void* ostart[0] = (byte)((uint)SymbolEncodingType_e.set_basic + (srcSize << 3)); break; case 2: - MEM_writeLE16(ostart, (ushort)((uint)SymbolEncodingType_e.set_basic + (1 << 2) + (srcSize << 4))); + MEM_writeLE16( + ostart, + (ushort)((uint)SymbolEncodingType_e.set_basic + (1 << 2) + (srcSize << 4)) + ); break; case 3: - MEM_writeLE32(ostart, (uint)((uint)SymbolEncodingType_e.set_basic + (3 << 2) + (srcSize << 4))); + MEM_writeLE32( + ostart, + (uint)((uint)SymbolEncodingType_e.set_basic + (3 << 2) + (srcSize << 4)) + ); break; default: assert(0 != 0); @@ -57,7 +68,12 @@ private static int allBytesIdentical(void* src, nuint srcSize) * Conditions : * - All bytes in @src are identical * - dstCapacity >= 4 */ - private static nuint ZSTD_compressRleLiteralsBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_compressRleLiteralsBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { byte* ostart = (byte*)dst; uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 1 : 0)); @@ -69,10 +85,16 @@ private static nuint ZSTD_compressRleLiteralsBlock(void* dst, nuint dstCapacity, ostart[0] = (byte)((uint)SymbolEncodingType_e.set_rle + (srcSize << 3)); break; case 2: - MEM_writeLE16(ostart, (ushort)((uint)SymbolEncodingType_e.set_rle + (1 << 2) + (srcSize << 4))); + MEM_writeLE16( + ostart, + (ushort)((uint)SymbolEncodingType_e.set_rle + (1 << 2) + (srcSize << 4)) + ); break; case 3: - MEM_writeLE32(ostart, (uint)((uint)SymbolEncodingType_e.set_rle + (3 << 2) + (srcSize << 4))); + MEM_writeLE32( + ostart, + (uint)((uint)SymbolEncodingType_e.set_rle + (3 << 2) + (srcSize << 4)) + ); break; default: assert(0 != 0); @@ -88,7 +110,10 @@ private static nuint ZSTD_compressRleLiteralsBlock(void* dst, nuint dstCapacity, * for literal compression to even be attempted. 
* Minimum is made tighter as compression strategy increases. */ - private static nuint ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) + private static nuint ZSTD_minLiteralsToCompress( + ZSTD_strategy strategy, + HUF_repeat huf_repeat + ) { assert((int)strategy >= 0); assert((int)strategy <= 9); @@ -104,9 +129,24 @@ private static nuint ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repe * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE * @suspectUncompressible: sampling checks, to potentially skip huffman coding */ - private static nuint ZSTD_compressLiterals(void* dst, nuint dstCapacity, void* src, nuint srcSize, void* entropyWorkspace, nuint entropyWorkspaceSize, ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, int suspectUncompressible, int bmi2) + private static nuint ZSTD_compressLiterals( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* entropyWorkspace, + nuint entropyWorkspaceSize, + ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2 + ) { - nuint lhSize = (nuint)(3 + (srcSize >= 1 * (1 << 10) ? 1 : 0) + (srcSize >= 16 * (1 << 10) ? 1 : 0)); + nuint lhSize = (nuint)( + 3 + (srcSize >= 1 * (1 << 10) ? 1 : 0) + (srcSize >= 16 * (1 << 10) ? 1 : 0) + ); byte* ostart = (byte*)dst; uint singleStream = srcSize < 256 ? 1U : 0U; SymbolEncodingType_e hType = SymbolEncodingType_e.set_compressed; @@ -123,12 +163,82 @@ private static nuint ZSTD_compressLiterals(void* dst, nuint dstCapacity, void* s { HUF_repeat repeat = prevHuf->repeatMode; - int flags = 0 | (bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) | (strategy < ZSTD_strategy.ZSTD_lazy && srcSize <= 1024 ? (int)HUF_flags_e.HUF_flags_preferRepeat : 0) | (strategy >= ZSTD_strategy.ZSTD_btultra ? (int)HUF_flags_e.HUF_flags_optimalDepth : 0) | (suspectUncompressible != 0 ? 
(int)HUF_flags_e.HUF_flags_suspectUncompressible : 0); + int flags = + 0 + | (bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) + | ( + strategy < ZSTD_strategy.ZSTD_lazy && srcSize <= 1024 + ? (int)HUF_flags_e.HUF_flags_preferRepeat + : 0 + ) + | ( + strategy >= ZSTD_strategy.ZSTD_btultra + ? (int)HUF_flags_e.HUF_flags_optimalDepth + : 0 + ) + | ( + suspectUncompressible != 0 + ? (int)HUF_flags_e.HUF_flags_suspectUncompressible + : 0 + ); void* huf_compress; if (repeat == HUF_repeat.HUF_repeat_valid && lhSize == 3) singleStream = 1; - huf_compress = singleStream != 0 ? (delegate* managed)(&HUF_compress1X_repeat) : (delegate* managed)(&HUF_compress4X_repeat); - cLitSize = ((delegate* managed)huf_compress)(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, entropyWorkspace, entropyWorkspaceSize, &nextHuf->CTable.e0, &repeat, flags); + huf_compress = + singleStream != 0 + ? (delegate* managed< + void*, + nuint, + void*, + nuint, + uint, + uint, + void*, + nuint, + nuint*, + HUF_repeat*, + int, + nuint>)(&HUF_compress1X_repeat) + : (delegate* managed< + void*, + nuint, + void*, + nuint, + uint, + uint, + void*, + nuint, + nuint*, + HUF_repeat*, + int, + nuint>)(&HUF_compress4X_repeat); + cLitSize = ( + (delegate* managed< + void*, + nuint, + void*, + nuint, + uint, + uint, + void*, + nuint, + nuint*, + HUF_repeat*, + int, + nuint>)huf_compress + )( + ostart + lhSize, + dstCapacity - lhSize, + src, + srcSize, + 255, + 11, + entropyWorkspace, + entropyWorkspaceSize, + &nextHuf->CTable.e0, + &repeat, + flags + ); if (repeat != HUF_repeat.HUF_repeat_none) { hType = SymbolEncodingType_e.set_repeat; @@ -164,25 +274,38 @@ private static nuint ZSTD_compressLiterals(void* dst, nuint dstCapacity, void* s #if DEBUG if (singleStream == 0) assert(srcSize >= 6); + #endif { - uint lhc = (uint)hType + ((singleStream == 0 ? 1U : 0U) << 2) + ((uint)srcSize << 4) + ((uint)cLitSize << 14); + uint lhc = + (uint)hType + + ((singleStream == 0 ? 
1U : 0U) << 2) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 14); MEM_writeLE24(ostart, lhc); break; } case 4: assert(srcSize >= 6); + { - uint lhc = (uint)(hType + (2 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 18); + uint lhc = + (uint)(hType + (2 << 2)) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 18); MEM_writeLE32(ostart, lhc); break; } case 5: assert(srcSize >= 6); + { - uint lhc = (uint)(hType + (3 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 22); + uint lhc = + (uint)(hType + (3 << 2)) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 22); MEM_writeLE32(ostart, lhc); ostart[4] = (byte)(cLitSize >> 10); break; @@ -196,4 +319,4 @@ private static nuint ZSTD_compressLiterals(void* dst, nuint dstCapacity, void* s return lhSize + cLitSize; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs index 17578d25f..c336ff447 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs @@ -1,277 +1,543 @@ -using static ZstdSharp.UnsafeHelper; using System; -using System.Runtime.InteropServices; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_kInverseProbabilityLog256 => new uint[256] - { - 0, - 2048, - 1792, - 1642, - 1536, - 1453, - 1386, - 1329, - 1280, - 1236, - 1197, - 1162, - 1130, - 1100, - 1073, - 1047, - 1024, - 1001, - 980, - 960, - 941, - 923, - 906, - 889, - 874, - 859, - 844, - 830, - 817, - 804, - 791, - 779, - 768, - 756, - 745, - 734, - 724, - 714, - 704, - 694, - 685, - 676, - 667, - 658, - 650, - 642, - 633, - 626, - 618, - 610, - 603, - 595, - 588, - 581, - 574, - 567, - 561, - 554, - 548, - 542, - 
535, - 529, - 523, - 517, - 512, - 506, - 500, - 495, - 489, - 484, - 478, - 473, - 468, - 463, - 458, - 453, - 448, - 443, - 438, - 434, - 429, - 424, - 420, - 415, - 411, - 407, - 402, - 398, - 394, - 390, - 386, - 382, - 377, - 373, - 370, - 366, - 362, - 358, - 354, - 350, - 347, - 343, - 339, - 336, - 332, - 329, - 325, - 322, - 318, - 315, - 311, - 308, - 305, - 302, - 298, - 295, - 292, - 289, - 286, - 282, - 279, - 276, - 273, - 270, - 267, - 264, - 261, - 258, - 256, - 253, - 250, - 247, - 244, - 241, - 239, - 236, - 233, - 230, - 228, - 225, - 222, - 220, - 217, - 215, - 212, - 209, - 207, - 204, - 202, - 199, - 197, - 194, - 192, - 190, - 187, - 185, - 182, - 180, - 178, - 175, - 173, - 171, - 168, - 166, - 164, - 162, - 159, - 157, - 155, - 153, - 151, - 149, - 146, - 144, - 142, - 140, - 138, - 136, - 134, - 132, - 130, - 128, - 126, - 123, - 121, - 119, - 117, - 115, - 114, - 112, - 110, - 108, - 106, - 104, - 102, - 100, - 98, - 96, - 94, - 93, - 91, - 89, - 87, - 85, - 83, - 82, - 80, - 78, - 76, - 74, - 73, - 71, - 69, - 67, - 66, - 64, - 62, - 61, - 59, - 57, - 55, - 54, - 52, - 50, - 49, - 47, - 46, - 44, - 42, - 41, - 39, - 37, - 36, - 34, - 33, - 31, - 30, - 28, - 26, - 25, - 23, - 22, - 20, - 19, - 17, - 16, - 14, - 13, - 11, - 10, - 8, - 7, - 5, - 4, - 2, - 1 - }; - private static uint* kInverseProbabilityLog256 => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256)); + private static ReadOnlySpan Span_kInverseProbabilityLog256 => + new uint[256] + { + 0, + 2048, + 1792, + 1642, + 1536, + 1453, + 1386, + 1329, + 1280, + 1236, + 1197, + 1162, + 1130, + 1100, + 1073, + 1047, + 1024, + 1001, + 980, + 960, + 941, + 923, + 906, + 889, + 874, + 859, + 844, + 830, + 817, + 804, + 791, + 779, + 768, + 756, + 745, + 734, + 724, + 714, + 704, + 694, + 685, + 676, + 667, + 658, + 650, + 642, + 633, + 626, + 618, + 610, + 603, + 595, + 588, + 581, + 574, + 567, + 561, + 554, + 548, + 542, 
+ 535, + 529, + 523, + 517, + 512, + 506, + 500, + 495, + 489, + 484, + 478, + 473, + 468, + 463, + 458, + 453, + 448, + 443, + 438, + 434, + 429, + 424, + 420, + 415, + 411, + 407, + 402, + 398, + 394, + 390, + 386, + 382, + 377, + 373, + 370, + 366, + 362, + 358, + 354, + 350, + 347, + 343, + 339, + 336, + 332, + 329, + 325, + 322, + 318, + 315, + 311, + 308, + 305, + 302, + 298, + 295, + 292, + 289, + 286, + 282, + 279, + 276, + 273, + 270, + 267, + 264, + 261, + 258, + 256, + 253, + 250, + 247, + 244, + 241, + 239, + 236, + 233, + 230, + 228, + 225, + 222, + 220, + 217, + 215, + 212, + 209, + 207, + 204, + 202, + 199, + 197, + 194, + 192, + 190, + 187, + 185, + 182, + 180, + 178, + 175, + 173, + 171, + 168, + 166, + 164, + 162, + 159, + 157, + 155, + 153, + 151, + 149, + 146, + 144, + 142, + 140, + 138, + 136, + 134, + 132, + 130, + 128, + 126, + 123, + 121, + 119, + 117, + 115, + 114, + 112, + 110, + 108, + 106, + 104, + 102, + 100, + 98, + 96, + 94, + 93, + 91, + 89, + 87, + 85, + 83, + 82, + 80, + 78, + 76, + 74, + 73, + 71, + 69, + 67, + 66, + 64, + 62, + 61, + 59, + 57, + 55, + 54, + 52, + 50, + 49, + 47, + 46, + 44, + 42, + 41, + 39, + 37, + 36, + 34, + 33, + 31, + 30, + 28, + 26, + 25, + 23, + 22, + 20, + 19, + 17, + 16, + 14, + 13, + 11, + 10, + 8, + 7, + 5, + 4, + 2, + 1, + }; + private static uint* kInverseProbabilityLog256 => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256) + ); #else - private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer(new uint[256] { 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, 468, 463, 458, 453, 448, 443, 
438, 434, 429, 424, 420, 415, 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, 5, 4, 2, 1 }); + private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer( + new uint[256] + { + 0, + 2048, + 1792, + 1642, + 1536, + 1453, + 1386, + 1329, + 1280, + 1236, + 1197, + 1162, + 1130, + 1100, + 1073, + 1047, + 1024, + 1001, + 980, + 960, + 941, + 923, + 906, + 889, + 874, + 859, + 844, + 830, + 817, + 804, + 791, + 779, + 768, + 756, + 745, + 734, + 724, + 714, + 704, + 694, + 685, + 676, + 667, + 658, + 650, + 642, + 633, + 626, + 618, + 610, + 603, + 595, + 588, + 581, + 574, + 567, + 561, + 554, + 548, + 542, + 535, + 529, + 523, + 517, + 512, + 506, + 500, + 495, + 489, + 484, + 478, + 473, + 468, + 463, + 458, + 453, + 448, + 443, + 438, + 434, + 429, + 424, + 420, + 415, + 411, + 407, + 402, + 398, + 394, + 390, + 386, + 382, + 377, + 373, + 370, + 366, + 362, + 358, + 354, + 350, + 347, + 343, + 339, + 336, + 332, + 329, + 325, + 322, + 318, + 315, + 311, + 308, + 305, + 302, + 298, + 295, + 292, + 289, + 286, + 282, + 279, + 276, + 273, + 270, + 267, + 264, + 261, + 258, + 256, + 253, + 250, + 247, + 244, + 241, + 239, + 236, + 233, + 230, + 228, + 225, + 222, + 220, + 217, + 215, + 212, + 209, + 207, + 204, + 202, + 199, + 197, + 
194, + 192, + 190, + 187, + 185, + 182, + 180, + 178, + 175, + 173, + 171, + 168, + 166, + 164, + 162, + 159, + 157, + 155, + 153, + 151, + 149, + 146, + 144, + 142, + 140, + 138, + 136, + 134, + 132, + 130, + 128, + 126, + 123, + 121, + 119, + 117, + 115, + 114, + 112, + 110, + 108, + 106, + 104, + 102, + 100, + 98, + 96, + 94, + 93, + 91, + 89, + 87, + 85, + 83, + 82, + 80, + 78, + 76, + 74, + 73, + 71, + 69, + 67, + 66, + 64, + 62, + 61, + 59, + 57, + 55, + 54, + 52, + 50, + 49, + 47, + 46, + 44, + 42, + 41, + 39, + 37, + 36, + 34, + 33, + 31, + 30, + 28, + 26, + 25, + 23, + 22, + 20, + 19, + 17, + 16, + 14, + 13, + 11, + 10, + 8, + 7, + 5, + 4, + 2, + 1, + } + ); #endif + private static uint ZSTD_getFSEMaxSymbolValue(uint* ctable) { void* ptr = ctable; @@ -299,7 +565,14 @@ private static nuint ZSTD_NCountCost(uint* count, uint max, nuint nbSeq, uint FS short* norm = stackalloc short[53]; uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); { - nuint err_code = FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)); + nuint err_code = FSE_normalizeCount( + norm, + tableLog, + count, + nbSeq, + max, + ZSTD_useLowProbCount(nbSeq) + ); if (ERR_isError(err_code)) { return err_code; @@ -369,7 +642,12 @@ private static nuint ZSTD_fseBitCost(uint* ctable, uint* count, uint max) * table described by norm. The max symbol support by norm is assumed >= max. * norm must be valid for every symbol with non-zero probability in count. 
*/ - private static nuint ZSTD_crossEntropyCost(short* norm, uint accuracyLog, uint* count, uint max) + private static nuint ZSTD_crossEntropyCost( + short* norm, + uint accuracyLog, + uint* count, + uint max + ) { uint shift = 8 - accuracyLog; nuint cost = 0; @@ -387,7 +665,19 @@ private static nuint ZSTD_crossEntropyCost(short* norm, uint accuracyLog, uint* return cost >> 8; } - private static SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode, uint* count, uint max, nuint mostFrequent, nuint nbSeq, uint FSELog, uint* prevCTable, short* defaultNorm, uint defaultNormLog, ZSTD_DefaultPolicy_e isDefaultAllowed, ZSTD_strategy strategy) + private static SymbolEncodingType_e ZSTD_selectEncodingType( + FSE_repeat* repeatMode, + uint* count, + uint max, + nuint mostFrequent, + nuint nbSeq, + uint FSELog, + uint* prevCTable, + short* defaultNorm, + uint defaultNormLog, + ZSTD_DefaultPolicy_e isDefaultAllowed, + ZSTD_strategy strategy + ) { if (mostFrequent == nbSeq) { @@ -408,7 +698,8 @@ private static SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMo nuint mult = (nuint)(10 - strategy); const nuint baseLog = 3; /* 28-36 for offset, 56-72 for lengths */ - nuint dynamicFse_nbSeq_min = ((nuint)1 << (int)defaultNormLog) * mult >> (int)baseLog; + nuint dynamicFse_nbSeq_min = + ((nuint)1 << (int)defaultNormLog) * mult >> (int)baseLog; assert(defaultNormLog >= 5 && defaultNormLog <= 6); assert(mult <= 9 && mult >= 7); if (*repeatMode == FSE_repeat.FSE_repeat_valid && nbSeq < staticFse_nbSeq_max) @@ -416,7 +707,10 @@ private static SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMo return SymbolEncodingType_e.set_repeat; } - if (nbSeq < dynamicFse_nbSeq_min || mostFrequent < nbSeq >> (int)(defaultNormLog - 1)) + if ( + nbSeq < dynamicFse_nbSeq_min + || mostFrequent < nbSeq >> (int)(defaultNormLog - 1) + ) { *repeatMode = FSE_repeat.FSE_repeat_none; return SymbolEncodingType_e.set_basic; @@ -425,20 +719,30 @@ private static 
SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMo } else { - nuint basicCost = isDefaultAllowed != default ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - nuint repeatCost = *repeatMode != FSE_repeat.FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + nuint basicCost = + isDefaultAllowed != default + ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + nuint repeatCost = + *repeatMode != FSE_repeat.FSE_repeat_none + ? ZSTD_fseBitCost(prevCTable, count, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); nuint NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); nuint compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); #if DEBUG if (isDefaultAllowed != default) { assert(!ERR_isError(basicCost)); - assert(!(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost))); + assert( + !(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost)) + ); } #endif assert(!ERR_isError(NCountCost)); - assert(compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode))); + assert( + compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)) + ); if (basicCost <= repeatCost && basicCost <= compressedCost) { assert(isDefaultAllowed != default); @@ -459,7 +763,24 @@ private static SymbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMo return SymbolEncodingType_e.set_compressed; } - private static nuint ZSTD_buildCTable(void* dst, nuint dstCapacity, uint* nextCTable, uint FSELog, SymbolEncodingType_e type, uint* count, uint max, byte* codeTable, nuint nbSeq, short* defaultNorm, uint defaultNormLog, uint defaultMax, uint* prevCTable, nuint prevCTableSize, void* entropyWorkspace, nuint entropyWorkspaceSize) + private static nuint 
ZSTD_buildCTable( + void* dst, + nuint dstCapacity, + uint* nextCTable, + uint FSELog, + SymbolEncodingType_e type, + uint* count, + uint max, + byte* codeTable, + nuint nbSeq, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + uint* prevCTable, + nuint prevCTableSize, + void* entropyWorkspace, + nuint entropyWorkspaceSize + ) { byte* op = (byte*)dst; byte* oend = op + dstCapacity; @@ -487,7 +808,14 @@ private static nuint ZSTD_buildCTable(void* dst, nuint dstCapacity, uint* nextCT case SymbolEncodingType_e.set_basic: { /* note : could be pre-calculated */ - nuint err_code = FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize); + nuint err_code = FSE_buildCTable_wksp( + nextCTable, + defaultNorm, + defaultMax, + defaultNormLog, + entropyWorkspace, + entropyWorkspaceSize + ); if (ERR_isError(err_code)) { return err_code; @@ -496,49 +824,69 @@ private static nuint ZSTD_buildCTable(void* dst, nuint dstCapacity, uint* nextCT return 0; case SymbolEncodingType_e.set_compressed: + { + ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; + nuint nbSeq_1 = nbSeq; + uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + if (count[codeTable[nbSeq - 1]] > 1) + { + count[codeTable[nbSeq - 1]]--; + nbSeq_1--; + } + + assert(nbSeq_1 > 1); + assert(entropyWorkspaceSize >= (nuint)sizeof(ZSTD_BuildCTableWksp)); { - ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; - nuint nbSeq_1 = nbSeq; - uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - if (count[codeTable[nbSeq - 1]] > 1) + nuint err_code = FSE_normalizeCount( + wksp->norm, + tableLog, + count, + nbSeq_1, + max, + ZSTD_useLowProbCount(nbSeq_1) + ); + if (ERR_isError(err_code)) { - count[codeTable[nbSeq - 1]]--; - nbSeq_1--; + return err_code; } + } - assert(nbSeq_1 > 1); - assert(entropyWorkspaceSize >= (nuint)sizeof(ZSTD_BuildCTableWksp)); + assert(oend >= op); + { + /* overflow protected */ + nuint 
NCountSize = FSE_writeNCount( + op, + (nuint)(oend - op), + wksp->norm, + max, + tableLog + ); { - nuint err_code = FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)); + nuint err_code = NCountSize; if (ERR_isError(err_code)) { return err_code; } } - assert(oend >= op); { - /* overflow protected */ - nuint NCountSize = FSE_writeNCount(op, (nuint)(oend - op), wksp->norm, max, tableLog); - { - nuint err_code = NCountSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - + nuint err_code = FSE_buildCTable_wksp( + nextCTable, + wksp->norm, + max, + tableLog, + wksp->wksp, + sizeof(uint) * 285 + ); + if (ERR_isError(err_code)) { - nuint err_code = FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(uint) * 285); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - return NCountSize; } + + return NCountSize; } + } default: assert(0 != 0); @@ -546,7 +894,19 @@ private static nuint ZSTD_buildCTable(void* dst, nuint dstCapacity, uint* nextCT } } - private static nuint ZSTD_encodeSequences_body(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets) + private static nuint ZSTD_encodeSequences_body( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* ofCodeTable, + uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets + ) { BIT_CStream_t blockStream; System.Runtime.CompilerServices.Unsafe.SkipInit(out blockStream); @@ -568,30 +928,81 @@ private static nuint ZSTD_encodeSequences_body(void* dst, nuint dstCapacity, uin FSE_initCState2(ref stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); FSE_initCState2(ref stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]); FSE_initCState2(ref 
stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].litLength, + LL_bits[llCodeTable[nbSeq - 1]] + ); if (MEM_32bits) - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].mlBase, ML_bits[mlCodeTable[nbSeq - 1]]); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].mlBase, + ML_bits[mlCodeTable[nbSeq - 1]] + ); if (MEM_32bits) - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); if (longOffsets != 0) { uint ofBits = ofCodeTable[nbSeq - 1]; - uint extraBits = ofBits - (ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 ? ofBits : (uint)(MEM_32bits ? 25 : 57) - 1); + uint extraBits = + ofBits + - ( + ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 + ? ofBits + : (uint)(MEM_32bits ? 
25 : 57) - 1 + ); if (extraBits != 0) { - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase, extraBits); - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].offBase, + extraBits + ); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); } - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase >> (int)extraBits, ofBits - extraBits); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].offBase >> (int)extraBits, + ofBits - extraBits + ); } else { - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[nbSeq - 1].offBase, ofCodeTable[nbSeq - 1]); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].offBase, + ofCodeTable[nbSeq - 1] + ); } - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); { nuint n; for (n = nbSeq - 2; n < nbSeq; n--) @@ -602,44 +1013,144 @@ private static nuint ZSTD_encodeSequences_body(void* dst, nuint dstCapacity, uin uint llBits = LL_bits[llCode]; uint ofBits = ofCode; uint mlBits = ML_bits[mlCode]; - FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateOffsetBits, ofCode); - FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateMatchLength, mlCode); + FSE_encodeSymbol( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref stateOffsetBits, + ofCode + ); + FSE_encodeSymbol( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref stateMatchLength, + mlCode + ); if (MEM_32bits) - 
BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); - FSE_encodeSymbol(ref blockStream_bitContainer, ref blockStream_bitPos, ref stateLitLength, llCode); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + FSE_encodeSymbol( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref stateLitLength, + llCode + ); if (MEM_32bits || ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8)) - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].litLength, llBits); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[n].litLength, + llBits + ); if (MEM_32bits && llBits + mlBits > 24) - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].mlBase, mlBits); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[n].mlBase, + mlBits + ); if (MEM_32bits || ofBits + mlBits + llBits > 56) - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); if (longOffsets != 0) { - uint extraBits = ofBits - (ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 ? ofBits : (uint)(MEM_32bits ? 25 : 57) - 1); + uint extraBits = + ofBits + - ( + ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 + ? ofBits + : (uint)(MEM_32bits ? 
25 : 57) - 1 + ); if (extraBits != 0) { - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase, extraBits); - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[n].offBase, + extraBits + ); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); } - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase >> (int)extraBits, ofBits - extraBits); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[n].offBase >> (int)extraBits, + ofBits - extraBits + ); } else { - BIT_addBits(ref blockStream_bitContainer, ref blockStream_bitPos, sequences[n].offBase, ofBits); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[n].offBase, + ofBits + ); } - BIT_flushBits(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr); + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); } } - FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateMatchLength); - FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateOffsetBits); - FSE_flushCState(ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr, ref stateLitLength); + FSE_flushCState( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateMatchLength + ); + FSE_flushCState( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateOffsetBits + ); + FSE_flushCState( + ref blockStream_bitContainer, + ref 
blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateLitLength + ); { - nuint streamSize = BIT_closeCStream(ref blockStream_bitContainer, ref blockStream_bitPos, blockStream_ptr, blockStream_endPtr, blockStream.startPtr); + nuint streamSize = BIT_closeCStream( + ref blockStream_bitContainer, + ref blockStream_bitPos, + blockStream_ptr, + blockStream_endPtr, + blockStream.startPtr + ); if (streamSize == 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -649,14 +1160,63 @@ private static nuint ZSTD_encodeSequences_body(void* dst, nuint dstCapacity, uin } } - private static nuint ZSTD_encodeSequences_default(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets) + private static nuint ZSTD_encodeSequences_default( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* ofCodeTable, + uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets + ) { - return ZSTD_encodeSequences_body(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); + return ZSTD_encodeSequences_body( + dst, + dstCapacity, + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets + ); } - private static nuint ZSTD_encodeSequences(void* dst, nuint dstCapacity, uint* CTable_MatchLength, byte* mlCodeTable, uint* CTable_OffsetBits, byte* ofCodeTable, uint* CTable_LitLength, byte* llCodeTable, SeqDef_s* sequences, nuint nbSeq, int longOffsets, int bmi2) + private static nuint ZSTD_encodeSequences( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* 
ofCodeTable, + uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets, + int bmi2 + ) { - return ZSTD_encodeSequences_default(dst, dstCapacity, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); + return ZSTD_encodeSequences_default( + dst, + dstCapacity, + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs index 3d5382c98..1f12375c5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs @@ -23,15 +23,30 @@ public static unsafe partial class Methods * @return : compressed size of literals section of a sub-block * Or 0 if unable to compress. * Or error code */ - private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTablesMetadata_t* hufMetadata, byte* literals, nuint litSize, void* dst, nuint dstSize, int bmi2, int writeEntropy, int* entropyWritten) + private static nuint ZSTD_compressSubBlock_literal( + nuint* hufTable, + ZSTD_hufCTablesMetadata_t* hufMetadata, + byte* literals, + nuint litSize, + void* dst, + nuint dstSize, + int bmi2, + int writeEntropy, + int* entropyWritten + ) { nuint header = (nuint)(writeEntropy != 0 ? 200 : 0); - nuint lhSize = (nuint)(3 + (litSize >= 1 * (1 << 10) - header ? 1 : 0) + (litSize >= 16 * (1 << 10) - header ? 1 : 0)); + nuint lhSize = (nuint)( + 3 + + (litSize >= 1 * (1 << 10) - header ? 1 : 0) + + (litSize >= 16 * (1 << 10) - header ? 1 : 0) + ); byte* ostart = (byte*)dst; byte* oend = ostart + dstSize; byte* op = ostart + lhSize; uint singleStream = lhSize == 3 ? 
1U : 0U; - SymbolEncodingType_e hType = writeEntropy != 0 ? hufMetadata->hType : SymbolEncodingType_e.set_repeat; + SymbolEncodingType_e hType = + writeEntropy != 0 ? hufMetadata->hType : SymbolEncodingType_e.set_repeat; nuint cLitSize = 0; *entropyWritten = 0; if (litSize == 0 || hufMetadata->hType == SymbolEncodingType_e.set_basic) @@ -44,7 +59,10 @@ private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTab } assert(litSize > 0); - assert(hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat); + assert( + hufMetadata->hType == SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat + ); if (writeEntropy != 0 && hufMetadata->hType == SymbolEncodingType_e.set_compressed) { memcpy(op, hufMetadata->hufDesBuffer, (uint)hufMetadata->hufDesSize); @@ -54,7 +72,24 @@ private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTab { int flags = bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0; - nuint cSize = singleStream != 0 ? HUF_compress1X_usingCTable(op, (nuint)(oend - op), literals, litSize, hufTable, flags) : HUF_compress4X_usingCTable(op, (nuint)(oend - op), literals, litSize, hufTable, flags); + nuint cSize = + singleStream != 0 + ? HUF_compress1X_usingCTable( + op, + (nuint)(oend - op), + literals, + litSize, + hufTable, + flags + ) + : HUF_compress4X_usingCTable( + op, + (nuint)(oend - op), + literals, + litSize, + hufTable, + flags + ); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) @@ -67,7 +102,14 @@ private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTab return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } - if (lhSize < (nuint)(3 + (cLitSize >= 1 * (1 << 10) ? 1 : 0) + (cLitSize >= 16 * (1 << 10) ? 1 : 0))) + if ( + lhSize + < (nuint)( + 3 + + (cLitSize >= 1 * (1 << 10) ? 1 : 0) + + (cLitSize >= 16 * (1 << 10) ? 
1 : 0) + ) + ) { assert(cLitSize > litSize); return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); @@ -77,26 +119,32 @@ private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTab switch (lhSize) { case 3: - { - uint lhc = (uint)hType + ((singleStream == 0 ? 1U : 0U) << 2) + ((uint)litSize << 4) + ((uint)cLitSize << 14); - MEM_writeLE24(ostart, lhc); - break; - } + { + uint lhc = + (uint)hType + + ((singleStream == 0 ? 1U : 0U) << 2) + + ((uint)litSize << 4) + + ((uint)cLitSize << 14); + MEM_writeLE24(ostart, lhc); + break; + } case 4: - { - uint lhc = (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); - MEM_writeLE32(ostart, lhc); - break; - } + { + uint lhc = + (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); + MEM_writeLE32(ostart, lhc); + break; + } case 5: - { - uint lhc = (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (byte)(cLitSize >> 10); - break; - } + { + uint lhc = + (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (byte)(cLitSize >> 10); + break; + } default: assert(0 != 0); @@ -107,7 +155,13 @@ private static nuint ZSTD_compressSubBlock_literal(nuint* hufTable, ZSTD_hufCTab return (nuint)(op - ostart); } - private static nuint ZSTD_seqDecompressedSize(SeqStore_t* seqStore, SeqDef_s* sequences, nuint nbSeqs, nuint litSize, int lastSubBlock) + private static nuint ZSTD_seqDecompressedSize( + SeqStore_t* seqStore, + SeqDef_s* sequences, + nuint nbSeqs, + nuint litSize, + int lastSubBlock + ) { nuint matchLengthSum = 0; nuint litLengthSum = 0; @@ -136,7 +190,21 @@ private static nuint ZSTD_seqDecompressedSize(SeqStore_t* seqStore, SeqDef_s* se * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. 
*/ - private static nuint ZSTD_compressSubBlock_sequences(ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, SeqDef_s* sequences, nuint nbSeq, byte* llCode, byte* mlCode, byte* ofCode, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, int bmi2, int writeEntropy, int* entropyWritten) + private static nuint ZSTD_compressSubBlock_sequences( + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + SeqDef_s* sequences, + nuint nbSeq, + byte* llCode, + byte* mlCode, + byte* ofCode, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + int bmi2, + int writeEntropy, + int* entropyWritten + ) { int longOffsets = cctxParams->cParams.windowLog > (uint)(MEM_32bits ? 25 : 57) ? 1 : 0; byte* ostart = (byte*)dst; @@ -186,7 +254,20 @@ private static nuint ZSTD_compressSubBlock_sequences(ZSTD_fseCTables_t* fseTable } { - nuint bitstreamSize = ZSTD_encodeSequences(op, (nuint)(oend - op), fseTables->matchlengthCTable, mlCode, fseTables->offcodeCTable, ofCode, fseTables->litlengthCTable, llCode, sequences, nbSeq, longOffsets, bmi2); + nuint bitstreamSize = ZSTD_encodeSequences( + op, + (nuint)(oend - op), + fseTables->matchlengthCTable, + mlCode, + fseTables->offcodeCTable, + ofCode, + fseTables->litlengthCTable, + llCode, + sequences, + nbSeq, + longOffsets, + bmi2 + ); { nuint err_code = bitstreamSize; if (ERR_isError(err_code)) @@ -196,7 +277,11 @@ private static nuint ZSTD_compressSubBlock_sequences(ZSTD_fseCTables_t* fseTable } op += bitstreamSize; - if (writeEntropy != 0 && fseMetadata->lastCountSize != 0 && fseMetadata->lastCountSize + bitstreamSize < 4) + if ( + writeEntropy != 0 + && fseMetadata->lastCountSize != 0 + && fseMetadata->lastCountSize + bitstreamSize < 4 + ) { assert(fseMetadata->lastCountSize + bitstreamSize == 3); return 0; @@ -216,13 +301,42 @@ private static nuint ZSTD_compressSubBlock_sequences(ZSTD_fseCTables_t* fseTable * Compresses a single sub-block. 
* @return : compressed size of the sub-block * Or 0 if it failed to compress. */ - private static nuint ZSTD_compressSubBlock(ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, SeqDef_s* sequences, nuint nbSeq, byte* literals, nuint litSize, byte* llCode, byte* mlCode, byte* ofCode, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, int bmi2, int writeLitEntropy, int writeSeqEntropy, int* litEntropyWritten, int* seqEntropyWritten, uint lastBlock) + private static nuint ZSTD_compressSubBlock( + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + SeqDef_s* sequences, + nuint nbSeq, + byte* literals, + nuint litSize, + byte* llCode, + byte* mlCode, + byte* ofCode, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + int bmi2, + int writeLitEntropy, + int writeSeqEntropy, + int* litEntropyWritten, + int* seqEntropyWritten, + uint lastBlock + ) { byte* ostart = (byte*)dst; byte* oend = ostart + dstCapacity; byte* op = ostart + ZSTD_blockHeaderSize; { - nuint cLitSize = ZSTD_compressSubBlock_literal(&entropy->huf.CTable.e0, &entropyMetadata->hufMetadata, literals, litSize, op, (nuint)(oend - op), bmi2, writeLitEntropy, litEntropyWritten); + nuint cLitSize = ZSTD_compressSubBlock_literal( + &entropy->huf.CTable.e0, + &entropyMetadata->hufMetadata, + literals, + litSize, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + litEntropyWritten + ); { nuint err_code = cLitSize; if (ERR_isError(err_code)) @@ -237,7 +351,21 @@ private static nuint ZSTD_compressSubBlock(ZSTD_entropyCTables_t* entropy, ZSTD_ } { - nuint cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, &entropyMetadata->fseMetadata, sequences, nbSeq, llCode, mlCode, ofCode, cctxParams, op, (nuint)(oend - op), bmi2, writeSeqEntropy, seqEntropyWritten); + nuint cSeqSize = ZSTD_compressSubBlock_sequences( + &entropy->fse, + &entropyMetadata->fseMetadata, + sequences, + nbSeq, + llCode, + mlCode, + ofCode, + 
cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeSeqEntropy, + seqEntropyWritten + ); { nuint err_code = cSeqSize; if (ERR_isError(err_code)) @@ -253,14 +381,23 @@ private static nuint ZSTD_compressSubBlock(ZSTD_entropyCTables_t* entropy, ZSTD_ { nuint cSize = (nuint)(op - ostart) - ZSTD_blockHeaderSize; - uint cBlockHeader24 = lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + uint cBlockHeader24 = + lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); MEM_writeLE24(ostart, cBlockHeader24); } return (nuint)(op - ostart); } - private static nuint ZSTD_estimateSubBlockSize_literal(byte* literals, nuint litSize, ZSTD_hufCTables_t* huf, ZSTD_hufCTablesMetadata_t* hufMetadata, void* workspace, nuint wkspSize, int writeEntropy) + private static nuint ZSTD_estimateSubBlockSize_literal( + byte* literals, + nuint litSize, + ZSTD_hufCTables_t* huf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) { uint* countWksp = (uint*)workspace; uint maxSymbolValue = 255; @@ -270,13 +407,27 @@ private static nuint ZSTD_estimateSubBlockSize_literal(byte* literals, nuint lit return litSize; else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) return 1; - else if (hufMetadata->hType == SymbolEncodingType_e.set_compressed || hufMetadata->hType == SymbolEncodingType_e.set_repeat) + else if ( + hufMetadata->hType == SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat + ) { - nuint largest = HIST_count_wksp(countWksp, &maxSymbolValue, literals, litSize, workspace, wkspSize); + nuint largest = HIST_count_wksp( + countWksp, + &maxSymbolValue, + literals, + litSize, + workspace, + wkspSize + ); if (ERR_isError(largest)) return litSize; { - nuint cLitSizeEstimate = HUF_estimateCompressedSize(&huf->CTable.e0, countWksp, maxSymbolValue); + nuint cLitSizeEstimate = HUF_estimateCompressedSize( + &huf->CTable.e0, + countWksp, + maxSymbolValue + ); if 
(writeEntropy != 0) cLitSizeEstimate += hufMetadata->hufDesSize; return cLitSizeEstimate + literalSectionHeaderSize; @@ -287,7 +438,19 @@ private static nuint ZSTD_estimateSubBlockSize_literal(byte* literals, nuint lit return 0; } - private static nuint ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type, byte* codeTable, uint maxCode, nuint nbSeq, uint* fseCTable, byte* additionalBits, short* defaultNorm, uint defaultNormLog, uint defaultMax, void* workspace, nuint wkspSize) + private static nuint ZSTD_estimateSubBlockSize_symbolType( + SymbolEncodingType_e type, + byte* codeTable, + uint maxCode, + nuint nbSeq, + uint* fseCTable, + byte* additionalBits, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + void* workspace, + nuint wkspSize + ) { uint* countWksp = (uint*)workspace; byte* ctp = codeTable; @@ -299,13 +462,19 @@ private static nuint ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e t if (type == SymbolEncodingType_e.set_basic) { assert(max <= defaultMax); - cSymbolTypeSizeEstimateInBits = max <= defaultMax ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + cSymbolTypeSizeEstimateInBits = + max <= defaultMax + ? 
ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } else if (type == SymbolEncodingType_e.set_rle) { cSymbolTypeSizeEstimateInBits = 0; } - else if (type == SymbolEncodingType_e.set_compressed || type == SymbolEncodingType_e.set_repeat) + else if ( + type == SymbolEncodingType_e.set_compressed + || type == SymbolEncodingType_e.set_repeat + ) { cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } @@ -324,44 +493,131 @@ private static nuint ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e t return cSymbolTypeSizeEstimateInBits / 8; } - private static nuint ZSTD_estimateSubBlockSize_sequences(byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_fseCTables_t* fseTables, ZSTD_fseCTablesMetadata_t* fseMetadata, void* workspace, nuint wkspSize, int writeEntropy) + private static nuint ZSTD_estimateSubBlockSize_sequences( + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) { /* Use hard coded size of 3 bytes */ const nuint sequencesSectionHeaderSize = 3; nuint cSeqSizeEstimate = 0; if (nbSeq == 0) return sequencesSectionHeaderSize; - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, 31, nbSeq, fseTables->offcodeCTable, null, OF_defaultNorm, OF_defaultNormLog, 28, workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, 35, nbSeq, fseTables->litlengthCTable, LL_bits, LL_defaultNorm, LL_defaultNormLog, 35, workspace, wkspSize); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, 52, nbSeq, fseTables->matchlengthCTable, ML_bits, ML_defaultNorm, ML_defaultNormLog, 52, workspace, wkspSize); + cSeqSizeEstimate += 
ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->ofType, + ofCodeTable, + 31, + nbSeq, + fseTables->offcodeCTable, + null, + OF_defaultNorm, + OF_defaultNormLog, + 28, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->llType, + llCodeTable, + 35, + nbSeq, + fseTables->litlengthCTable, + LL_bits, + LL_defaultNorm, + LL_defaultNormLog, + 35, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->mlType, + mlCodeTable, + 52, + nbSeq, + fseTables->matchlengthCTable, + ML_bits, + ML_defaultNorm, + ML_defaultNormLog, + 52, + workspace, + wkspSize + ); if (writeEntropy != 0) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } - private static EstimatedBlockSize ZSTD_estimateSubBlockSize(byte* literals, nuint litSize, byte* ofCodeTable, byte* llCodeTable, byte* mlCodeTable, nuint nbSeq, ZSTD_entropyCTables_t* entropy, ZSTD_entropyCTablesMetadata_t* entropyMetadata, void* workspace, nuint wkspSize, int writeLitEntropy, int writeSeqEntropy) + private static EstimatedBlockSize ZSTD_estimateSubBlockSize( + byte* literals, + nuint litSize, + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize, + int writeLitEntropy, + int writeSeqEntropy + ) { EstimatedBlockSize ebs; - ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize, &entropy->huf, &entropyMetadata->hufMetadata, workspace, wkspSize, writeLitEntropy); - ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, workspace, wkspSize, writeSeqEntropy); + ebs.estLitSize = ZSTD_estimateSubBlockSize_literal( + literals, + litSize, + &entropy->huf, + &entropyMetadata->hufMetadata, + workspace, + wkspSize, + writeLitEntropy + ); + 
ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences( + ofCodeTable, + llCodeTable, + mlCodeTable, + nbSeq, + &entropy->fse, + &entropyMetadata->fseMetadata, + workspace, + wkspSize, + writeSeqEntropy + ); ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize; return ebs; } private static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t* fseMetadata) { - if (fseMetadata->llType == SymbolEncodingType_e.set_compressed || fseMetadata->llType == SymbolEncodingType_e.set_rle) + if ( + fseMetadata->llType == SymbolEncodingType_e.set_compressed + || fseMetadata->llType == SymbolEncodingType_e.set_rle + ) return 1; - if (fseMetadata->mlType == SymbolEncodingType_e.set_compressed || fseMetadata->mlType == SymbolEncodingType_e.set_rle) + if ( + fseMetadata->mlType == SymbolEncodingType_e.set_compressed + || fseMetadata->mlType == SymbolEncodingType_e.set_rle + ) return 1; - if (fseMetadata->ofType == SymbolEncodingType_e.set_compressed || fseMetadata->ofType == SymbolEncodingType_e.set_rle) + if ( + fseMetadata->ofType == SymbolEncodingType_e.set_compressed + || fseMetadata->ofType == SymbolEncodingType_e.set_rle + ) return 1; return 0; } private static nuint countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seqCount) { - nuint n, total = 0; + nuint n, + total = 0; assert(sp != null); for (n = 0; n < seqCount; n++) { @@ -371,9 +627,18 @@ private static nuint countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seq return total; } - private static nuint sizeBlockSequences(SeqDef_s* sp, nuint nbSeqs, nuint targetBudget, nuint avgLitCost, nuint avgSeqCost, int firstSubBlock) + private static nuint sizeBlockSequences( + SeqDef_s* sp, + nuint nbSeqs, + nuint targetBudget, + nuint avgLitCost, + nuint avgSeqCost, + int firstSubBlock + ) { - nuint n, budget = 0, inSize = 0; + nuint n, + budget = 0, + inSize = 0; /* generous estimate */ nuint headerSize = (nuint)firstSubBlock * 120 * 256; assert(firstSubBlock == 0 || firstSubBlock == 1); @@ -401,7 +666,21 
@@ private static nuint sizeBlockSequences(SeqDef_s* sp, nuint nbSeqs, nuint target * Sub-blocks are all compressed, except the last one when beneficial. * @return : compressed size of the super block (which features multiple ZSTD blocks) * or 0 if it failed to compress. */ - private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, ZSTD_entropyCTablesMetadata_t* entropyMetadata, ZSTD_CCtx_params_s* cctxParams, void* dst, nuint dstCapacity, void* src, nuint srcSize, int bmi2, uint lastBlock, void* workspace, nuint wkspSize) + private static nuint ZSTD_compressSubBlock_multi( + SeqStore_t* seqStorePtr, + ZSTD_compressedBlockState_t* prevCBlock, + ZSTD_compressedBlockState_t* nextCBlock, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int bmi2, + uint lastBlock, + void* workspace, + nuint wkspSize + ) { SeqDef_s* sstart = seqStorePtr->sequencesStart; SeqDef_s* send = seqStorePtr->sequences; @@ -422,17 +701,37 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c byte* ofCodePtr = seqStorePtr->ofCode; /* enforce minimum size, to reduce undesirable side effects */ const nuint minTarget = 1340; - nuint targetCBlockSize = minTarget > cctxParams->targetCBlockSize ? minTarget : cctxParams->targetCBlockSize; - int writeLitEntropy = entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0; + nuint targetCBlockSize = + minTarget > cctxParams->targetCBlockSize ? minTarget : cctxParams->targetCBlockSize; + int writeLitEntropy = + entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 
1 : 0; int writeSeqEntropy = 1; if (nbSeqs > 0) { - EstimatedBlockSize ebs = ZSTD_estimateSubBlockSize(lp, nbLiterals, ofCodePtr, llCodePtr, mlCodePtr, nbSeqs, &nextCBlock->entropy, entropyMetadata, workspace, wkspSize, writeLitEntropy, writeSeqEntropy); + EstimatedBlockSize ebs = ZSTD_estimateSubBlockSize( + lp, + nbLiterals, + ofCodePtr, + llCodePtr, + mlCodePtr, + nbSeqs, + &nextCBlock->entropy, + entropyMetadata, + workspace, + wkspSize, + writeLitEntropy, + writeSeqEntropy + ); /* quick estimation */ nuint avgLitCost = nbLiterals != 0 ? ebs.estLitSize * 256 / nbLiterals : 256; nuint avgSeqCost = (ebs.estBlockSize - ebs.estLitSize) * 256 / nbSeqs; - nuint nbSubBlocks = (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize > 1 ? (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize : 1; - nuint n, avgBlockBudget, blockBudgetSupp = 0; + nuint nbSubBlocks = + (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize > 1 + ? (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize + : 1; + nuint n, + avgBlockBudget, + blockBudgetSupp = 0; avgBlockBudget = ebs.estBlockSize * 256 / nbSubBlocks; if (ebs.estBlockSize > srcSize) return 0; @@ -440,7 +739,14 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c for (n = 0; n < nbSubBlocks - 1; n++) { /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ - nuint seqCount = sizeBlockSequences(sp, (nuint)(send - sp), avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n == 0 ? 1 : 0); + nuint seqCount = sizeBlockSequences( + sp, + (nuint)(send - sp), + avgBlockBudget + blockBudgetSupp, + avgLitCost, + avgSeqCost, + n == 0 ? 
1 : 0 + ); assert(seqCount <= (nuint)(send - sp)); if (sp + seqCount == send) break; @@ -449,8 +755,33 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c int litEntropyWritten = 0; int seqEntropyWritten = 0; nuint litSize = countLiterals(seqStorePtr, sp, seqCount); - nuint decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0); - nuint cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, (nuint)(oend - op), bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, 0); + nuint decompressedSize = ZSTD_seqDecompressedSize( + seqStorePtr, + sp, + seqCount, + litSize, + 0 + ); + nuint cSize = ZSTD_compressSubBlock( + &nextCBlock->entropy, + entropyMetadata, + sp, + seqCount, + lp, + litSize, + llCodePtr, + mlCodePtr, + ofCodePtr, + cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + writeSeqEntropy, + &litEntropyWritten, + &seqEntropyWritten, + 0 + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -490,8 +821,33 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c int seqEntropyWritten = 0; nuint litSize = (nuint)(lend - lp); nuint seqCount = (nuint)(send - sp); - nuint decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1); - nuint cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, sp, seqCount, lp, litSize, llCodePtr, mlCodePtr, ofCodePtr, cctxParams, op, (nuint)(oend - op), bmi2, writeLitEntropy, writeSeqEntropy, &litEntropyWritten, &seqEntropyWritten, lastBlock); + nuint decompressedSize = ZSTD_seqDecompressedSize( + seqStorePtr, + sp, + seqCount, + litSize, + 1 + ); + nuint cSize = ZSTD_compressSubBlock( + &nextCBlock->entropy, + entropyMetadata, + sp, + seqCount, + lp, + litSize, + llCodePtr, + mlCodePtr, + ofCodePtr, + cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + 
writeSeqEntropy, + &litEntropyWritten, + &seqEntropyWritten, + lastBlock + ); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -525,10 +881,17 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c if (writeLitEntropy != 0) { - memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, (uint)sizeof(ZSTD_hufCTables_t)); + memcpy( + &nextCBlock->entropy.huf, + &prevCBlock->entropy.huf, + (uint)sizeof(ZSTD_hufCTables_t) + ); } - if (writeSeqEntropy != 0 && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata) != 0) + if ( + writeSeqEntropy != 0 + && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata) != 0 + ) { return 0; } @@ -555,7 +918,11 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c memcpy(&rep, prevCBlock->rep, (uint)sizeof(repcodes_s)); for (seq = sstart; seq < sp; ++seq) { - ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0 ? 1U : 0U); + ZSTD_updateRep( + rep.rep, + seq->offBase, + ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0 ? 1U : 0U + ); } memcpy(nextCBlock->rep, &rep, (uint)sizeof(repcodes_s)); @@ -568,18 +935,47 @@ private static nuint ZSTD_compressSubBlock_multi(SeqStore_t* seqStorePtr, ZSTD_c /* ZSTD_compressSuperBlock() : * Used to compress a super block when targetCBlockSize is being used. * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. 
*/ - private static nuint ZSTD_compressSuperBlock(ZSTD_CCtx_s* zc, void* dst, nuint dstCapacity, void* src, nuint srcSize, uint lastBlock) + private static nuint ZSTD_compressSuperBlock( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) { ZSTD_entropyCTablesMetadata_t entropyMetadata; { - nuint err_code = ZSTD_buildBlockEntropyStats(&zc->seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, &entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize); + nuint err_code = ZSTD_buildBlockEntropyStats( + &zc->seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + &entropyMetadata, + zc->tmpWorkspace, + zc->tmpWkspSize + ); if (ERR_isError(err_code)) { return err_code; } } - return ZSTD_compressSubBlock_multi(&zc->seqStore, zc->blockState.prevCBlock, zc->blockState.nextCBlock, &entropyMetadata, &zc->appliedParams, dst, dstCapacity, src, srcSize, zc->bmi2, lastBlock, zc->tmpWorkspace, zc->tmpWkspSize); + return ZSTD_compressSubBlock_multi( + &zc->seqStore, + zc->blockState.prevCBlock, + zc->blockState.nextCBlock, + &entropyMetadata, + &zc->appliedParams, + dst, + dstCapacity, + src, + srcSize, + zc->bmi2, + lastBlock, + zc->tmpWorkspace, + zc->tmpWkspSize + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs index 8f6063a52..414e75db0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs @@ -1,6 +1,6 @@ +using System.Diagnostics; using System.Runtime.CompilerServices; using static ZstdSharp.UnsafeHelper; -using System.Diagnostics; namespace ZstdSharp.Unsafe { @@ -143,12 +143,18 @@ private static nuint ZSTD_cwksp_bytes_to_align_ptr(void* ptr, nuint alignBytes) * Returns a 0 on success, or zstd error */ 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) + private static nuint ZSTD_cwksp_internal_advance_phase( + ZSTD_cwksp* ws, + ZSTD_cwksp_alloc_phase_e phase + ) { assert(phase >= ws->phase); if (phase > ws->phase) { - if (ws->phase < ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once && phase >= ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once) + if ( + ws->phase < ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + && phase >= ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + ) { ws->tableValidEnd = ws->objectEnd; ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); @@ -158,7 +164,9 @@ private static nuint ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwks void* objectEnd = (byte*)alloc + bytesToAlign; if (objectEnd > ws->workspaceEnd) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } ws->objectEnd = objectEnd; @@ -190,7 +198,11 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) * Internal function. Do not use directly. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, nuint bytes, ZSTD_cwksp_alloc_phase_e phase) + private static void* ZSTD_cwksp_reserve_internal( + ZSTD_cwksp* ws, + nuint bytes, + ZSTD_cwksp_alloc_phase_e phase + ) { void* alloc; if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) @@ -208,7 +220,11 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static byte* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, nuint bytes) { - return (byte*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_buffers); + return (byte*)ZSTD_cwksp_reserve_internal( + ws, + bytes, + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_buffers + ); } /** @@ -224,11 +240,23 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) private static void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, nuint bytes) { nuint alignedBytes = ZSTD_cwksp_align(bytes, 64); - void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once); + void* ptr = ZSTD_cwksp_reserve_internal( + ws, + alignedBytes, + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + ); assert(((nuint)ptr & 64 - 1) == 0); if (ptr != null && ptr < ws->initOnceStart) { - memset(ptr, 0, (uint)((nuint)((byte*)ws->initOnceStart - (byte*)ptr) < alignedBytes ? (nuint)((byte*)ws->initOnceStart - (byte*)ptr) : alignedBytes)); + memset( + ptr, + 0, + (uint)( + (nuint)((byte*)ws->initOnceStart - (byte*)ptr) < alignedBytes + ? 
(nuint)((byte*)ws->initOnceStart - (byte*)ptr) + : alignedBytes + ) + ); ws->initOnceStart = ptr; } @@ -241,7 +269,11 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, nuint bytes) { - void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, 64), ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned); + void* ptr = ZSTD_cwksp_reserve_internal( + ws, + ZSTD_cwksp_align(bytes, 64), + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned + ); assert(((nuint)ptr & 64 - 1) == 0); return ptr; } @@ -254,7 +286,8 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, nuint bytes) { - ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; + ZSTD_cwksp_alloc_phase_e phase = + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; void* alloc; void* end; void* top; @@ -297,7 +330,10 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) assert((nuint)alloc % (nuint)sizeof(void*) == 0); assert(bytes % (nuint)sizeof(void*) == 0); ZSTD_cwksp_assert_internal_consistency(ws); - if (ws->phase != ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) + if ( + ws->phase != ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects + || end > ws->workspaceEnd + ) { ws->allocFailed = 1; return null; @@ -314,7 +350,11 @@ private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) * Note : should happen only once, at workspace first initialization */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, nuint byteSize, nuint alignment) + private static void* ZSTD_cwksp_reserve_object_aligned( + ZSTD_cwksp* ws, + nuint byteSize, + nuint alignment + ) { nuint mask = 
alignment - 1; nuint surplus = alignment > (nuint)sizeof(void*) ? alignment - (nuint)sizeof(void*) : 0; @@ -359,7 +399,11 @@ private static void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { - memset(ws->tableValidEnd, 0, (uint)(nuint)((byte*)ws->tableEnd - (byte*)ws->tableValidEnd)); + memset( + ws->tableValidEnd, + 0, + (uint)(nuint)((byte*)ws->tableEnd - (byte*)ws->tableValidEnd) + ); } ZSTD_cwksp_mark_tables_clean(ws); @@ -403,7 +447,8 @@ private static nuint ZSTD_cwksp_sizeof(ZSTD_cwksp* ws) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) { - return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); + return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) + + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); } /** @@ -412,7 +457,12 @@ private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) * buffer, if present, must be separately freed). */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, nuint size, ZSTD_cwksp_static_alloc_e isStatic) + private static void ZSTD_cwksp_init( + ZSTD_cwksp* ws, + void* start, + nuint size, + ZSTD_cwksp_static_alloc_e isStatic + ) { assert(((nuint)start & (nuint)(sizeof(void*) - 1)) == 0); ws->workspace = start; @@ -436,7 +486,12 @@ private static nuint ZSTD_cwksp_create(ZSTD_cwksp* ws, nuint size, ZSTD_customMe return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc); + ZSTD_cwksp_init( + ws, + workspace, + size, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc + ); return 0; } @@ -470,9 +525,16 @@ private static int ZSTD_cwksp_reserve_failed(ZSTD_cwksp* ws) * actual amount of space used. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_estimated_space_within_bounds(ZSTD_cwksp* ws, nuint estimatedSpace) + private static int ZSTD_cwksp_estimated_space_within_bounds( + ZSTD_cwksp* ws, + nuint estimatedSpace + ) { - return estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) && ZSTD_cwksp_used(ws) <= estimatedSpace ? 1 : 0; + return + estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) + && ZSTD_cwksp_used(ws) <= estimatedSpace + ? 1 + : 0; } /*-************************************* @@ -499,11 +561,18 @@ private static int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, nuint additionalNe [MethodImpl(MethodImplOptions.AggressiveInlining)] private static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, nuint additionalNeededSpace) { - return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 && ws->workspaceOversizedDuration > 128 ? 1 : 0; + return + ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 + && ws->workspaceOversizedDuration > 128 + ? 
1 + : 0; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws, nuint additionalNeededSpace) + private static void ZSTD_cwksp_bump_oversized_duration( + ZSTD_cwksp* ws, + nuint additionalNeededSpace + ) { if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0) { @@ -515,4 +584,4 @@ private static void ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws, nuint add } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs index d7edbcd39..49d786a5a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs @@ -54,7 +54,10 @@ private static void ZSTD_copyDDictParameters(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* dd } } - private static nuint ZSTD_loadEntropy_intoDDict(ZSTD_DDict_s* ddict, ZSTD_dictContentType_e dictContentType) + private static nuint ZSTD_loadEntropy_intoDDict( + ZSTD_DDict_s* ddict, + ZSTD_dictContentType_e dictContentType + ) { ddict->dictID = 0; ddict->entropyPresent = 0; @@ -72,13 +75,17 @@ private static nuint ZSTD_loadEntropy_intoDDict(ZSTD_DDict_s* ddict, ZSTD_dictCo if (magic != 0xEC30A437) { if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); return 0; } } ddict->dictID = MEM_readLE32((sbyte*)ddict->dictContent + 4); - if (ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize))) + if ( + ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize)) + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } @@ -87,9 +94,19 @@ private static nuint ZSTD_loadEntropy_intoDDict(ZSTD_DDict_s* ddict, ZSTD_dictCo return 0; } - 
private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + private static nuint ZSTD_initDDict_internal( + ZSTD_DDict_s* ddict, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) { - if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef || dict == null || dictSize == 0) + if ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + || dict == null + || dictSize == 0 + ) { ddict->dictBuffer = null; ddict->dictContent = dict; @@ -120,17 +137,35 @@ private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nu return 0; } - public static ZSTD_DDict_s* ZSTD_createDDict_advanced(void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) + public static ZSTD_DDict_s* ZSTD_createDDict_advanced( + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem + ) { - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) + != 0 + ) return null; { - ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DDict_s), customMem); + ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DDict_s), + customMem + ); if (ddict == null) return null; ddict->cMem = customMem; { - nuint initResult = ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType); + nuint initResult = ZSTD_initDDict_internal( + ddict, + dict, + dictSize, + dictLoadMethod, + dictContentType + ); if (ERR_isError(initResult)) { ZSTD_freeDDict(ddict); @@ -152,9 +187,15 @@ private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nu { customAlloc = null, customFree = null, - opaque = null + opaque = null, }; - return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto, allocator); + return ZSTD_createDDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto, + allocator + ); } /*! 
ZSTD_createDDict_byReference() : @@ -167,14 +208,29 @@ private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nu { customAlloc = null, customFree = null, - opaque = null + opaque = null, }; - return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto, allocator); + return ZSTD_createDDict_advanced( + dictBuffer, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto, + allocator + ); } - public static ZSTD_DDict_s* ZSTD_initStaticDDict(void* sBuffer, nuint sBufferSize, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + public static ZSTD_DDict_s* ZSTD_initStaticDDict( + void* sBuffer, + nuint sBufferSize, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) { - nuint neededSpace = (nuint)sizeof(ZSTD_DDict_s) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); + nuint neededSpace = + (nuint)sizeof(ZSTD_DDict_s) + + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)sBuffer; assert(sBuffer != null); assert(dict != null); @@ -188,7 +244,17 @@ private static nuint ZSTD_initDDict_internal(ZSTD_DDict_s* ddict, void* dict, nu dict = ddict + 1; } - if (ERR_isError(ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType))) + if ( + ERR_isError( + ZSTD_initDDict_internal( + ddict, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dictContentType + ) + ) + ) return null; return ddict; } @@ -211,9 +277,13 @@ public static nuint ZSTD_freeDDict(ZSTD_DDict_s* ddict) /*! ZSTD_estimateDDictSize() : * Estimate amount of memory that will be needed to create a dictionary for decompression. 
* Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ - public static nuint ZSTD_estimateDDictSize(nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) + public static nuint ZSTD_estimateDDictSize( + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod + ) { - return (nuint)sizeof(ZSTD_DDict_s) + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); + return (nuint)sizeof(ZSTD_DDict_s) + + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); } public static nuint ZSTD_sizeof_DDict(ZSTD_DDict_s* ddict) @@ -234,4 +304,4 @@ public static uint ZSTD_getDictID_fromDDict(ZSTD_DDict_s* ddict) return ddict->dictID; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs index 65454a56c..737aae945 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs @@ -17,7 +17,10 @@ private static nuint ZSTD_DDictHashSet_getIndex(ZSTD_DDictHashSet* hashSet, uint * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. * Returns 0 if successful, or a zstd error code if something went wrong. */ - private static nuint ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, ZSTD_DDict_s* ddict) + private static nuint ZSTD_DDictHashSet_emplaceDDict( + ZSTD_DDictHashSet* hashSet, + ZSTD_DDict_s* ddict + ) { uint dictID = ZSTD_getDictID_fromDDict(ddict); nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); @@ -48,10 +51,16 @@ private static nuint ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, * rehashes all values, allocates new table, frees old table. * Returns 0 on success, otherwise a zstd error code. 
*/ - private static nuint ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) + private static nuint ZSTD_DDictHashSet_expand( + ZSTD_DDictHashSet* hashSet, + ZSTD_customMem customMem + ) { nuint newTableSize = hashSet->ddictPtrTableSize * 2; - ZSTD_DDict_s** newTable = (ZSTD_DDict_s**)ZSTD_customCalloc((nuint)sizeof(ZSTD_DDict_s*) * newTableSize, customMem); + ZSTD_DDict_s** newTable = (ZSTD_DDict_s**)ZSTD_customCalloc( + (nuint)sizeof(ZSTD_DDict_s*) * newTableSize, + customMem + ); ZSTD_DDict_s** oldTable = hashSet->ddictPtrTable; nuint oldTableSize = hashSet->ddictPtrTableSize; nuint i; @@ -82,7 +91,10 @@ private static nuint ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_c /* Fetches a DDict with the given dictID * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. */ - private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, uint dictID) + private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict( + ZSTD_DDictHashSet* hashSet, + uint dictID + ) { nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; @@ -109,10 +121,16 @@ private static nuint ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_c */ private static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { - ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DDictHashSet), customMem); + ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DDictHashSet), + customMem + ); if (ret == null) return null; - ret->ddictPtrTable = (ZSTD_DDict_s**)ZSTD_customCalloc((nuint)(64 * sizeof(ZSTD_DDict_s*)), customMem); + ret->ddictPtrTable = (ZSTD_DDict_s**)ZSTD_customCalloc( + (nuint)(64 * sizeof(ZSTD_DDict_s*)), + customMem + ); if (ret->ddictPtrTable == null) { ZSTD_customFree(ret, customMem); @@ -127,7 +145,10 @@ private static nuint 
ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_c /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. * Note: The ZSTD_DDict* within the table are NOT freed. */ - private static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) + private static void ZSTD_freeDDictHashSet( + ZSTD_DDictHashSet* hashSet, + ZSTD_customMem customMem + ) { if (hashSet != null && hashSet->ddictPtrTable != null) { @@ -143,7 +164,11 @@ private static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_custo /* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. * Returns 0 on success, or a ZSTD error. */ - private static nuint ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, ZSTD_DDict_s* ddict, ZSTD_customMem customMem) + private static nuint ZSTD_DDictHashSet_addDDict( + ZSTD_DDictHashSet* hashSet, + ZSTD_DDict_s* ddict, + ZSTD_customMem customMem + ) { if (hashSet->ddictPtrCount * 4 / hashSet->ddictPtrTableSize * 3 != 0) { @@ -172,7 +197,10 @@ public static nuint ZSTD_sizeof_DCtx(ZSTD_DCtx_s* dctx) { if (dctx == null) return 0; - return (nuint)sizeof(ZSTD_DCtx_s) + ZSTD_sizeof_DDict(dctx->ddictLocal) + dctx->inBuffSize + dctx->outBuffSize; + return (nuint)sizeof(ZSTD_DCtx_s) + + ZSTD_sizeof_DDict(dctx->ddictLocal) + + dctx->inBuffSize + + dctx->outBuffSize; } public static nuint ZSTD_estimateDCtxSize() @@ -183,7 +211,10 @@ public static nuint ZSTD_estimateDCtxSize() private static nuint ZSTD_startingInputLength(ZSTD_format_e format) { nuint startingInputLength = (nuint)(format == ZSTD_format_e.ZSTD_f_zstd1 ? 
5 : 1); - assert(format == ZSTD_format_e.ZSTD_f_zstd1 || format == ZSTD_format_e.ZSTD_f_zstd1_magicless); + assert( + format == ZSTD_format_e.ZSTD_f_zstd1 + || format == ZSTD_format_e.ZSTD_f_zstd1_magicless + ); return startingInputLength; } @@ -233,10 +264,16 @@ private static void ZSTD_initDCtx_internal(ZSTD_DCtx_s* dctx) private static ZSTD_DCtx_s* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { - if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) return null; { - ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc((nuint)sizeof(ZSTD_DCtx_s), customMem); + ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DCtx_s), + customMem + ); if (dctx == null) return null; dctx->customMem = customMem; @@ -308,7 +345,10 @@ private static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx_s* dctx) assert(dctx->refMultipleDDicts != default && dctx->ddictSet != null); if (dctx->ddict != null) { - ZSTD_DDict_s* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID); + ZSTD_DDict_s* frameDDict = ZSTD_DDictHashSet_getDDict( + dctx->ddictSet, + dctx->fParams.dictID + ); if (frameDDict != null) { ZSTD_clearDict(dctx); @@ -361,7 +401,11 @@ public static uint ZSTD_isSkippableFrame(void* buffer, nuint size) * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. 
* @return : size of the Frame Header * or an error code, which can be tested with ZSTD_isError() */ - private static nuint ZSTD_frameHeaderSize_internal(void* src, nuint srcSize, ZSTD_format_e format) + private static nuint ZSTD_frameHeaderSize_internal( + void* src, + nuint srcSize, + ZSTD_format_e format + ) { nuint minInputSize = ZSTD_startingInputLength(format); if (srcSize < minInputSize) @@ -374,7 +418,11 @@ private static nuint ZSTD_frameHeaderSize_internal(void* src, nuint srcSize, ZST uint dictID = (uint)(fhd & 3); uint singleSegment = (uint)(fhd >> 5 & 1); uint fcsId = (uint)(fhd >> 6); - return minInputSize + (nuint)(singleSegment == 0 ? 1 : 0) + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0); + return minInputSize + + (nuint)(singleSegment == 0 ? 1 : 0) + + ZSTD_did_fieldSize[dictID] + + ZSTD_fcs_fieldSize[fcsId] + + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0); } } @@ -393,7 +441,12 @@ public static nuint ZSTD_frameHeaderSize(void* src, nuint srcSize) * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, ** or an error code, which can be tested using ZSTD_isError() */ - public static nuint ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* src, nuint srcSize, ZSTD_format_e format) + public static nuint ZSTD_getFrameHeader_advanced( + ZSTD_frameHeader* zfhPtr, + void* src, + nuint srcSize, + ZSTD_format_e format + ) { byte* ip = (byte*)src; nuint minInputSize = ZSTD_startingInputLength(format); @@ -424,7 +477,9 @@ public static nuint ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* memcpy(hbuf, src, (uint)toCopy); if ((MEM_readLE32(hbuf) & 0xFFFFFFF0) != 0x184D2A50) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown) + ); } } } @@ -444,7 +499,7 @@ public static nuint 
ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* frameType = ZSTD_frameType_e.ZSTD_skippableFrame, dictID = MEM_readLE32(src) - 0x184D2A50, headerSize = 8, - frameContentSize = MEM_readLE32((sbyte*)src + 4) + frameContentSize = MEM_readLE32((sbyte*)src + 4), }; return 0; } @@ -471,7 +526,9 @@ public static nuint ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* ulong frameContentSize = unchecked(0UL - 1); if ((fhdByte & 0x08) != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } if (singleSegment == 0) @@ -480,7 +537,9 @@ public static nuint ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, void* uint windowLog = (uint)((wlByte >> 3) + 10); if (windowLog > (uint)(sizeof(nuint) == 4 ? 30 : 31)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); } windowSize = 1UL << (int)windowLog; @@ -584,7 +643,9 @@ private static nuint readSkippableFrameSize(void* src, nuint srcSize) sizeU32 = MEM_readLE32((byte*)src + 4); if (sizeU32 + 8 < sizeU32) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } { @@ -609,7 +670,13 @@ private static nuint readSkippableFrameSize(void* src, nuint srcSize) * * @return : number of bytes written or a ZSTD error. 
*/ - public static nuint ZSTD_readSkippableFrame(void* dst, nuint dstCapacity, uint* magicVariant, void* src, nuint srcSize) + public static nuint ZSTD_readSkippableFrame( + void* dst, + nuint dstCapacity, + uint* magicVariant, + void* src, + nuint srcSize + ) { if (srcSize < 8) { @@ -622,7 +689,9 @@ public static nuint ZSTD_readSkippableFrame(void* dst, nuint dstCapacity, uint* nuint skippableContentSize = skippableFrameSize - 8; if (ZSTD_isSkippableFrame(src, srcSize) == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } if (skippableFrameSize < 8 || skippableFrameSize > srcSize) @@ -709,7 +778,12 @@ public static ulong ZSTD_getDecompressedSize(void* src, nuint srcSize) * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint headerSize) { - nuint result = ZSTD_getFrameHeader_advanced(&dctx->fParams, src, headerSize, dctx->format); + nuint result = ZSTD_getFrameHeader_advanced( + &dctx->fParams, + src, + headerSize, + dctx->format + ); if (ERR_isError(result)) return result; if (result > 0) @@ -717,7 +791,10 @@ private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - if (dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts && dctx->ddictSet != null) + if ( + dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts + && dctx->ddictSet != null + ) { ZSTD_DCtx_selectFrameDDict(dctx); } @@ -727,7 +804,9 @@ private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); } - dctx->validateChecksum = (uint)(dctx->fParams.checksumFlag != 0 && dctx->forceIgnoreChecksum == default ? 
1 : 0); + dctx->validateChecksum = (uint)( + dctx->fParams.checksumFlag != 0 && dctx->forceIgnoreChecksum == default ? 1 : 0 + ); if (dctx->validateChecksum != 0) ZSTD_XXH64_reset(&dctx->xxhState, 0); dctx->processedCSize += headerSize; @@ -743,14 +822,25 @@ private static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(nuint ret) return frameSizeInfo; } - private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(void* src, nuint srcSize, ZSTD_format_e format) + private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo( + void* src, + nuint srcSize, + ZSTD_format_e format + ) { ZSTD_frameSizeInfo frameSizeInfo; frameSizeInfo = new ZSTD_frameSizeInfo(); - if (format == ZSTD_format_e.ZSTD_f_zstd1 && srcSize >= 8 && (MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) + if ( + format == ZSTD_format_e.ZSTD_f_zstd1 + && srcSize >= 8 + && (MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50 + ) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); - assert(ERR_isError(frameSizeInfo.compressedSize) || frameSizeInfo.compressedSize <= srcSize); + assert( + ERR_isError(frameSizeInfo.compressedSize) + || frameSizeInfo.compressedSize <= srcSize + ); return frameSizeInfo; } else @@ -765,7 +855,9 @@ private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(void* src, nuint srcSiz if (ERR_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) - return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + return ZSTD_errorFrameSizeInfo( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); } ip += zfh.headerSize; @@ -777,7 +869,9 @@ private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(void* src, nuint srcSiz if (ERR_isError(cBlockSize)) return ZSTD_errorFrameSizeInfo(cBlockSize); if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) - return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + return ZSTD_errorFrameSizeInfo( + 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); ip += ZSTD_blockHeaderSize + cBlockSize; remainingSize -= ZSTD_blockHeaderSize + cBlockSize; nbBlocks++; @@ -788,18 +882,27 @@ private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(void* src, nuint srcSiz if (zfh.checksumFlag != 0) { if (remainingSize < 4) - return ZSTD_errorFrameSizeInfo(unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong))); + return ZSTD_errorFrameSizeInfo( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); ip += 4; } frameSizeInfo.nbBlocks = nbBlocks; frameSizeInfo.compressedSize = (nuint)(ip - ipstart); - frameSizeInfo.decompressedBound = zfh.frameContentSize != unchecked(0UL - 1) ? zfh.frameContentSize : (ulong)nbBlocks * zfh.blockSizeMax; + frameSizeInfo.decompressedBound = + zfh.frameContentSize != unchecked(0UL - 1) + ? zfh.frameContentSize + : (ulong)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } - private static nuint ZSTD_findFrameCompressedSize_advanced(void* src, nuint srcSize, ZSTD_format_e format) + private static nuint ZSTD_findFrameCompressedSize_advanced( + void* src, + nuint srcSize, + ZSTD_format_e format + ) { ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); return frameSizeInfo.compressedSize; @@ -824,7 +927,11 @@ public static ulong ZSTD_decompressBound(void* src, nuint srcSize) ulong bound = 0; while (srcSize > 0) { - ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( + src, + srcSize, + ZSTD_format_e.ZSTD_f_zstd1 + ); nuint compressedSize = frameSizeInfo.compressedSize; ulong decompressedBound = frameSizeInfo.decompressedBound; if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) @@ -867,7 +974,11 @@ public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) uint maxBlockSize = 0; while (srcSize > 0) { - ZSTD_frameSizeInfo 
frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( + src, + srcSize, + ZSTD_format_e.ZSTD_f_zstd1 + ); nuint compressedSize = frameSizeInfo.compressedSize; ulong decompressedBound = frameSizeInfo.decompressedBound; ZSTD_frameHeader zfh; @@ -886,7 +997,8 @@ public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) margin += zfh.headerSize; margin += (nuint)(zfh.checksumFlag != 0 ? 4 : 0); margin += 3 * frameSizeInfo.nbBlocks; - maxBlockSize = maxBlockSize > zfh.blockSizeMax ? maxBlockSize : zfh.blockSizeMax; + maxBlockSize = + maxBlockSize > zfh.blockSizeMax ? maxBlockSize : zfh.blockSizeMax; } else { @@ -912,7 +1024,12 @@ public static nuint ZSTD_insertBlock(ZSTD_DCtx_s* dctx, void* blockStart, nuint return blockSize; } - private static nuint ZSTD_copyRawBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_copyRawBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { if (srcSize > dstCapacity) { @@ -948,15 +1065,24 @@ private static nuint ZSTD_setRleBlock(void* dst, nuint dstCapacity, byte b, nuin return regenSize; } - private static void ZSTD_DCtx_trace_end(ZSTD_DCtx_s* dctx, ulong uncompressedSize, ulong compressedSize, int streaming) - { - } + private static void ZSTD_DCtx_trace_end( + ZSTD_DCtx_s* dctx, + ulong uncompressedSize, + ulong compressedSize, + int streaming + ) { } /*! ZSTD_decompressFrame() : * @dctx must be properly initialized * will update *srcPtr and *srcSizePtr, * to make *srcPtr progress by one frame. 
*/ - private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void** srcPtr, nuint* srcSizePtr) + private static nuint ZSTD_decompressFrame( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void** srcPtr, + nuint* srcSizePtr + ) { byte* istart = (byte*)*srcPtr; byte* ip = istart; @@ -964,13 +1090,20 @@ private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint ds byte* oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; byte* op = ostart; nuint remainingSrcSize = *srcSizePtr; - if (remainingSrcSize < (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) + ZSTD_blockHeaderSize) + if ( + remainingSrcSize + < (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) + ZSTD_blockHeaderSize + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } { - nuint frameHeaderSize = ZSTD_frameHeaderSize_internal(ip, (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1), dctx->format); + nuint frameHeaderSize = ZSTD_frameHeaderSize_internal( + ip, + (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1), + dctx->format + ); if (ERR_isError(frameHeaderSize)) return frameHeaderSize; if (remainingSrcSize < frameHeaderSize + ZSTD_blockHeaderSize) @@ -991,7 +1124,10 @@ private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint ds } if (dctx->maxBlockSizeParam != 0) - dctx->fParams.blockSizeMax = dctx->fParams.blockSizeMax < (uint)dctx->maxBlockSizeParam ? dctx->fParams.blockSizeMax : (uint)dctx->maxBlockSizeParam; + dctx->fParams.blockSizeMax = + dctx->fParams.blockSizeMax < (uint)dctx->maxBlockSizeParam + ? 
dctx->fParams.blockSizeMax + : (uint)dctx->maxBlockSizeParam; while (true) { byte* oBlockEnd = oend; @@ -1016,17 +1152,31 @@ private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint ds { case blockType_e.bt_compressed: assert(dctx->isFrameDecompression == 1); - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (nuint)(oBlockEnd - op), ip, cBlockSize, streaming_operation.not_streaming); + decodedSize = ZSTD_decompressBlock_internal( + dctx, + op, + (nuint)(oBlockEnd - op), + ip, + cBlockSize, + streaming_operation.not_streaming + ); break; case blockType_e.bt_raw: decodedSize = ZSTD_copyRawBlock(op, (nuint)(oend - op), ip, cBlockSize); break; case blockType_e.bt_rle: - decodedSize = ZSTD_setRleBlock(op, (nuint)(oBlockEnd - op), *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock( + op, + (nuint)(oBlockEnd - op), + *ip, + blockProperties.origSize + ); break; case blockType_e.bt_reserved: default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } { @@ -1090,7 +1240,16 @@ private static nuint ZSTD_decompressFrame(ZSTD_DCtx_s* dctx, void* dst, nuint ds return (nuint)(op - ostart); } - private static nuint ZSTD_decompressMultiFrame(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize, ZSTD_DDict_s* ddict) + private static nuint ZSTD_decompressMultiFrame( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_DDict_s* ddict + ) { void* dststart = dst; int moreThan1Frame = 0; @@ -1148,7 +1307,10 @@ private static nuint ZSTD_decompressMultiFrame(ZSTD_DCtx_s* dctx, void* dst, nui ZSTD_checkContinuity(dctx, dst, dstCapacity); { nuint res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); - if (ZSTD_getErrorCode(res) == ZSTD_ErrorCode.ZSTD_error_prefix_unknown && moreThan1Frame == 1) + if ( 
+ ZSTD_getErrorCode(res) == ZSTD_ErrorCode.ZSTD_error_prefix_unknown + && moreThan1Frame == 1 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } @@ -1178,9 +1340,26 @@ private static nuint ZSTD_decompressMultiFrame(ZSTD_DCtx_s* dctx, void* dst, nui * Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ - public static nuint ZSTD_decompress_usingDict(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, void* dict, nuint dictSize) - { - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, null); + public static nuint ZSTD_decompress_usingDict( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize + ) + { + return ZSTD_decompressMultiFrame( + dctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + null + ); } private static ZSTD_DDict_s* ZSTD_getDDict(ZSTD_DCtx_s* dctx) @@ -1206,9 +1385,22 @@ public static nuint ZSTD_decompress_usingDict(ZSTD_DCtx_s* dctx, void* dst, nuin * requires an allocated ZSTD_DCtx. * Compatible with sticky parameters (see below). */ - public static nuint ZSTD_decompressDCtx(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) - { - return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); + public static nuint ZSTD_decompressDCtx( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_decompress_usingDDict( + dctx, + dst, + dstCapacity, + src, + srcSize, + ZSTD_getDDict(dctx) + ); } /*! ZSTD_decompress() : @@ -1253,13 +1445,23 @@ public static nuint ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx_s* dctx) * * @param inputSize - The total amount of input that the caller currently has. 
*/ - private static nuint ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx_s* dctx, nuint inputSize) - { - if (!(dctx->stage == ZSTD_dStage.ZSTDds_decompressBlock || dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock)) + private static nuint ZSTD_nextSrcSizeToDecompressWithInputSize( + ZSTD_DCtx_s* dctx, + nuint inputSize + ) + { + if ( + !( + dctx->stage == ZSTD_dStage.ZSTDds_decompressBlock + || dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock + ) + ) return dctx->expected; if (dctx->bType != blockType_e.bt_raw) return dctx->expected; - return inputSize <= 1 ? 1 : inputSize <= dctx->expected ? inputSize : dctx->expected; + return inputSize <= 1 ? 1 + : inputSize <= dctx->expected ? inputSize + : dctx->expected; } public static ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx_s* dctx) @@ -1295,7 +1497,13 @@ private static int ZSTD_isSkipFrame(ZSTD_DCtx_s* dctx) * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) * or an error code, which can be tested using ZSTD_isError() */ - public static nuint ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_decompressContinue( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { if (srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize)) { @@ -1330,8 +1538,13 @@ public static nuint ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint case ZSTD_dStage.ZSTDds_decodeFrameHeader: assert(src != null); memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, (uint)srcSize); + { - nuint err_code = ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize); + nuint err_code = ZSTD_decodeFrameHeader( + dctx, + dctx->headerBuffer, + dctx->headerSize + ); if (ERR_isError(err_code)) { return err_code; @@ -1342,133 +1555,156 @@ public static nuint 
ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; return 0; case ZSTD_dStage.ZSTDds_decodeBlockHeader: + { + blockProperties_t bp; + nuint cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ERR_isError(cBlockSize)) + return cBlockSize; + if (cBlockSize > dctx->fParams.blockSizeMax) { - blockProperties_t bp; - nuint cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); - if (ERR_isError(cBlockSize)) - return cBlockSize; - if (cBlockSize > dctx->fParams.blockSizeMax) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - dctx->expected = cBlockSize; - dctx->bType = bp.blockType; - dctx->rleSize = bp.origSize; - if (cBlockSize != 0) - { - dctx->stage = bp.lastBlock != 0 ? ZSTD_dStage.ZSTDds_decompressLastBlock : ZSTD_dStage.ZSTDds_decompressBlock; - return 0; - } + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize != 0) + { + dctx->stage = + bp.lastBlock != 0 + ? 
ZSTD_dStage.ZSTDds_decompressLastBlock + : ZSTD_dStage.ZSTDds_decompressBlock; + return 0; + } - if (bp.lastBlock != 0) + if (bp.lastBlock != 0) + { + if (dctx->fParams.checksumFlag != 0) { - if (dctx->fParams.checksumFlag != 0) - { - dctx->expected = 4; - dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; - } - else - { - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - } + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; } else { - dctx->expected = ZSTD_blockHeaderSize; - dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; } - - return 0; + } + else + { + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; } + return 0; + } + case ZSTD_dStage.ZSTDds_decompressLastBlock: case ZSTD_dStage.ZSTDds_decompressBlock: + { + nuint rSize; + switch (dctx->bType) { - nuint rSize; - switch (dctx->bType) - { - case blockType_e.bt_compressed: - assert(dctx->isFrameDecompression == 1); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, streaming_operation.is_streaming); - dctx->expected = 0; - break; - case blockType_e.bt_raw: - assert(srcSize <= dctx->expected); - rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + case blockType_e.bt_compressed: + assert(dctx->isFrameDecompression == 1); + rSize = ZSTD_decompressBlock_internal( + dctx, + dst, + dstCapacity, + src, + srcSize, + streaming_operation.is_streaming + ); + dctx->expected = 0; + break; + case blockType_e.bt_raw: + assert(srcSize <= dctx->expected); + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + + { + nuint err_code = rSize; + if (ERR_isError(err_code)) { - nuint err_code = rSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - assert(rSize == srcSize); - dctx->expected -= rSize; - break; - case blockType_e.bt_rle: - rSize = ZSTD_setRleBlock(dst, dstCapacity, *(byte*)src, 
dctx->rleSize); - dctx->expected = 0; - break; - case blockType_e.bt_reserved: - default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + assert(rSize == srcSize); + dctx->expected -= rSize; + break; + case blockType_e.bt_rle: + rSize = ZSTD_setRleBlock(dst, dstCapacity, *(byte*)src, dctx->rleSize); + dctx->expected = 0; + break; + case blockType_e.bt_reserved: + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + { + nuint err_code = rSize; + if (ERR_isError(err_code)) { - nuint err_code = rSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - if (rSize > dctx->fParams.blockSizeMax) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + if (rSize > dctx->fParams.blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - dctx->decodedSize += rSize; - if (dctx->validateChecksum != 0) - ZSTD_XXH64_update(&dctx->xxhState, dst, rSize); - dctx->previousDstEnd = (sbyte*)dst + rSize; - if (dctx->expected > 0) + dctx->decodedSize += rSize; + if (dctx->validateChecksum != 0) + ZSTD_XXH64_update(&dctx->xxhState, dst, rSize); + dctx->previousDstEnd = (sbyte*)dst + rSize; + if (dctx->expected > 0) + { + return rSize; + } + + if (dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock) + { + if ( + dctx->fParams.frameContentSize != unchecked(0UL - 1) + && dctx->decodedSize != dctx->fParams.frameContentSize + ) { - return rSize; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - if (dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock) + if (dctx->fParams.checksumFlag != 0) { - if (dctx->fParams.frameContentSize != unchecked(0UL - 1) && dctx->decodedSize != dctx->fParams.frameContentSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - if (dctx->fParams.checksumFlag != 0) - { - 
dctx->expected = 4; - dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; - } - else - { - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - } + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; } else { - dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; - dctx->expected = ZSTD_blockHeaderSize; + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; } - - return rSize; + } + else + { + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; } + return rSize; + } + case ZSTD_dStage.ZSTDds_checkChecksum: assert(srcSize == 4); + { if (dctx->validateChecksum != 0) { @@ -1476,7 +1712,9 @@ public static nuint ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint uint check32 = MEM_readLE32(src); if (check32 != h32) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong) + ); } } @@ -1507,7 +1745,8 @@ public static nuint ZSTD_decompressContinue(ZSTD_DCtx_s* dctx, void* dst, nuint private static nuint ZSTD_refDictContent(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) { dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = (sbyte*)dict - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->virtualStart = + (sbyte*)dict - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); dctx->prefixStart = dict; dctx->previousDstEnd = (sbyte*)dict + dictSize; return 0; @@ -1516,7 +1755,11 @@ private static nuint ZSTD_refDictContent(ZSTD_DCtx_s* dctx, void* dict, nuint di /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. 
* @return : size of entropy tables read */ - private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dict, nuint dictSize) + private static nuint ZSTD_loadDEntropy( + ZSTD_entropyDTables_t* entropy, + void* dict, + nuint dictSize + ) { byte* dictPtr = (byte*)dict; byte* dictEnd = dictPtr + dictSize; @@ -1530,8 +1773,19 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic { /* use fse tables as temporary workspace; implies fse tables are grouped together */ void* workspace = &entropy->LLTable; - nuint workspaceSize = (nuint)(sizeof(ZSTD_seqSymbol) * 513 + sizeof(ZSTD_seqSymbol) * 257 + sizeof(ZSTD_seqSymbol) * 513); - nuint hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (nuint)(dictEnd - dictPtr), workspace, workspaceSize, 0); + nuint workspaceSize = (nuint)( + sizeof(ZSTD_seqSymbol) * 513 + + sizeof(ZSTD_seqSymbol) * 257 + + sizeof(ZSTD_seqSymbol) * 513 + ); + nuint hSize = HUF_readDTableX2_wksp( + entropy->hufTable, + dictPtr, + (nuint)(dictEnd - dictPtr), + workspace, + workspaceSize, + 0 + ); if (ERR_isError(hSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -1542,8 +1796,15 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic { short* offcodeNCount = stackalloc short[32]; - uint offcodeMaxValue = 31, offcodeLog; - nuint offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (nuint)(dictEnd - dictPtr)); + uint offcodeMaxValue = 31, + offcodeLog; + nuint offcodeHeaderSize = FSE_readNCount( + offcodeNCount, + &offcodeMaxValue, + &offcodeLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if (ERR_isError(offcodeHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -1559,14 +1820,31 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - 
ZSTD_buildFSETable(&entropy->OFTable.e0, offcodeNCount, offcodeMaxValue, OF_base, OF_bits, offcodeLog, entropy->workspace, sizeof(uint) * 157, 0); + ZSTD_buildFSETable( + &entropy->OFTable.e0, + offcodeNCount, + offcodeMaxValue, + OF_base, + OF_bits, + offcodeLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); dictPtr += offcodeHeaderSize; } { short* matchlengthNCount = stackalloc short[53]; - uint matchlengthMaxValue = 52, matchlengthLog; - nuint matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + uint matchlengthMaxValue = 52, + matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount( + matchlengthNCount, + &matchlengthMaxValue, + &matchlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if (ERR_isError(matchlengthHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -1582,14 +1860,31 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - ZSTD_buildFSETable(&entropy->MLTable.e0, matchlengthNCount, matchlengthMaxValue, ML_base, ML_bits, matchlengthLog, entropy->workspace, sizeof(uint) * 157, 0); + ZSTD_buildFSETable( + &entropy->MLTable.e0, + matchlengthNCount, + matchlengthMaxValue, + ML_base, + ML_bits, + matchlengthLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); dictPtr += matchlengthHeaderSize; } { short* litlengthNCount = stackalloc short[36]; - uint litlengthMaxValue = 35, litlengthLog; - nuint litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (nuint)(dictEnd - dictPtr)); + uint litlengthMaxValue = 35, + litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount( + litlengthNCount, + &litlengthMaxValue, + &litlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); if (ERR_isError(litlengthHeaderSize)) { return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); @@ -1605,7 +1900,17 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - ZSTD_buildFSETable(&entropy->LLTable.e0, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, litlengthLog, entropy->workspace, sizeof(uint) * 157, 0); + ZSTD_buildFSETable( + &entropy->LLTable.e0, + litlengthNCount, + litlengthMaxValue, + LL_base, + LL_bits, + litlengthLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); dictPtr += litlengthHeaderSize; } @@ -1623,7 +1928,9 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic dictPtr += 4; if (rep == 0 || rep > dictContentSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); } entropy->rep[i] = rep; @@ -1633,7 +1940,11 @@ private static nuint ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, void* dic return (nuint)(dictPtr - (byte*)dict); } - private static nuint ZSTD_decompress_insertDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + private static nuint ZSTD_decompress_insertDictionary( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); @@ -1685,7 +1996,11 @@ public static nuint ZSTD_decompressBegin(ZSTD_DCtx_s* dctx) return 0; } - public static nuint ZSTD_decompressBegin_usingDict(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + public static nuint ZSTD_decompressBegin_usingDict( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) { { nuint err_code = ZSTD_decompressBegin(dctx); @@ -1770,7 +2085,7 @@ public static uint ZSTD_getDictID_fromFrame(void* src, nuint srcSize) dictID = 0, checksumFlag = 0, _reserved1 = 0, - _reserved2 = 0 + _reserved2 = 0, }; nuint hError = ZSTD_getFrameHeader(&zfp, src, srcSize); 
if (ERR_isError(hError)) @@ -1781,7 +2096,14 @@ public static uint ZSTD_getDictID_fromFrame(void* src, nuint srcSize) /*! ZSTD_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Use dictionary without significant overhead. */ - public static nuint ZSTD_decompress_usingDDict(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, ZSTD_DDict_s* ddict) + public static nuint ZSTD_decompress_usingDDict( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_DDict_s* ddict + ) { return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, null, 0, ddict); } @@ -1825,7 +2147,13 @@ public static nuint ZSTD_DStreamOutSize() * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ - public static nuint ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + public static nuint ZSTD_DCtx_loadDictionary_advanced( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) { if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { @@ -1835,7 +2163,13 @@ public static nuint ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx_s* dctx, void* d ZSTD_clearDict(dctx); if (dict != null && dictSize != 0) { - dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); + dctx->ddictLocal = ZSTD_createDDict_advanced( + dict, + dictSize, + dictLoadMethod, + dictContentType, + dctx->customMem + ); if (dctx->ddictLocal == null) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); @@ -1853,9 +2187,19 @@ public static nuint ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx_s* dctx, void* d * but references `dict` content instead of copying it into `dctx`. 
* This saves memory if `dict` remains around., * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ - public static nuint ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + public static nuint ZSTD_DCtx_loadDictionary_byReference( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) { - return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_auto); + return ZSTD_DCtx_loadDictionary_advanced( + dctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); } /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ @@ -1875,16 +2219,33 @@ public static nuint ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx_s* dctx, void */ public static nuint ZSTD_DCtx_loadDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) { - return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, ZSTD_dictContentType_e.ZSTD_dct_auto); + return ZSTD_DCtx_loadDictionary_advanced( + dctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); } /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) 
*/ - public static nuint ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize, ZSTD_dictContentType_e dictContentType) - { - { - nuint err_code = ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType); + public static nuint ZSTD_DCtx_refPrefix_advanced( + ZSTD_DCtx_s* dctx, + void* prefix, + nuint prefixSize, + ZSTD_dictContentType_e dictContentType + ) + { + { + nuint err_code = ZSTD_DCtx_loadDictionary_advanced( + dctx, + prefix, + prefixSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dictContentType + ); if (ERR_isError(err_code)) { return err_code; @@ -1913,7 +2274,12 @@ public static nuint ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx_s* dctx, void* prefix */ public static nuint ZSTD_DCtx_refPrefix(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize) { - return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dictContentType_e.ZSTD_dct_rawContent); + return ZSTD_DCtx_refPrefix_advanced( + dctx, + prefix, + prefixSize, + ZSTD_dictContentType_e.ZSTD_dct_rawContent + ); } /* ZSTD_initDStream_usingDict() : @@ -2038,13 +2404,19 @@ public static nuint ZSTD_DCtx_refDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); if (dctx->ddictSet == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } } assert(dctx->staticSize == 0); { - nuint err_code = ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem); + nuint err_code = ZSTD_DDictHashSet_addDDict( + dctx->ddictSet, + ddict, + dctx->customMem + ); if (ERR_isError(err_code)) { return err_code; @@ -2091,7 +2463,11 @@ public static nuint ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx_s* dctx, nuint maxWindo * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
*/ public static nuint ZSTD_DCtx_setFormat(ZSTD_DCtx_s* dctx, ZSTD_format_e format) { - return ZSTD_DCtx_setParameter(dctx, ZSTD_dParameter.ZSTD_d_experimentalParam1, (int)format); + return ZSTD_DCtx_setParameter( + dctx, + ZSTD_dParameter.ZSTD_d_experimentalParam1, + (int)format + ); } /*! ZSTD_dParam_getBounds() : @@ -2107,7 +2483,7 @@ public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) { error = 0, lowerBound = 0, - upperBound = 0 + upperBound = 0, }; switch (dParam) { @@ -2143,7 +2519,9 @@ public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) break; } - bounds.error = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + bounds.error = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); return bounds; } @@ -2167,7 +2545,11 @@ private static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ - public static nuint ZSTD_DCtx_getParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter param, int* value) + public static nuint ZSTD_DCtx_getParameter( + ZSTD_DCtx_s* dctx, + ZSTD_dParameter param, + int* value + ) { switch (param) { @@ -2206,7 +2588,11 @@ public static nuint ZSTD_DCtx_getParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter pa * Setting a parameter is only possible during frame initialization (before starting decompression). * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
*/ - public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dParam, int value) + public static nuint ZSTD_DCtx_setParameter( + ZSTD_DCtx_s* dctx, + ZSTD_dParameter dParam, + int value + ) { if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { @@ -2218,10 +2604,16 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP case ZSTD_dParameter.ZSTD_d_windowLogMax: if (value == 0) value = 27; + { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) == 0) + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) + == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2229,9 +2621,16 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam1: { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam1, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam1, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2239,9 +2638,16 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam2: { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam2, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam2, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2249,9 +2655,16 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP return 0; case 
ZSTD_dParameter.ZSTD_d_experimentalParam3: { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam3, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam3, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2259,24 +2672,40 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam4: { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam4, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam4, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } if (dctx->staticSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam5: { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam5, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam5, + value + ) == 0 + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2285,9 +2714,16 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP case ZSTD_dParameter.ZSTD_d_experimentalParam6: if (value != 0) { - if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam6, value) == 0) + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam6, + value + ) == 0 + ) { - 
return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } @@ -2308,14 +2744,20 @@ public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dP */ public static nuint ZSTD_DCtx_reset(ZSTD_DCtx_s* dctx, ZSTD_ResetDirective reset) { - if (reset == ZSTD_ResetDirective.ZSTD_reset_session_only || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_session_only + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { dctx->streamStage = ZSTD_dStreamStage.zdss_init; dctx->noForwardProgress = 0; dctx->isFrameDecompression = 1; } - if (reset == ZSTD_ResetDirective.ZSTD_reset_parameters || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters) + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_parameters + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { @@ -2334,9 +2776,16 @@ public static nuint ZSTD_sizeof_DStream(ZSTD_DCtx_s* dctx) return ZSTD_sizeof_DCtx(dctx); } - private static nuint ZSTD_decodingBufferSize_internal(ulong windowSize, ulong frameContentSize, nuint blockSizeMax) + private static nuint ZSTD_decodingBufferSize_internal( + ulong windowSize, + ulong frameContentSize, + nuint blockSizeMax + ) { - nuint blockSize = (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) < blockSizeMax ? (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) : blockSizeMax; + nuint blockSize = + (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) < blockSizeMax + ? (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) + : blockSizeMax; /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing * the block at the beginning of the output buffer, and maintain a full window. 
@@ -2349,7 +2798,9 @@ private static nuint ZSTD_decodingBufferSize_internal(ulong windowSize, ulong fr nuint minRBSize = (nuint)neededSize; if (minRBSize != neededSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); } return minRBSize; @@ -2385,19 +2836,31 @@ public static nuint ZSTD_estimateDStreamSize_fromFrame(void* src, nuint srcSize) if (zfh.windowSize > windowSizeMax) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); } return ZSTD_estimateDStreamSize((nuint)zfh.windowSize); } /* ***** Decompression ***** */ - private static int ZSTD_DCtx_isOverflow(ZSTD_DCtx_s* zds, nuint neededInBuffSize, nuint neededOutBuffSize) + private static int ZSTD_DCtx_isOverflow( + ZSTD_DCtx_s* zds, + nuint neededInBuffSize, + nuint neededOutBuffSize + ) { - return zds->inBuffSize + zds->outBuffSize >= (neededInBuffSize + neededOutBuffSize) * 3 ? 1 : 0; + return zds->inBuffSize + zds->outBuffSize >= (neededInBuffSize + neededOutBuffSize) * 3 + ? 
1 + : 0; } - private static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DCtx_s* zds, nuint neededInBuffSize, nuint neededOutBuffSize) + private static void ZSTD_DCtx_updateOversizedDuration( + ZSTD_DCtx_s* zds, + nuint neededInBuffSize, + nuint neededOutBuffSize + ) { if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize) != 0) zds->oversizedDuration++; @@ -2418,7 +2881,11 @@ private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* out return 0; if (zds->streamStage == ZSTD_dStreamStage.zdss_init) return 0; - if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) + if ( + expect.dst == output->dst + && expect.pos == output->pos + && expect.size == output->size + ) return 0; return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong)); } @@ -2428,13 +2895,25 @@ private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* out * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. * NOTE: You must break after calling this function since the streamStage is modified. */ - private static nuint ZSTD_decompressContinueStream(ZSTD_DCtx_s* zds, sbyte** op, sbyte* oend, void* src, nuint srcSize) + private static nuint ZSTD_decompressContinueStream( + ZSTD_DCtx_s* zds, + sbyte** op, + sbyte* oend, + void* src, + nuint srcSize + ) { int isSkipFrame = ZSTD_isSkipFrame(zds); if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) { nuint dstSize = isSkipFrame != 0 ? 
0 : zds->outBuffSize - zds->outStart; - nuint decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, dstSize, src, srcSize); + nuint decodedSize = ZSTD_decompressContinue( + zds, + zds->outBuff + zds->outStart, + dstSize, + src, + srcSize + ); { nuint err_code = decodedSize; if (ERR_isError(err_code)) @@ -2497,7 +2976,11 @@ private static nuint ZSTD_decompressContinueStream(ZSTD_DCtx_s* zds, sbyte** op, * which can be done explicitly (`ZSTD_DCtx_reset()`), * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) */ - public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input) + public static nuint ZSTD_decompressStream( + ZSTD_DCtx_s* zds, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) { sbyte* src = (sbyte*)input->src; sbyte* istart = input->pos != 0 ? src + input->pos : src; @@ -2539,7 +3022,12 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou goto case ZSTD_dStreamStage.zdss_loadHeader; case ZSTD_dStreamStage.zdss_loadHeader: { - nuint hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + nuint hSize = ZSTD_getFrameHeader_advanced( + &zds->fParams, + zds->headerBuffer, + zds->lhSize, + zds->format + ); if (zds->refMultipleDDicts != default && zds->ddictSet != null) { ZSTD_DCtx_selectFrameDDict(zds); @@ -2560,21 +3048,42 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou { if (remainingInput > 0) { - memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)remainingInput); + memcpy( + zds->headerBuffer + zds->lhSize, + ip, + (uint)remainingInput + ); zds->lhSize += remainingInput; } input->pos = input->size; { /* check first few bytes */ - nuint err_code = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + nuint err_code = ZSTD_getFrameHeader_advanced( + 
&zds->fParams, + zds->headerBuffer, + zds->lhSize, + zds->format + ); if (ERR_isError(err_code)) { return err_code; } } - return ((nuint)(zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) > hSize ? (nuint)(zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) : hSize) - zds->lhSize + ZSTD_blockHeaderSize; + return ( + (nuint)( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 + ) > hSize + ? (nuint)( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 + ? 6 + : 2 + ) + : hSize + ) + - zds->lhSize + + ZSTD_blockHeaderSize; } assert(ip != null); @@ -2585,13 +3094,28 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou } } - if (zds->fParams.frameContentSize != unchecked(0UL - 1) && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame && (nuint)(oend - op) >= zds->fParams.frameContentSize) + if ( + zds->fParams.frameContentSize != unchecked(0UL - 1) + && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame + && (nuint)(oend - op) >= zds->fParams.frameContentSize + ) { - nuint cSize = ZSTD_findFrameCompressedSize_advanced(istart, (nuint)(iend - istart), zds->format); + nuint cSize = ZSTD_findFrameCompressedSize_advanced( + istart, + (nuint)(iend - istart), + zds->format + ); if (cSize <= (nuint)(iend - istart)) { /* shortcut : using single-pass mode */ - nuint decompressedSize = ZSTD_decompress_usingDDict(zds, op, (nuint)(oend - op), istart, cSize, ZSTD_getDDict(zds)); + nuint decompressedSize = ZSTD_decompress_usingDDict( + zds, + op, + (nuint)(oend - op), + istart, + cSize, + ZSTD_getDDict(zds) + ); if (ERR_isError(decompressedSize)) return decompressedSize; assert(istart != null); @@ -2604,20 +3128,33 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou } } - if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame && zds->fParams.frameContentSize != unchecked(0UL - 1) && (nuint)(oend - op) < zds->fParams.frameContentSize) + 
if ( + zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame + && zds->fParams.frameContentSize != unchecked(0UL - 1) + && (nuint)(oend - op) < zds->fParams.frameContentSize + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } { - nuint err_code = ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)); + nuint err_code = ZSTD_decompressBegin_usingDDict( + zds, + ZSTD_getDDict(zds) + ); if (ERR_isError(err_code)) { return err_code; } } - if (zds->format == ZSTD_format_e.ZSTD_f_zstd1 && (MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0) == 0x184D2A50) + if ( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 + && (MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0) == 0x184D2A50 + ) { zds->expected = MEM_readLE32(zds->headerBuffer + 4); zds->stage = ZSTD_dStage.ZSTDds_skipFrame; @@ -2625,7 +3162,11 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou else { { - nuint err_code = ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize); + nuint err_code = ZSTD_decodeFrameHeader( + zds, + zds->headerBuffer, + zds->lhSize + ); if (ERR_isError(err_code)) { return err_code; @@ -2636,21 +3177,46 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou zds->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; } - zds->fParams.windowSize = zds->fParams.windowSize > 1U << 10 ? zds->fParams.windowSize : 1U << 10; + zds->fParams.windowSize = + zds->fParams.windowSize > 1U << 10 ? 
zds->fParams.windowSize : 1U << 10; if (zds->fParams.windowSize > zds->maxWindowSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge + ) + ); } if (zds->maxBlockSizeParam != 0) - zds->fParams.blockSizeMax = zds->fParams.blockSizeMax < (uint)zds->maxBlockSizeParam ? zds->fParams.blockSizeMax : (uint)zds->maxBlockSizeParam; + zds->fParams.blockSizeMax = + zds->fParams.blockSizeMax < (uint)zds->maxBlockSizeParam + ? zds->fParams.blockSizeMax + : (uint)zds->maxBlockSizeParam; + { /* frame checksum */ - nuint neededInBuffSize = zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; - nuint neededOutBuffSize = zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) : 0; - ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); + nuint neededInBuffSize = + zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; + nuint neededOutBuffSize = + zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_decodingBufferSize_internal( + zds->fParams.windowSize, + zds->fParams.frameContentSize, + zds->fParams.blockSizeMax + ) + : 0; + ZSTD_DCtx_updateOversizedDuration( + zds, + neededInBuffSize, + neededOutBuffSize + ); { - int tooSmall = zds->inBuffSize < neededInBuffSize || zds->outBuffSize < neededOutBuffSize ? 1 : 0; + int tooSmall = + zds->inBuffSize < neededInBuffSize + || zds->outBuffSize < neededOutBuffSize + ? 
1 + : 0; int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); if (tooSmall != 0 || tooLarge != 0) { @@ -2658,9 +3224,17 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou if (zds->staticSize != 0) { assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); - if (bufferSize > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s)) + if ( + bufferSize + > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s) + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)( + -(int) + ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) + ); } } else @@ -2668,10 +3242,18 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou ZSTD_customFree(zds->inBuff, zds->customMem); zds->inBuffSize = 0; zds->outBuffSize = 0; - zds->inBuff = (sbyte*)ZSTD_customMalloc(bufferSize, zds->customMem); + zds->inBuff = (sbyte*)ZSTD_customMalloc( + bufferSize, + zds->customMem + ); if (zds->inBuff == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)( + -(int) + ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) + ); } } @@ -2686,7 +3268,10 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou goto case ZSTD_dStreamStage.zdss_read; case ZSTD_dStreamStage.zdss_read: { - nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (nuint)(iend - ip)); + nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize( + zds, + (nuint)(iend - ip) + ); if (neededInSize == 0) { zds->streamStage = ZSTD_dStreamStage.zdss_init; @@ -2697,7 +3282,13 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou if ((nuint)(iend - ip) >= neededInSize) { { - nuint err_code = ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize); + nuint err_code = ZSTD_decompressContinueStream( + zds, + &op, + oend, + ip, + neededInSize + ); if (ERR_isError(err_code)) { return err_code; @@ -2719,60 +3310,87 @@ 
public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou zds->streamStage = ZSTD_dStreamStage.zdss_load; goto case ZSTD_dStreamStage.zdss_load; case ZSTD_dStreamStage.zdss_load: + { + nuint neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + nuint toLoad = neededInSize - zds->inPos; + int isSkipFrame = ZSTD_isSkipFrame(zds); + nuint loadedSize; + assert( + neededInSize + == ZSTD_nextSrcSizeToDecompressWithInputSize( + zds, + (nuint)(iend - ip) + ) + ); + if (isSkipFrame != 0) + { + loadedSize = toLoad < (nuint)(iend - ip) ? toLoad : (nuint)(iend - ip); + } + else { - nuint neededInSize = ZSTD_nextSrcSizeToDecompress(zds); - nuint toLoad = neededInSize - zds->inPos; - int isSkipFrame = ZSTD_isSkipFrame(zds); - nuint loadedSize; - assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (nuint)(iend - ip))); - if (isSkipFrame != 0) + if (toLoad > zds->inBuffSize - zds->inPos) { - loadedSize = toLoad < (nuint)(iend - ip) ? toLoad : (nuint)(iend - ip); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - else - { - if (toLoad > zds->inBuffSize - zds->inPos) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (nuint)(iend - ip)); - } + loadedSize = ZSTD_limitCopy( + zds->inBuff + zds->inPos, + toLoad, + ip, + (nuint)(iend - ip) + ); + } - if (loadedSize != 0) - { - ip += loadedSize; - zds->inPos += loadedSize; - } + if (loadedSize != 0) + { + ip += loadedSize; + zds->inPos += loadedSize; + } - if (loadedSize < toLoad) - { - someMoreWork = 0; - break; - } + if (loadedSize < toLoad) + { + someMoreWork = 0; + break; + } - zds->inPos = 0; + zds->inPos = 0; + { + nuint err_code = ZSTD_decompressContinueStream( + zds, + &op, + oend, + zds->inBuff, + neededInSize + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize); - if 
(ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - break; } + break; + } + case ZSTD_dStreamStage.zdss_flush: { nuint toFlushSize = zds->outEnd - zds->outStart; - nuint flushedSize = ZSTD_limitCopy(op, (nuint)(oend - op), zds->outBuff + zds->outStart, toFlushSize); + nuint flushedSize = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zds->outBuff + zds->outStart, + toFlushSize + ); op = op != null ? op + flushedSize : op; zds->outStart += flushedSize; if (flushedSize == toFlushSize) { zds->streamStage = ZSTD_dStreamStage.zdss_read; - if (zds->outBuffSize < zds->fParams.frameContentSize && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) + if ( + zds->outBuffSize < zds->fParams.frameContentSize + && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize + ) { zds->outStart = zds->outEnd = 0; } @@ -2799,12 +3417,16 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou { if (op == oend) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull) + ); } if (ip == iend) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty) + ); } assert(0 != 0); @@ -2844,7 +3466,11 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou return 1; } - nextSrcSizeHint += ZSTD_blockHeaderSize * (nuint)(ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0); + nextSrcSizeHint += + ZSTD_blockHeaderSize + * (nuint)( + ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 
1 : 0 + ); assert(zds->inPos <= nextSrcSizeHint); nextSrcSizeHint -= zds->inPos; return nextSrcSizeHint; @@ -2857,7 +3483,15 @@ public static nuint ZSTD_decompressStream(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* ou * This can be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ - public static nuint ZSTD_decompressStream_simpleArgs(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, nuint* dstPos, void* src, nuint srcSize, nuint* srcPos) + public static nuint ZSTD_decompressStream_simpleArgs( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + nuint* dstPos, + void* src, + nuint srcSize, + nuint* srcPos + ) { ZSTD_outBuffer_s output; ZSTD_inBuffer_s input; @@ -2875,4 +3509,4 @@ public static nuint ZSTD_decompressStream_simpleArgs(ZSTD_DCtx_s* dctx, void* ds } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs index a3ef9e251..a91c728a8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs @@ -1,7 +1,7 @@ -using static ZstdSharp.UnsafeHelper; -using System.Runtime.CompilerServices; using System; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -20,7 +20,8 @@ private static void ZSTD_copy4(void* dst, void* src) ***************************************************************/ private static nuint ZSTD_blockSizeMax(ZSTD_DCtx_s* dctx) { - nuint blockSizeMax = dctx->isFrameDecompression != 0 ? dctx->fParams.blockSizeMax : 1 << 17; + nuint blockSizeMax = + dctx->isFrameDecompression != 0 ? 
dctx->fParams.blockSizeMax : 1 << 17; assert(blockSizeMax <= 1 << 17); return blockSizeMax; } @@ -52,13 +53,26 @@ private static nuint ZSTD_getcBlockSize(void* src, nuint srcSize, blockPropertie } /* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ - private static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, nuint litSize, streaming_operation streaming, nuint expectedWriteSize, uint splitImmediately) + private static void ZSTD_allocateLiteralsBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + nuint litSize, + streaming_operation streaming, + nuint expectedWriteSize, + uint splitImmediately + ) { nuint blockSizeMax = ZSTD_blockSizeMax(dctx); assert(litSize <= blockSizeMax); - assert(dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming); + assert( + dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming + ); assert(expectedWriteSize <= blockSizeMax); - if (streaming == streaming_operation.not_streaming && dstCapacity > blockSizeMax + 32 + litSize + 32) + if ( + streaming == streaming_operation.not_streaming + && dstCapacity > blockSizeMax + 32 + litSize + 32 + ) { dctx->litBuffer = (byte*)dst + blockSizeMax + 32; dctx->litBufferEnd = dctx->litBuffer + litSize; @@ -97,7 +111,14 @@ private static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx_s* dctx, void* dst, nu * * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ - private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuint srcSize, void* dst, nuint dstCapacity, streaming_operation streaming) + private static nuint ZSTD_decodeLiteralsBlock( + ZSTD_DCtx_s* dctx, + void* src, + nuint srcSize, + void* dst, + nuint dstCapacity, + streaming_operation streaming + ) { if (srcSize < 1 + 1) { @@ -113,24 +134,42 @@ private static nuint 
ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuin case SymbolEncodingType_e.set_repeat: if (dctx->litEntropy == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); } goto case SymbolEncodingType_e.set_compressed; case SymbolEncodingType_e.set_compressed: if (srcSize < 5) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } { - nuint lhSize, litSize, litCSize; + nuint lhSize, + litSize, + litCSize; uint singleStream = 0; uint lhlCode = (uint)(istart[0] >> 2 & 3); uint lhc = MEM_readLE32(istart); nuint hufSuccess; - nuint expectedWriteSize = blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; - int flags = 0 | (ZSTD_DCtx_get_bmi2(dctx) != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) | (dctx->disableHufAsm != 0 ? (int)HUF_flags_e.HUF_flags_disableAsm : 0); + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + int flags = + 0 + | ( + ZSTD_DCtx_get_bmi2(dctx) != 0 + ? (int)HUF_flags_e.HUF_flags_bmi2 + : 0 + ) + | ( + dctx->disableHufAsm != 0 + ? 
(int)HUF_flags_e.HUF_flags_disableAsm + : 0 + ); switch (lhlCode) { case 0: @@ -155,31 +194,51 @@ private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuin if (litSize > 0 && dst == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } if (litSize > blockSizeMax) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } if (singleStream == 0) if (litSize < 6) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong)); + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong + ) + ); } if (litCSize + lhSize > srcSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } if (expectedWriteSize < litSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 0 + ); if (dctx->ddictIsCold != 0 && litSize > 768) { sbyte* _ptr = (sbyte*)dctx->HUFptr; @@ -200,31 +259,71 @@ private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuin { if (singleStream != 0) { - hufSuccess = HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr, flags); + hufSuccess = HUF_decompress1X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags + ); } else { assert(litSize >= 6); - hufSuccess = HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + 
lhSize, litCSize, dctx->HUFptr, flags); + hufSuccess = HUF_decompress4X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags + ); } } else { if (singleStream != 0) { - hufSuccess = HUF_decompress1X1_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->workspace, sizeof(uint) * 640, flags); + hufSuccess = HUF_decompress1X1_DCtx_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags + ); } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->workspace, sizeof(uint) * 640, flags); + hufSuccess = HUF_decompress4X_hufOnly_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags + ); } } if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { assert(litSize > 1 << 16); - memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - (1 << 16), 1 << 16); - memmove(dctx->litBuffer + (1 << 16) - 32, dctx->litBuffer, litSize - (1 << 16)); + memcpy( + dctx->litExtraBuffer, + dctx->litBufferEnd - (1 << 16), + 1 << 16 + ); + memmove( + dctx->litBuffer + (1 << 16) - 32, + dctx->litBuffer, + litSize - (1 << 16) + ); dctx->litBuffer += (1 << 16) - 32; dctx->litBufferEnd -= 32; assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); @@ -232,7 +331,9 @@ private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuin if (ERR_isError(hufSuccess)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } dctx->litPtr = dctx->litBuffer; @@ -244,159 +345,462 @@ private static nuint ZSTD_decodeLiteralsBlock(ZSTD_DCtx_s* dctx, void* src, nuin } case SymbolEncodingType_e.set_basic: + { + nuint litSize, + lhSize; + uint lhlCode = 
(uint)(istart[0] >> 2 & 3); + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + switch (lhlCode) { - nuint litSize, lhSize; - uint lhlCode = (uint)(istart[0] >> 2 & 3); - nuint expectedWriteSize = blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; - switch (lhlCode) - { - case 0: - case 2: - default: - lhSize = 1; - litSize = (nuint)(istart[0] >> 3); - break; - case 1: - lhSize = 2; - litSize = (nuint)(MEM_readLE16(istart) >> 4); - break; - case 3: - lhSize = 3; - if (srcSize < 3) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 3) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - litSize = MEM_readLE24(istart) >> 4; - break; - } + litSize = MEM_readLE24(istart) >> 4; + break; + } - if (litSize > 0 && dst == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + if (litSize > 0 && dst == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - if (litSize > blockSizeMax) + if (litSize > blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + if (expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } + + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 1 + ); + if (lhSize + litSize + 32 > srcSize) + { + if (litSize + lhSize > srcSize) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - if (expectedWriteSize < litSize) + if 
(dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + memcpy( + dctx->litBuffer, + istart + lhSize, + (uint)(litSize - (1 << 16)) + ); + memcpy( + dctx->litExtraBuffer, + istart + lhSize + litSize - (1 << 16), + 1 << 16 + ); } - - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); - if (lhSize + litSize + 32 > srcSize) + else { - if (litSize + lhSize > srcSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - memcpy(dctx->litBuffer, istart + lhSize, (uint)(litSize - (1 << 16))); - memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - (1 << 16), 1 << 16); - } - else - { - memcpy(dctx->litBuffer, istart + lhSize, (uint)litSize); - } - - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize + litSize; + memcpy(dctx->litBuffer, istart + lhSize, (uint)litSize); } - dctx->litPtr = istart + lhSize; + dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; - dctx->litBufferEnd = dctx->litPtr + litSize; - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; return lhSize + litSize; } + dctx->litPtr = istart + lhSize; + dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + return lhSize + litSize; + } + case SymbolEncodingType_e.set_rle: + { + uint lhlCode = (uint)(istart[0] >> 2 & 3); + nuint litSize, + lhSize; + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + switch (lhlCode) { - uint lhlCode = (uint)(istart[0] >> 2 & 3); - nuint litSize, lhSize; - nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; - switch (lhlCode) - { - case 0: - case 2: - default: - lhSize = 1; - litSize = (nuint)(istart[0] >> 3); - break; - case 1: - lhSize = 2; - if (srcSize < 3) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - litSize = (nuint)(MEM_readLE16(istart) >> 4); - break; - case 3: - lhSize = 3; - if (srcSize < 4) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + if (srcSize < 3) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - litSize = MEM_readLE24(istart) >> 4; - break; - } + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 4) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - if (litSize > 0 && dst == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + litSize = MEM_readLE24(istart) >> 4; + break; + } - if (litSize > blockSizeMax) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + if (litSize > 0 && dst == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - if (expectedWriteSize < litSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + if (litSize > blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - memset(dctx->litBuffer, istart[lhSize], (uint)(litSize - (1 << 16))); - memset(dctx->litExtraBuffer, istart[lhSize], 1 << 16); - } - else - { - memset(dctx->litBuffer, istart[lhSize], (uint)litSize); - } + if 
(expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize + 1; + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 1 + ); + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + memset(dctx->litBuffer, istart[lhSize], (uint)(litSize - (1 << 16))); + memset(dctx->litExtraBuffer, istart[lhSize], 1 << 16); + } + else + { + memset(dctx->litBuffer, istart[lhSize], (uint)litSize); } + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize + 1; + } + default: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } } } /* Hidden declaration for fullbench */ - private static nuint ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx_s* dctx, void* src, nuint srcSize, void* dst, nuint dstCapacity) + private static nuint ZSTD_decodeLiteralsBlock_wrapper( + ZSTD_DCtx_s* dctx, + void* src, + nuint srcSize, + void* dst, + nuint dstCapacity + ) { dctx->isFrameDecompression = 0; - return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming_operation.not_streaming); + return ZSTD_decodeLiteralsBlock( + dctx, + src, + srcSize, + dst, + dstCapacity, + streaming_operation.not_streaming + ); } - private static readonly ZSTD_seqSymbol* LL_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[65] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 0, nbBits: 5, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 6, nbBits: 5, baseValue: 64), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 256), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1024), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4096), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new 
ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 512), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2048), new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 0, nbBits: 6, baseValue: 15), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65536), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32768), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16384), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8192) }); - private static readonly ZSTD_seqSymbol* OF_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[33] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 0), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 21, nbBits: 5, baseValue: 2097149), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 18, nbBits: 5, baseValue: 262141), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 23, nbBits: 5, baseValue: 8388605), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 5, baseValue: 29), new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 20, nbBits: 5, baseValue: 1048573), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 2045), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 17, nbBits: 5, baseValue: 131069), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 22, nbBits: 5, baseValue: 4194301), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 13), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 19, nbBits: 5, baseValue: 524285), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 28, nbBits: 5, baseValue: 268435453), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 27, nbBits: 5, baseValue: 134217725), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 26, nbBits: 5, baseValue: 67108861), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 25, nbBits: 5, baseValue: 33554429), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 24, nbBits: 5, baseValue: 16777213) }); - private static readonly ZSTD_seqSymbol* ML_defaultDTable = GetArrayPointer(new ZSTD_seqSymbol[65] { new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 0, nbBits: 6, baseValue: 3), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 16), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 19), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 22), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 25), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 28), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 31), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 34), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 37), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 41), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 47), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 59), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 83), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 6, baseValue: 131), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 515), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 12), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 18), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 21), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 24), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 27), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 30), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 33), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 35), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 39), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 43), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 51), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 67), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 6, baseValue: 99), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 259), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, 
nbBits: 5, baseValue: 11), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 17), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 20), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 23), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 26), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 29), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 32), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65539), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32771), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16387), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8195), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4099), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2051), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1027) }); - private static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, uint baseValue, byte nbAddBits) + private static readonly ZSTD_seqSymbol* LL_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[65] + { + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 
0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 6, nbBits: 5, baseValue: 64), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 256), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1024), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4096), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, 
nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 512), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2048), + new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), + new 
ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65536), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32768), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16384), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8192), + } + ); + private static readonly ZSTD_seqSymbol* OF_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[33] + { + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 0), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 21, + nbBits: 5, + baseValue: 2097149 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 18, + nbBits: 5, + baseValue: 262141 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 23, + nbBits: 5, + baseValue: 8388605 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 5, 
baseValue: 29), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 20, + nbBits: 5, + baseValue: 1048573 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 2045), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 17, + nbBits: 5, + baseValue: 131069 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 22, + nbBits: 5, + baseValue: 4194301 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 13), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 19, + nbBits: 5, + baseValue: 524285 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 28, + nbBits: 5, + baseValue: 268435453 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 27, + nbBits: 5, + baseValue: 134217725 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 26, + nbBits: 5, + baseValue: 67108861 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 25, + nbBits: 5, + baseValue: 33554429 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 24, + nbBits: 5, + baseValue: 16777213 + ), + } + ); + private static readonly ZSTD_seqSymbol* 
ML_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[65] + { + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 3), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 19), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 25), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 28), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 31), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 34), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 37), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 41), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 47), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 59), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 83), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 6, baseValue: 131), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 515), + new 
ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 18), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 21), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 24), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 27), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 30), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 33), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 35), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 39), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 43), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 51), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 67), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 6, baseValue: 99), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 259), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), + new 
ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 17), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 20), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 23), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 26), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 29), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65539), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32771), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16387), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8195), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4099), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2051), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1027), + } + ); + + private static void ZSTD_buildSeqTable_rle( + ZSTD_seqSymbol* dt, + uint baseValue, + byte nbAddBits + ) { void* ptr = dt; ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; @@ -415,7 +819,16 @@ private static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, uint baseValue, b * cannot fail if input is valid => * all inputs are presumed validated at this stage */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void 
ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize) + private static void ZSTD_buildFSETable_body( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize + ) { ZSTD_seqSymbol* tableDecode = dt + 1; uint maxSV1 = maxSymbolValue + 1; @@ -502,7 +915,8 @@ private static void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, short* normalize { uint tableMask = tableSize - 1; uint step = (tableSize >> 1) + (tableSize >> 3) + 3; - uint s, position = 0; + uint s, + position = 0; for (s = 0; s < maxSV1; s++) { int i; @@ -526,7 +940,9 @@ private static void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, short* normalize uint symbol = tableDecode[u].baseValue; uint nextState = symbolNext[symbol]++; tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); - tableDecode[u].nextState = (ushort)((nextState << tableDecode[u].nbBits) - tableSize); + tableDecode[u].nextState = (ushort)( + (nextState << tableDecode[u].nbBits) - tableSize + ); assert(nbAdditionalBits[symbol] < 255); tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol]; tableDecode[u].baseValue = baseValue[symbol]; @@ -535,9 +951,27 @@ private static void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, short* normalize } /* Avoids the FORCE_INLINE of the _body() function. 
*/ - private static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize) + private static void ZSTD_buildFSETable_body_default( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize + ) { - ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); + ZSTD_buildFSETable_body( + dt, + normalizedCounter, + maxSymbolValue, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize + ); } /* ZSTD_buildFSETable() : @@ -549,15 +983,51 @@ private static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, short* n * defined in zstd_decompress_internal.h. * Internal use only. */ - private static void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, short* normalizedCounter, uint maxSymbolValue, uint* baseValue, byte* nbAdditionalBits, uint tableLog, void* wksp, nuint wkspSize, int bmi2) + private static void ZSTD_buildFSETable( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize, + int bmi2 + ) { - ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); + ZSTD_buildFSETable_body_default( + dt, + normalizedCounter, + maxSymbolValue, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize + ); } /*! 
ZSTD_buildSeqTable() : * @return : nb bytes read from src, * or an error code if it fails */ - private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSymbol** DTablePtr, SymbolEncodingType_e type, uint max, uint maxLog, void* src, nuint srcSize, uint* baseValue, byte* nbAdditionalBits, ZSTD_seqSymbol* defaultTable, uint flagRepeatTable, int ddictIsCold, int nbSeq, uint* wksp, nuint wkspSize, int bmi2) + private static nuint ZSTD_buildSeqTable( + ZSTD_seqSymbol* DTableSpace, + ZSTD_seqSymbol** DTablePtr, + SymbolEncodingType_e type, + uint max, + uint maxLog, + void* src, + nuint srcSize, + uint* baseValue, + byte* nbAdditionalBits, + ZSTD_seqSymbol* defaultTable, + uint flagRepeatTable, + int ddictIsCold, + int nbSeq, + uint* wksp, + nuint wkspSize, + int bmi2 + ) { switch (type) { @@ -569,7 +1039,9 @@ private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSym if (*(byte*)src > max) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } { @@ -587,7 +1059,9 @@ private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSym case SymbolEncodingType_e.set_repeat: if (flagRepeatTable == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } if (ddictIsCold != 0 && nbSeq > 24) @@ -612,25 +1086,39 @@ private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSym return 0; case SymbolEncodingType_e.set_compressed: + { + uint tableLog; + short* norm = stackalloc short[53]; + nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (ERR_isError(headerSize)) { - uint tableLog; - short* norm = stackalloc short[53]; - nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); - if (ERR_isError(headerSize)) - { - return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - if (tableLog > maxLog) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2); - *DTablePtr = DTableSpace; - return headerSize; + if (tableLog > maxLog) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } + ZSTD_buildFSETable( + DTableSpace, + norm, + max, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize, + bmi2 + ); + *DTablePtr = DTableSpace; + return headerSize; + } + default: assert(0 != 0); return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); @@ -640,7 +1128,12 @@ private static nuint ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, ZSTD_seqSym /*! ZSTD_decodeSeqHeaders() : * decode sequence header from src */ /* Used by: zstd_decompress_block, fullbench */ - private static nuint ZSTD_decodeSeqHeaders(ZSTD_DCtx_s* dctx, int* nbSeqPtr, void* src, nuint srcSize) + private static nuint ZSTD_decodeSeqHeaders( + ZSTD_DCtx_s* dctx, + int* nbSeqPtr, + void* src, + nuint srcSize + ) { byte* istart = (byte*)src; byte* iend = istart + srcSize; @@ -702,30 +1195,87 @@ private static nuint ZSTD_decodeSeqHeaders(ZSTD_DCtx_s* dctx, int* nbSeqPtr, voi SymbolEncodingType_e MLtype = (SymbolEncodingType_e)(*ip >> 2 & 3); ip++; { - nuint llhSize = ZSTD_buildSeqTable(&dctx->entropy.LLTable.e0, &dctx->LLTptr, LLtype, 35, 9, ip, (nuint)(iend - ip), LL_base, LL_bits, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx)); + nuint llhSize = ZSTD_buildSeqTable( + &dctx->entropy.LLTable.e0, + &dctx->LLTptr, + LLtype, + 35, + 9, + ip, + (nuint)(iend - ip), + LL_base, + LL_bits, + LL_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + 
nbSeq, + dctx->workspace, + sizeof(uint) * 640, + ZSTD_DCtx_get_bmi2(dctx) + ); if (ERR_isError(llhSize)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } ip += llhSize; } { - nuint ofhSize = ZSTD_buildSeqTable(&dctx->entropy.OFTable.e0, &dctx->OFTptr, OFtype, 31, 8, ip, (nuint)(iend - ip), OF_base, OF_bits, OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx)); + nuint ofhSize = ZSTD_buildSeqTable( + &dctx->entropy.OFTable.e0, + &dctx->OFTptr, + OFtype, + 31, + 8, + ip, + (nuint)(iend - ip), + OF_base, + OF_bits, + OF_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + nbSeq, + dctx->workspace, + sizeof(uint) * 640, + ZSTD_DCtx_get_bmi2(dctx) + ); if (ERR_isError(ofhSize)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } ip += ofhSize; } { - nuint mlhSize = ZSTD_buildSeqTable(&dctx->entropy.MLTable.e0, &dctx->MLTptr, MLtype, 52, 9, ip, (nuint)(iend - ip), ML_base, ML_bits, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(uint) * 640, ZSTD_DCtx_get_bmi2(dctx)); + nuint mlhSize = ZSTD_buildSeqTable( + &dctx->entropy.MLTable.e0, + &dctx->MLTptr, + MLtype, + 52, + 9, + ip, + (nuint)(iend - ip), + ML_base, + ML_bits, + ML_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + nbSeq, + dctx->workspace, + sizeof(uint) * 640, + ZSTD_DCtx_get_bmi2(dctx) + ); if (ERR_isError(mlhSize)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } ip += mlhSize; @@ -736,38 +1286,30 @@ private static nuint ZSTD_decodeSeqHeaders(ZSTD_DCtx_s* dctx, int* nbSeqPtr, voi } #if NET7_0_OR_GREATER - private static 
ReadOnlySpan Span_dec32table => new uint[8] - { - 0, - 1, - 2, - 1, - 4, - 4, - 4, - 4 - }; - private static uint* dec32table => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dec32table)); + private static ReadOnlySpan Span_dec32table => new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 }; + private static uint* dec32table => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dec32table) + ); #else - private static readonly uint* dec32table = GetArrayPointer(new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 }); + private static readonly uint* dec32table = GetArrayPointer( + new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_dec64table => new int[8] - { - 8, - 8, - 8, - 7, - 8, - 9, - 10, - 11 - }; - private static int* dec64table => (int*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dec64table)); + private static ReadOnlySpan Span_dec64table => new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 }; + private static int* dec64table => + (int*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dec64table) + ); #else - private static readonly int* dec64table = GetArrayPointer(new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 }); + private static readonly int* dec64table = GetArrayPointer( + new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 } + ); #endif /*! ZSTD_overlapCopy8() : * Copies 8 bytes from ip to op and updates op and ip where ip <= op. @@ -812,11 +1354,21 @@ private static void ZSTD_overlapCopy8(byte** op, byte** ip, nuint offset) * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. 
*/ - private static void ZSTD_safecopy(byte* op, byte* oend_w, byte* ip, nint length, ZSTD_overlap_e ovtype) + private static void ZSTD_safecopy( + byte* op, + byte* oend_w, + byte* ip, + nint length, + ZSTD_overlap_e ovtype + ) { nint diff = (nint)(op - ip); byte* oend = op + length; - assert(ovtype == ZSTD_overlap_e.ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w) || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0); + assert( + ovtype == ZSTD_overlap_e.ZSTD_no_overlap + && (diff <= -8 || diff >= 8 || op >= oend_w) + || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0 + ); if (length < 8) { while (op < oend) @@ -884,7 +1436,16 @@ private static void ZSTD_safecopyDstBeforeSrc(byte* op, byte* ip, nint length) * NOTE: This function needs to be fast for a single long sequence, but doesn't need * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). */ - private static nuint ZSTD_execSequenceEnd(byte* op, byte* oend, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + private static nuint ZSTD_execSequenceEnd( + byte* op, + byte* oend, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) { byte* oLitEnd = op + sequence.litLength; nuint sequenceLength = sequence.litLength + sequence.matchLength; @@ -903,7 +1464,13 @@ private static nuint ZSTD_execSequenceEnd(byte* op, byte* oend, seq_t sequence, assert(op < op + sequenceLength); assert(oLitEnd < op + sequenceLength); - ZSTD_safecopy(op, oend_w, *litPtr, (nint)sequence.litLength, ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_safecopy( + op, + oend_w, + *litPtr, + (nint)sequence.litLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); op = oLitEnd; *litPtr = iLitEnd; if (sequence.offset > (nuint)(oLitEnd - prefixStart)) @@ -929,14 +1496,30 @@ private static nuint ZSTD_execSequenceEnd(byte* op, byte* oend, seq_t sequence, } } - ZSTD_safecopy(op, 
oend_w, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + ZSTD_safecopy( + op, + oend_w, + match, + (nint)sequence.matchLength, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); return sequenceLength; } /* ZSTD_execSequenceEndSplitLitBuffer(): * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. */ - private static nuint ZSTD_execSequenceEndSplitLitBuffer(byte* op, byte* oend, byte* oend_w, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + private static nuint ZSTD_execSequenceEndSplitLitBuffer( + byte* op, + byte* oend, + byte* oend_w, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) { byte* oLitEnd = op + sequence.litLength; nuint sequenceLength = sequence.litLength + sequence.matchLength; @@ -985,12 +1568,27 @@ private static nuint ZSTD_execSequenceEndSplitLitBuffer(byte* op, byte* oend, by } } - ZSTD_safecopy(op, oend_w, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + ZSTD_safecopy( + op, + oend_w, + match, + (nint)sequence.matchLength, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); return sequenceLength; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + private static nuint ZSTD_execSequence( + byte* op, + byte* oend, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) { var sequence_litLength = sequence.litLength; var sequence_matchLength = sequence.matchLength; @@ -1005,8 +1603,26 @@ private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byt byte* match = oLitEnd - sequence_offset; assert(op != null); assert(oend_w < 
oend); - if (iLitEnd > litLimit || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32) - return ZSTD_execSequenceEnd(op, oend, new seq_t { litLength = sequence_litLength, matchLength = sequence_matchLength, offset = sequence_offset }, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + if ( + iLitEnd > litLimit + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 + ) + return ZSTD_execSequenceEnd( + op, + oend, + new seq_t + { + litLength = sequence_litLength, + matchLength = sequence_matchLength, + offset = sequence_offset, + }, + litPtr, + litLimit, + prefixStart, + virtualStart, + dictEnd + ); assert(op <= oLitEnd); assert(oLitEnd < oMatchEnd); assert(oMatchEnd <= oend); @@ -1017,7 +1633,12 @@ private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byt ZSTD_copy16(op, *litPtr); if (sequence_litLength > 16) { - ZSTD_wildcopy(op + 16, *litPtr + 16, (nint)(sequence_litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + op + 16, + *litPtr + 16, + (nint)(sequence_litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap + ); } op = oLitEnd; @@ -1051,7 +1672,12 @@ private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byt assert(sequence_matchLength >= 1); if (sequence_offset >= 16) { - ZSTD_wildcopy(op, match, (nint)sequence_matchLength, ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + op, + match, + (nint)sequence_matchLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); return sequenceLength; } @@ -1060,14 +1686,29 @@ private static nuint ZSTD_execSequence(byte* op, byte* oend, seq_t sequence, byt if (sequence_matchLength > 8) { assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (nint)sequence_matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + ZSTD_wildcopy( + op, + match, + (nint)sequence_matchLength - 8, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); } return sequenceLength; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private 
static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* oend_w, seq_t sequence, byte** litPtr, byte* litLimit, byte* prefixStart, byte* virtualStart, byte* dictEnd) + private static nuint ZSTD_execSequenceSplitLitBuffer( + byte* op, + byte* oend, + byte* oend_w, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) { byte* oLitEnd = op + sequence.litLength; nuint sequenceLength = sequence.litLength + sequence.matchLength; @@ -1077,8 +1718,22 @@ private static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* byte* match = oLitEnd - sequence.offset; assert(op != null); assert(oend_w < oend); - if (iLitEnd > litLimit || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32) - return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + if ( + iLitEnd > litLimit + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 + ) + return ZSTD_execSequenceEndSplitLitBuffer( + op, + oend, + oend_w, + sequence, + litPtr, + litLimit, + prefixStart, + virtualStart, + dictEnd + ); assert(op <= oLitEnd); assert(oLitEnd < oMatchEnd); assert(oMatchEnd <= oend); @@ -1089,7 +1744,12 @@ private static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* ZSTD_copy16(op, *litPtr); if (sequence.litLength > 16) { - ZSTD_wildcopy(op + 16, *litPtr + 16, (nint)(sequence.litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + op + 16, + *litPtr + 16, + (nint)(sequence.litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap + ); } op = oLitEnd; @@ -1123,7 +1783,12 @@ private static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* assert(sequence.matchLength >= 1); if (sequence.offset >= 16) { - ZSTD_wildcopy(op, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + op, + match, + (nint)sequence.matchLength, + 
ZSTD_overlap_e.ZSTD_no_overlap + ); return sequenceLength; } @@ -1132,13 +1797,22 @@ private static nuint ZSTD_execSequenceSplitLitBuffer(byte* op, byte* oend, byte* if (sequence.matchLength > 8) { assert(op < oMatchEnd); - ZSTD_wildcopy(op, match, (nint)sequence.matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + ZSTD_wildcopy( + op, + match, + (nint)sequence.matchLength - 8, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); } return sequenceLength; } - private static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol* dt) + private static void ZSTD_initFseState( + ZSTD_fseState* DStatePtr, + BIT_DStream_t* bitD, + ZSTD_seqSymbol* dt + ) { void* ptr = dt; ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; @@ -1148,7 +1822,12 @@ private static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* b } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ushort nextState, uint nbBits) + private static void ZSTD_updateFseStateWithDInfo( + ZSTD_fseState* DStatePtr, + BIT_DStream_t* bitD, + ushort nextState, + uint nbBits + ) { nuint lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = nextState + lowBits; @@ -1161,7 +1840,11 @@ private static void ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_D * @return : Sequence (litL + matchL + offset) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e longOffsets, int isLastSeq) + private static seq_t ZSTD_decodeSequence( + seqState_t* seqState, + ZSTD_longOffset_e longOffsets, + int isLastSeq + ) { seq_t seq; ZSTD_seqSymbol* llDInfo = seqState->stateLL.table + seqState->stateLL.state; @@ -1194,7 +1877,12 @@ private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e * avoids branches, and avoids accidentally reading 0 bits. 
*/ const uint extraBits = 30 - 25; - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << (int)extraBits); + offset = + ofBase + + ( + BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) + << (int)extraBits + ); BIT_reloadDStream(&seqState->DStream); offset += BIT_readBitsFast(&seqState->DStream, extraBits); } @@ -1222,7 +1910,10 @@ private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e { offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); { - nuint temp = offset == 3 ? seqState->prevOffset.e0 - 1 : (&seqState->prevOffset.e0)[offset]; + nuint temp = + offset == 3 + ? seqState->prevOffset.e0 - 1 + : (&seqState->prevOffset.e0)[offset]; temp -= temp == 0 ? 1U : 0U; if (offset != 1) seqState->prevOffset.e2 = seqState->prevOffset.e1; @@ -1247,11 +1938,26 @@ private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e BIT_reloadDStream(&seqState->DStream); if (isLastSeq == 0) { - ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); - ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); + ZSTD_updateFseStateWithDInfo( + &seqState->stateLL, + &seqState->DStream, + llNext, + llnbBits + ); + ZSTD_updateFseStateWithDInfo( + &seqState->stateML, + &seqState->DStream, + mlNext, + mlnbBits + ); if (MEM_32bits) BIT_reloadDStream(&seqState->DStream); - ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); + ZSTD_updateFseStateWithDInfo( + &seqState->stateOffb, + &seqState->DStream, + ofNext, + ofnbBits + ); BIT_reloadDStream(&seqState->DStream); } } @@ -1260,7 +1966,15 @@ private static seq_t ZSTD_decodeSequence(seqState_t* seqState, ZSTD_longOffset_e } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) 
+ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { byte* ip = (byte*)seqStart; byte* iend = ip + seqSize; @@ -1297,7 +2011,7 @@ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc { litLength = 0, matchLength = 0, - offset = 0 + offset = 0, }; for (; nbSeq != 0; nbSeq--) { @@ -1305,7 +2019,17 @@ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc if (litPtr + sequence.litLength > dctx->litBufferEnd) break; { - nuint oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - 32, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + nuint oneSeqSize = ZSTD_execSequenceSplitLitBuffer( + op, + oend, + litPtr + sequence.litLength - 32, + sequence, + &litPtr, + litBufferEnd, + prefixStart, + vBase, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1319,7 +2043,9 @@ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc { if (leftoverLit > (nuint)(oend - op)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); @@ -1331,7 +2057,16 @@ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc litBufferEnd = dctx->litExtraBuffer + (1 << 16); dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; { - nuint oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + sequence, + &litPtr, + litBufferEnd, + prefixStart, + vBase, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1345,8 +2080,21 @@ private static nuint 
ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc { for (; nbSeq != 0; nbSeq--) { - seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq == 1 ? 1 : 0); - nuint oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + nbSeq == 1 ? 1 : 0 + ); + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + sequence, + &litPtr, + litBufferEnd, + prefixStart, + vBase, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1408,14 +2156,25 @@ private static nuint ZSTD_decompressSequences_bodySplitLitBuffer(ZSTD_DCtx_s* dc } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequences_body( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { // HACK, force nbSeq to stack (better register usage) System.Threading.Thread.VolatileRead(ref nbSeq); byte* ip = (byte*)seqStart; byte* iend = ip + seqSize; byte* ostart = (byte*)dst; - byte* oend = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize) : dctx->litBuffer; + byte* oend = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_not_in_dst + ? 
ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize) + : dctx->litBuffer; byte* op = ostart; byte* litPtr = dctx->litPtr; byte* litEnd = litPtr + dctx->litSize; @@ -1430,7 +2189,10 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, { uint i; for (i = 0; i < 3; i++) - System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)i) = dctx->entropy.rep[i]; + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)i + ) = dctx->entropy.rep[i]; } if (ERR_isError(BIT_initDStream(ref seqState.DStream, ip, (nuint)(iend - ip)))) @@ -1482,15 +2244,45 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, * avoids branches, and avoids accidentally reading 0 bits. */ const uint extraBits = 30 - 25; - offset = ofBase + (BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofBits - extraBits) << (int)extraBits); - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); - offset += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, extraBits); + offset = + ofBase + + ( + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofBits - extraBits + ) << (int)extraBits + ); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + offset += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + extraBits + ); } else { - offset = ofBase + BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofBits); + offset = + ofBase + + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofBits + ); if (MEM_32bits) - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref 
seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); } seqState.prevOffset.e2 = seqState.prevOffset.e1; @@ -1502,15 +2294,35 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, uint ll0 = llDInfo->baseValue == 0 ? 1U : 0U; if (ofBits == 0) { - offset = System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)ll0); - seqState.prevOffset.e1 = System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, ll0 == 0 ? 1 : 0); + offset = System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)ll0 + ); + seqState.prevOffset.e1 = + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + ll0 == 0 ? 1 : 0 + ); seqState.prevOffset.e0 = offset; } else { - offset = ofBase + ll0 + BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, 1); + offset = + ofBase + + ll0 + + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + 1 + ); { - nuint temp = offset == 3 ? seqState.prevOffset.e0 - 1 : System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)offset); + nuint temp = + offset == 3 + ? seqState.prevOffset.e0 - 1 + : System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)offset + ); temp -= temp == 0 ? 
1U : 0U; if (offset != 1) seqState.prevOffset.e2 = seqState.prevOffset.e1; @@ -1524,23 +2336,79 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, } if (mlBits > 0) - sequence_matchLength += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, mlBits); + sequence_matchLength += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + mlBits + ); if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); if (llBits > 0) - sequence_litLength += BIT_readBitsFast(seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, llBits); + sequence_litLength += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + llBits + ); if (MEM_32bits) - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); if ((nbSeq == 1 ? 
1 : 0) == 0) { - ZSTD_updateFseStateWithDInfo(ref seqState.stateLL, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, llNext, llnbBits); - ZSTD_updateFseStateWithDInfo(ref seqState.stateML, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, mlNext, mlnbBits); + ZSTD_updateFseStateWithDInfo( + ref seqState.stateLL, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + llNext, + llnbBits + ); + ZSTD_updateFseStateWithDInfo( + ref seqState.stateML, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + mlNext, + mlnbBits + ); if (MEM_32bits) - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); - ZSTD_updateFseStateWithDInfo(ref seqState.stateOffb, seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ofNext, ofnbBits); - BIT_reloadDStream(ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, ref seqState_DStream_ptr, seqState_DStream_start, seqState_DStream_limitPtr); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + ZSTD_updateFseStateWithDInfo( + ref seqState.stateOffb, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofNext, + ofnbBits + ); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); } } @@ -1556,9 +2424,27 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, byte* match = oLitEnd - sequence_offset; assert(op != null); assert(oend_w < oend); - if (iLitEnd > litEnd || oMatchEnd > oend_w || MEM_32bits && (nuint)(oend - op) < oneSeqSize + 32) + if ( + iLitEnd > litEnd + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < oneSeqSize + 
32 + ) { - oneSeqSize = ZSTD_execSequenceEnd(op, oend, new seq_t { litLength = sequence_litLength, matchLength = sequence_matchLength, offset = sequence_offset }, &litPtr, litEnd, prefixStart, vBase, dictEnd); + oneSeqSize = ZSTD_execSequenceEnd( + op, + oend, + new seq_t + { + litLength = sequence_litLength, + matchLength = sequence_matchLength, + offset = sequence_offset, + }, + &litPtr, + litEnd, + prefixStart, + vBase, + dictEnd + ); goto returnOneSeqSize; } @@ -1572,7 +2458,12 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, ZSTD_copy16(op, litPtr); if (sequence_litLength > 16) { - ZSTD_wildcopy(op + 16, litPtr + 16, (nint)(sequence_litLength - 16), ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + op + 16, + litPtr + 16, + (nint)(sequence_litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap + ); } byte* opInner = oLitEnd; @@ -1581,7 +2472,9 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, { if (sequence_offset > (nuint)(oLitEnd - vBase)) { - oneSeqSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + oneSeqSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); goto returnOneSeqSize; } @@ -1607,7 +2500,12 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, assert(sequence_matchLength >= 1); if (sequence_offset >= 16) { - ZSTD_wildcopy(opInner, match, (nint)sequence_matchLength, ZSTD_overlap_e.ZSTD_no_overlap); + ZSTD_wildcopy( + opInner, + match, + (nint)sequence_matchLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); goto returnOneSeqSize; } @@ -1616,10 +2514,15 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, if (sequence_matchLength > 8) { assert(opInner < oMatchEnd); - ZSTD_wildcopy(opInner, match, (nint)sequence_matchLength - 8, ZSTD_overlap_e.ZSTD_overlap_src_before_dst); + ZSTD_wildcopy( + opInner, + match, + (nint)sequence_matchLength - 8, + 
ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); } - returnOneSeqSize: + returnOneSeqSize: ; } @@ -1629,7 +2532,13 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, } assert(nbSeq == 0); - if (BIT_endOfDStream(seqState_DStream_bitsConsumed, seqState_DStream_ptr, seqState_DStream_start) == 0) + if ( + BIT_endOfDStream( + seqState_DStream_bitsConsumed, + seqState_DStream_ptr, + seqState_DStream_start + ) == 0 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } @@ -1637,7 +2546,11 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, { uint i; for (i = 0; i < 3; i++) - dctx->entropy.rep[i] = (uint)System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)i); + dctx->entropy.rep[i] = (uint) + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)i + ); } } @@ -1658,25 +2571,65 @@ private static nuint ZSTD_decompressSequences_body(ZSTD_DCtx_s* dctx, void* dst, return (nuint)(op - ostart); } - private static nuint ZSTD_decompressSequences_default(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequences_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequences_body( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } - private static nuint ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequencesSplitLitBuffer_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + 
int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequences_bodySplitLitBuffer( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_prefetchMatch(nuint prefetchPos, seq_t sequence, byte* prefixStart, byte* dictEnd) + private static nuint ZSTD_prefetchMatch( + nuint prefetchPos, + seq_t sequence, + byte* prefixStart, + byte* dictEnd + ) { prefetchPos += sequence.litLength; { byte* matchBase = sequence.offset > prefetchPos ? dictEnd : prefixStart; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. * No consequence though : memory address is only used for prefetching, not for dereferencing */ - byte* match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, (nint)prefetchPos), (nint)sequence.offset); + byte* match = ZSTD_wrappedPtrSub( + ZSTD_wrappedPtrAdd(matchBase, (nint)prefetchPos), + (nint)sequence.offset + ); #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { @@ -1694,12 +2647,23 @@ private static nuint ZSTD_prefetchMatch(nuint prefetchPos, seq_t sequence, byte* * It's generally employed when block contains a significant portion of long-distance matches * or when coupled with a "cold" dictionary */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequencesLong_body( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { byte* ip = (byte*)seqStart; byte* iend = ip + seqSize; byte* ostart = (byte*)dst; - byte* oend = 
dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); + byte* oend = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_in_dst + ? dctx->litBuffer + : ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); byte* op = ostart; byte* litPtr = dctx->litPtr; byte* litBufferEnd = dctx->litBufferEnd; @@ -1733,15 +2697,26 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); for (seqNb = 0; seqNb < seqAdvance; seqNb++) { - seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq - 1 ? 1 : 0); + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + seqNb == nbSeq - 1 ? 1 : 0 + ); prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); sequences[seqNb] = sequence; } for (; seqNb < nbSeq; seqNb++) { - seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq - 1 ? 1 : 0); - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split && litPtr + sequences[seqNb - 8 & 8 - 1].litLength > dctx->litBufferEnd) + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + seqNb == nbSeq - 1 ? 
1 : 0 + ); + if ( + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + && litPtr + sequences[seqNb - 8 & 8 - 1].litLength > dctx->litBufferEnd + ) { /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); @@ -1749,7 +2724,9 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* { if (leftoverLit > (nuint)(oend - op)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); @@ -1761,10 +2738,24 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* litBufferEnd = dctx->litExtraBuffer + (1 << 16); dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; { - nuint oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + sequences[seqNb - 8 & 8 - 1], + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + prefetchPos = ZSTD_prefetchMatch( + prefetchPos, + sequence, + prefixStart, + dictEnd + ); sequences[seqNb & 8 - 1] = sequence; op += oneSeqSize; } @@ -1772,10 +2763,37 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* else { /* lit buffer is either wholly contained in first or second split, or not split at all*/ - nuint oneSeqSize = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split ? 
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[seqNb - 8 & 8 - 1].litLength - 32, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : ZSTD_execSequence(op, oend, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + nuint oneSeqSize = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + ? ZSTD_execSequenceSplitLitBuffer( + op, + oend, + litPtr + sequences[seqNb - 8 & 8 - 1].litLength - 32, + sequences[seqNb - 8 & 8 - 1], + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ) + : ZSTD_execSequence( + op, + oend, + sequences[seqNb - 8 & 8 - 1], + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + prefetchPos = ZSTD_prefetchMatch( + prefetchPos, + sequence, + prefixStart, + dictEnd + ); sequences[seqNb & 8 - 1] = sequence; op += oneSeqSize; } @@ -1790,14 +2808,19 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* for (; seqNb < nbSeq; seqNb++) { seq_t* sequence = &sequences[seqNb & 8 - 1]; - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) + if ( + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + && litPtr + sequence->litLength > dctx->litBufferEnd + ) { nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); if (leftoverLit != 0) { if (leftoverLit > (nuint)(oend - op)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); @@ -1809,7 +2832,16 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* litBufferEnd = dctx->litExtraBuffer + (1 << 16); dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; { - 
nuint oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + *sequence, + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1817,7 +2849,29 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* } else { - nuint oneSeqSize = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split ? ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - 32, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); + nuint oneSeqSize = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + ? ZSTD_execSequenceSplitLitBuffer( + op, + oend, + litPtr + sequence->litLength - 32, + *sequence, + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ) + : ZSTD_execSequence( + op, + oend, + *sequence, + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ); if (ERR_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1866,19 +2920,67 @@ private static nuint ZSTD_decompressSequencesLong_body(ZSTD_DCtx_s* dctx, void* return (nuint)(op - ostart); } - private static nuint ZSTD_decompressSequencesLong_default(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequencesLong_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequencesLong_body( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } - private static nuint 
ZSTD_decompressSequences(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequences( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequences_default( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } - private static nuint ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequencesSplitLitBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequencesSplitLitBuffer_default( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } /* ZSTD_decompressSequencesLong() : @@ -1886,9 +2988,25 @@ private static nuint ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx_s* dctx, v * aka out of cache. * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". 
* This function will try to mitigate main memory latency through the use of prefetching */ - private static nuint ZSTD_decompressSequencesLong(ZSTD_DCtx_s* dctx, void* dst, nuint maxDstSize, void* seqStart, nuint seqSize, int nbSeq, ZSTD_longOffset_e isLongOffset) + private static nuint ZSTD_decompressSequencesLong( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) { - return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + return ZSTD_decompressSequencesLong_default( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); } /** @@ -1912,7 +3030,7 @@ private static ZSTD_OffsetInfo ZSTD_getOffsetInfo(ZSTD_seqSymbol* offTable, int ZSTD_OffsetInfo info = new ZSTD_OffsetInfo { longOffsetShare = 0, - maxNbAdditionalBits = 0 + maxNbAdditionalBits = 0, }; if (nbSeq != 0) { @@ -1924,7 +3042,10 @@ private static ZSTD_OffsetInfo ZSTD_getOffsetInfo(ZSTD_seqSymbol* offTable, int assert(max <= 1 << 8); for (u = 0; u < max; u++) { - info.maxNbAdditionalBits = info.maxNbAdditionalBits > table[u].nbAdditionalBits ? info.maxNbAdditionalBits : table[u].nbAdditionalBits; + info.maxNbAdditionalBits = + info.maxNbAdditionalBits > table[u].nbAdditionalBits + ? 
info.maxNbAdditionalBits + : table[u].nbAdditionalBits; if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1; } @@ -1966,7 +3087,14 @@ private static nuint ZSTD_maxShortOffset() * @return : decompressed block size, * or an error code (which can be tested using ZSTD_isError()) */ - private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize, streaming_operation streaming) + private static nuint ZSTD_decompressBlock_internal( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + streaming_operation streaming + ) { byte* ip = (byte*)src; if (srcSize > ZSTD_blockSizeMax(dctx)) @@ -1975,7 +3103,14 @@ private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, } { - nuint litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); + nuint litCSize = ZSTD_decodeLiteralsBlock( + dctx, + src, + srcSize, + dst, + dstCapacity, + streaming + ); if (ERR_isError(litCSize)) return litCSize; ip += litCSize; @@ -1986,8 +3121,12 @@ private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, /* Compute the maximum block size, which must also work when !frame and fParams are unset. * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. */ - nuint blockSizeMax = dstCapacity < ZSTD_blockSizeMax(dctx) ? dstCapacity : ZSTD_blockSizeMax(dctx); - nuint totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((byte*)dst, (nint)blockSizeMax), (byte*)dctx->virtualStart); + nuint blockSizeMax = + dstCapacity < ZSTD_blockSizeMax(dctx) ? dstCapacity : ZSTD_blockSizeMax(dctx); + nuint totalHistorySize = ZSTD_totalHistorySize( + ZSTD_maybeNullPtrAdd((byte*)dst, (nint)blockSizeMax), + (byte*)dctx->virtualStart + ); /* isLongOffset must be true if there are long offsets. * Offsets are long if they are larger than ZSTD_maxShortOffset(). 
* We don't expect that to be the case in 64-bit mode. @@ -1999,7 +3138,9 @@ private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, * If isLongOffsets is true, then we will later check our decoding table to see * if it is even possible to generate long offsets. */ - ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits && totalHistorySize > ZSTD_maxShortOffset() ? 1 : 0); + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)( + MEM_32bits && totalHistorySize > ZSTD_maxShortOffset() ? 1 : 0 + ); int usePrefetchDecoder = dctx->ddictIsCold; int nbSeq; nuint seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); @@ -2012,15 +3153,25 @@ private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - if (MEM_64bits && sizeof(nuint) == sizeof(void*) && unchecked((nuint)(-1)) - (nuint)dst < 1 << 20) + if ( + MEM_64bits + && sizeof(nuint) == sizeof(void*) + && unchecked((nuint)(-1)) - (nuint)dst < 1 << 20 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - if (isLongOffset != default || usePrefetchDecoder == 0 && totalHistorySize > 1U << 24 && nbSeq > 8) + if ( + isLongOffset != default + || usePrefetchDecoder == 0 && totalHistorySize > 1U << 24 && nbSeq > 8 + ) { ZSTD_OffsetInfo info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); - if (isLongOffset != default && info.maxNbAdditionalBits <= (uint)(MEM_32bits ? 25 : 57)) + if ( + isLongOffset != default + && info.maxNbAdditionalBits <= (uint)(MEM_32bits ? 
25 : 57) + ) { isLongOffset = ZSTD_longOffset_e.ZSTD_lo_isRegularOffset; } @@ -2036,13 +3187,37 @@ private static nuint ZSTD_decompressBlock_internal(ZSTD_DCtx_s* dctx, void* dst, dctx->ddictIsCold = 0; if (usePrefetchDecoder != 0) { - return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + return ZSTD_decompressSequencesLong( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); } if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + return ZSTD_decompressSequencesSplitLitBuffer( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); else - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + return ZSTD_decompressSequences( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); } } @@ -2056,19 +3231,33 @@ private static void ZSTD_checkContinuity(ZSTD_DCtx_s* dctx, void* dst, nuint dst if (dst != dctx->previousDstEnd && dstSize > 0) { dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = (sbyte*)dst - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->virtualStart = + (sbyte*)dst - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); dctx->prefixStart = dst; dctx->previousDstEnd = dst; } } /* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. 
*/ - private static nuint ZSTD_decompressBlock_deprecated(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + private static nuint ZSTD_decompressBlock_deprecated( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { nuint dSize; dctx->isFrameDecompression = 0; ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, streaming_operation.not_streaming); + dSize = ZSTD_decompressBlock_internal( + dctx, + dst, + dstCapacity, + src, + srcSize, + streaming_operation.not_streaming + ); { nuint err_code = dSize; if (ERR_isError(err_code)) @@ -2082,22 +3271,48 @@ private static nuint ZSTD_decompressBlock_deprecated(ZSTD_DCtx_s* dctx, void* ds } /* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ - public static nuint ZSTD_decompressBlock(ZSTD_DCtx_s* dctx, void* dst, nuint dstCapacity, void* src, nuint srcSize) + public static nuint ZSTD_decompressBlock( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); } - private static void ZSTD_initFseState(ref ZSTD_fseState DStatePtr, ref BIT_DStream_t bitD, ZSTD_seqSymbol* dt) + private static void ZSTD_initFseState( + ref ZSTD_fseState DStatePtr, + ref BIT_DStream_t bitD, + ZSTD_seqSymbol* dt + ) { void* ptr = dt; ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; - DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog); - BIT_reloadDStream(ref bitD.bitContainer, ref bitD.bitsConsumed, ref bitD.ptr, bitD.start, bitD.limitPtr); + DStatePtr.state = BIT_readBits( + bitD.bitContainer, + ref bitD.bitsConsumed, + DTableH->tableLog + ); + BIT_reloadDStream( + ref bitD.bitContainer, + ref bitD.bitsConsumed, + ref bitD.ptr, + bitD.start, + bitD.limitPtr + ); DStatePtr.table = dt + 1; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - 
private static void ZSTD_updateFseStateWithDInfo(ref ZSTD_fseState DStatePtr, nuint bitD_bitContainer, ref uint bitD_bitsConsumed, ushort nextState, uint nbBits) + private static void ZSTD_updateFseStateWithDInfo( + ref ZSTD_fseState DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ushort nextState, + uint nbBits + ) { nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); DStatePtr.state = nextState + lowBits; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs index 5b37eaa40..c0fb99b45 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs @@ -1,205 +1,395 @@ -using static ZstdSharp.UnsafeHelper; using System; -using System.Runtime.InteropServices; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_base => new uint[36] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 18, - 20, - 22, - 24, - 28, - 32, - 40, - 48, - 64, - 0x80, - 0x100, - 0x200, - 0x400, - 0x800, - 0x1000, - 0x2000, - 0x4000, - 0x8000, - 0x10000 - }; - private static uint* LL_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_base)); + private static ReadOnlySpan Span_LL_base => + new uint[36] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 18, + 20, + 22, + 24, + 28, + 32, + 40, + 48, + 64, + 0x80, + 0x100, + 0x200, + 0x400, + 0x800, + 0x1000, + 0x2000, + 0x4000, + 0x8000, + 0x10000, + }; + private static uint* LL_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref 
MemoryMarshal.GetReference(Span_LL_base) + ); #else - private static readonly uint* LL_base = GetArrayPointer(new uint[36] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }); + private static readonly uint* LL_base = GetArrayPointer( + new uint[36] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 18, + 20, + 22, + 24, + 28, + 32, + 40, + 48, + 64, + 0x80, + 0x100, + 0x200, + 0x400, + 0x800, + 0x1000, + 0x2000, + 0x4000, + 0x8000, + 0x10000, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_base => new uint[32] - { - 0, - 1, - 1, - 5, - 0xD, - 0x1D, - 0x3D, - 0x7D, - 0xFD, - 0x1FD, - 0x3FD, - 0x7FD, - 0xFFD, - 0x1FFD, - 0x3FFD, - 0x7FFD, - 0xFFFD, - 0x1FFFD, - 0x3FFFD, - 0x7FFFD, - 0xFFFFD, - 0x1FFFFD, - 0x3FFFFD, - 0x7FFFFD, - 0xFFFFFD, - 0x1FFFFFD, - 0x3FFFFFD, - 0x7FFFFFD, - 0xFFFFFFD, - 0x1FFFFFFD, - 0x3FFFFFFD, - 0x7FFFFFFD - }; - private static uint* OF_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_base)); + private static ReadOnlySpan Span_OF_base => + new uint[32] + { + 0, + 1, + 1, + 5, + 0xD, + 0x1D, + 0x3D, + 0x7D, + 0xFD, + 0x1FD, + 0x3FD, + 0x7FD, + 0xFFD, + 0x1FFD, + 0x3FFD, + 0x7FFD, + 0xFFFD, + 0x1FFFD, + 0x3FFFD, + 0x7FFFD, + 0xFFFFD, + 0x1FFFFD, + 0x3FFFFD, + 0x7FFFFD, + 0xFFFFFD, + 0x1FFFFFD, + 0x3FFFFFD, + 0x7FFFFFD, + 0xFFFFFFD, + 0x1FFFFFFD, + 0x3FFFFFFD, + 0x7FFFFFFD, + }; + private static uint* OF_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_base) + ); #else - private static readonly uint* OF_base = GetArrayPointer(new uint[32] { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 
0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }); + private static readonly uint* OF_base = GetArrayPointer( + new uint[32] + { + 0, + 1, + 1, + 5, + 0xD, + 0x1D, + 0x3D, + 0x7D, + 0xFD, + 0x1FD, + 0x3FD, + 0x7FD, + 0xFFD, + 0x1FFD, + 0x3FFD, + 0x7FFD, + 0xFFFD, + 0x1FFFD, + 0x3FFFD, + 0x7FFFD, + 0xFFFFD, + 0x1FFFFD, + 0x3FFFFD, + 0x7FFFFD, + 0xFFFFFD, + 0x1FFFFFD, + 0x3FFFFFD, + 0x7FFFFFD, + 0xFFFFFFD, + 0x1FFFFFFD, + 0x3FFFFFFD, + 0x7FFFFFFD, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_bits => new byte[32] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31 - }; - private static byte* OF_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_bits)); + private static ReadOnlySpan Span_OF_bits => + new byte[32] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + }; + private static byte* OF_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_bits) + ); #else - private static readonly byte* OF_bits = GetArrayPointer(new byte[32] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }); + private static readonly byte* OF_bits = GetArrayPointer( + new byte[32] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_base => new uint[53] - { - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, 
- 30, - 31, - 32, - 33, - 34, - 35, - 37, - 39, - 41, - 43, - 47, - 51, - 59, - 67, - 83, - 99, - 0x83, - 0x103, - 0x203, - 0x403, - 0x803, - 0x1003, - 0x2003, - 0x4003, - 0x8003, - 0x10003 - }; - private static uint* ML_base => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_base)); + private static ReadOnlySpan Span_ML_base => + new uint[53] + { + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 37, + 39, + 41, + 43, + 47, + 51, + 59, + 67, + 83, + 99, + 0x83, + 0x103, + 0x203, + 0x403, + 0x803, + 0x1003, + 0x2003, + 0x4003, + 0x8003, + 0x10003, + }; + private static uint* ML_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_base) + ); #else - private static readonly uint* ML_base = GetArrayPointer(new uint[53] { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }); + private static readonly uint* ML_base = GetArrayPointer( + new uint[53] + { + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 37, + 39, + 41, + 43, + 47, + 51, + 59, + 67, + 83, + 99, + 0x83, + 0x103, + 0x203, + 0x403, + 0x803, + 0x1003, + 0x2003, + 0x4003, + 0x8003, + 0x10003, + } + ); #endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] private static int ZSTD_DCtx_get_bmi2(ZSTD_DCtx_s* dctx) { return 0; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs index ec35208f2..f7b916f83 100644 
--- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs @@ -1,13 +1,17 @@ -using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; using System; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + private static void ZSTD_fillDoubleHashTableForCDict( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashLarge = ms->hashTable; @@ -43,7 +47,11 @@ private static void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, void } } - private static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + private static void ZSTD_fillDoubleHashTableForCCtx( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashLarge = ms->hashTable; @@ -73,7 +81,12 @@ private static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void* } } - private static void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + private static void ZSTD_fillDoubleHashTable( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) { if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) { @@ -86,26 +99,29 @@ private static void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, void* end, Z } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_dummy => new byte[10] - { - 0x12, - 0x34, - 0x56, - 0x78, - 0x9a, - 0xbc, - 0xde, - 0xf0, - 0xe2, - 0xb4 - }; - private static byte* dummy => 
(byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_dummy)); + private static ReadOnlySpan Span_dummy => + new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 }; + private static byte* dummy => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dummy) + ); #else - private static readonly byte* dummy = GetArrayPointer(new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 }); + private static readonly byte* dummy = GetArrayPointer( + new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 } + ); #endif + [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + private static nuint ZSTD_compressBlock_doubleFast_noDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashLong = ms->hashTable; @@ -121,8 +137,10 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat byte* prefixLowest = @base + prefixLowestIndex; byte* iend = istart + srcSize; byte* ilimit = iend - 8; - uint offset_1 = rep[0], offset_2 = rep[1]; - uint offsetSaved1 = 0, offsetSaved2 = 0; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; nuint mLength; uint offset; uint curr; @@ -202,7 +220,12 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); { - byte* matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); + byte* matchl0_safe = ZSTD_selectAddr( + idxl0, + prefixLowestIndex, + matchl0, + &dummy[0] + ); if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { mLength = ZSTD_count(ip + 8, matchl0 + 8, 
iend) + 8; @@ -245,14 +268,13 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat hl0 = hl1; idxl0 = idxl1; matchl0 = matchl1; - } - while (ip1 <= ilimit); - _cleanup: + } while (ip1 <= ilimit); + _cleanup: offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; return (nuint)(iend - anchor); - _search_next_long: + _search_next_long: mLength = ZSTD_count(ip + 4, matchs0 + 4, iend) + 4; offset = (uint)(ip - matchs0); if (idxl1 > prefixLowestIndex && MEM_read64(matchl1) == MEM_read64(ip1)) @@ -274,7 +296,7 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat mLength++; } - _match_found: + _match_found: offset_2 = offset_1; offset_1 = offset; if (step < 4) @@ -284,7 +306,7 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat assert(offset > 0); ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); - _match_stored: + _match_stored: ip += mLength; anchor = ip; if (ip <= ilimit) @@ -297,7 +319,9 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); } - while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) + while ( + ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) + ) { /* store sequence */ nuint rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; @@ -319,7 +343,14 @@ private static nuint ZSTD_compressBlock_doubleFast_noDict_generic(ZSTD_MatchStat } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ZSTD_MatchState_t* ms, + 
SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashLong = ms->hashTable; @@ -336,7 +367,8 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M byte* prefixLowest = @base + prefixLowestIndex; byte* iend = istart + srcSize; byte* ilimit = iend - 8; - uint offset_1 = rep[0], offset_2 = rep[1]; + uint offset_1 = rep[0], + offset_2 = rep[1]; ZSTD_MatchState_t* dms = ms->dictMatchState; ZSTD_compressionParameters* dictCParams = &dms->cParams; uint* dictHashLong = dms->hashTable; @@ -406,12 +438,25 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M byte* matchLong = @base + matchIndexL; byte* match = @base + matchIndexS; uint repIndex = curr + 1 - offset_1; - byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; hashLong[h2] = hashSmall[h] = curr; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) + ) { byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; - mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + mLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; ip++; assert(1 >= 1); assert(1 <= 3); @@ -440,7 +485,14 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M assert(dictMatchL < dictEnd); if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { - mLength = ZSTD_count_2segments(ip + 8, dictMatchL + 8, iend, dictEnd, prefixLowest) + 8; + mLength = + ZSTD_count_2segments( + ip + 8, + dictMatchL + 8, + iend, + dictEnd, + prefixLowest + ) + 8; offset = curr - dictMatchIndexL - dictIndexDelta; while (ip > anchor && dictMatchL > dictStart && ip[-1] == dictMatchL[-1]) { @@ -474,16 +526,22 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M ip += (ip - anchor >> 8) + 1; continue; - _search_next_long: + _search_next_long: { nuint hl3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); nuint dictHashAndTagL3 = ZSTD_hashPtr(ip + 1, dictHBitsL, 8); uint matchIndexL3 = hashLong[hl3]; uint dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> 8]; - int dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); + int dictTagsMatchL3 = ZSTD_comparePackedTags( + dictMatchIndexAndTagL3, + dictHashAndTagL3 + ); byte* matchL3 = @base + matchIndexL3; hashLong[hl3] = curr + 1; - if (matchIndexL3 >= prefixLowestIndex && MEM_read64(matchL3) == MEM_read64(ip + 1)) + if ( + matchIndexL3 >= prefixLowestIndex + && MEM_read64(matchL3) == MEM_read64(ip + 1) + ) { mLength = ZSTD_count(ip + 9, matchL3 + 8, iend) + 8; ip++; @@ -503,12 +561,24 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M uint dictMatchIndexL3 = dictMatchIndexAndTagL3 >> 8; byte* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); - if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip + 1)) 
+ if ( + dictMatchL3 > dictStart + && MEM_read64(dictMatchL3) == MEM_read64(ip + 1) + ) { - mLength = ZSTD_count_2segments(ip + 1 + 8, dictMatchL3 + 8, iend, dictEnd, prefixLowest) + 8; + mLength = + ZSTD_count_2segments( + ip + 1 + 8, + dictMatchL3 + 8, + iend, + dictEnd, + prefixLowest + ) + 8; ip++; offset = curr + 1 - dictMatchIndexL3 - dictIndexDelta; - while (ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1]) + while ( + ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1] + ) { ip--; dictMatchL3--; @@ -522,7 +592,8 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M if (matchIndexS < prefixLowestIndex) { - mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; + mLength = + ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; offset = curr - matchIndexS; while (ip > anchor && match > dictStart && ip[-1] == match[-1]) { @@ -543,12 +614,12 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M } } - _match_found: + _match_found: offset_2 = offset_1; offset_1 = offset; assert(offset > 0); ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); - _match_stored: + _match_stored: ip += mLength; anchor = ip; if (ip <= ilimit) @@ -565,11 +636,24 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M { uint current2 = (uint)(ip - @base); uint repIndex2 = current2 - offset_2; - byte* repMatch2 = repIndex2 < prefixLowestIndex ? dictBase + repIndex2 - dictIndexDelta : @base + repIndex2; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2) != 0 && MEM_read32(repMatch2) == MEM_read32(ip)) + byte* repMatch2 = + repIndex2 < prefixLowestIndex + ? dictBase + repIndex2 - dictIndexDelta + : @base + repIndex2; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex2) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip) + ) { byte* repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; - nuint repLength2 = ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixLowest) + 4; + nuint repLength2 = + ZSTD_count_2segments( + ip + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixLowest + ) + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -594,47 +678,129 @@ private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic(ZSTD_M return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_doubleFast_noDict_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_noDict_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4); } - private static nuint ZSTD_compressBlock_doubleFast_noDict_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_noDict_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 5); } - private static nuint ZSTD_compressBlock_doubleFast_noDict_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_noDict_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 6); } - private static nuint ZSTD_compressBlock_doubleFast_noDict_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_noDict_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_doubleFast_noDict_generic(ms, 
seqStore, rep, src, srcSize, 7); } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4 + ); } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 5 + ); } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6 + ); } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return 
ZSTD_compressBlock_doubleFast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7 + ); } - private static nuint ZSTD_compressBlock_doubleFast(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mls = ms->cParams.minMatch; switch (mls) @@ -651,24 +817,61 @@ private static nuint ZSTD_compressBlock_doubleFast(ZSTD_MatchState_t* ms, SeqSto } } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mls = ms->cParams.minMatch; switch (mls) { default: case 4: - return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_doubleFast_dictMatchState_4( + ms, + seqStore, + rep, + src, + srcSize + ); case 5: - return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_doubleFast_dictMatchState_5( + ms, + seqStore, + rep, + src, + srcSize + ); case 6: - return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_doubleFast_dictMatchState_6( + ms, + seqStore, + rep, + src, + srcSize + ); case 7: - return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_doubleFast_dictMatchState_7( + ms, + seqStore, + rep, + src, + srcSize + ); } } - private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls) + 
private static nuint ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashLong = ms->hashTable; @@ -690,7 +893,8 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta byte* dictBase = ms->window.dictBase; byte* dictStart = dictBase + dictStartIndex; byte* dictEnd = dictBase + prefixStartIndex; - uint offset_1 = rep[0], offset_2 = rep[1]; + uint offset_1 = rep[0], + offset_2 = rep[1]; if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); while (ip < ilimit) @@ -710,10 +914,23 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta byte* repMatch = repBase + repIndex; nuint mLength; hashSmall[hSmall] = hashLong[hLong] = curr; - if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex) & (offset_1 <= curr + 1 - dictStartIndex ? 1 : 0)) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex) + & (offset_1 <= curr + 1 - dictStartIndex ? 1 : 0) + ) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) + ) { byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + mLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixStart + ) + 4; ip++; assert(1 >= 1); assert(1 <= 3); @@ -724,9 +941,12 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta if (matchLongIndex > dictStartIndex && MEM_read64(matchLong) == MEM_read64(ip)) { byte* matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; + byte* lowMatchPtr = + matchLongIndex < prefixStartIndex ? 
dictStart : prefixStart; uint offset; - mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) + 8; + mLength = + ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) + + 8; offset = curr - matchLongIndex; while (ip > anchor && matchLong > lowMatchPtr && ip[-1] == matchLong[-1]) { @@ -738,7 +958,14 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta offset_2 = offset_1; offset_1 = offset; assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + ZSTD_storeSeq( + seqStore, + (nuint)(ip - anchor), + anchor, + iend, + offset + 3, + mLength + ); } else if (matchIndex > dictStartIndex && MEM_read32(match) == MEM_read32(ip)) { @@ -748,11 +975,22 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta byte* match3 = match3Base + matchIndex3; uint offset; hashLong[h3] = curr + 1; - if (matchIndex3 > dictStartIndex && MEM_read64(match3) == MEM_read64(ip + 1)) + if ( + matchIndex3 > dictStartIndex + && MEM_read64(match3) == MEM_read64(ip + 1) + ) { byte* matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart; - mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, prefixStart) + 8; + byte* lowMatchPtr = + matchIndex3 < prefixStartIndex ? dictStart : prefixStart; + mLength = + ZSTD_count_2segments( + ip + 9, + match3 + 8, + iend, + matchEnd, + prefixStart + ) + 8; ip++; offset = curr + 1 - matchIndex3; while (ip > anchor && match3 > lowMatchPtr && ip[-1] == match3[-1]) @@ -765,8 +1003,11 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta else { byte* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = matchIndex < prefixStartIndex ? 
dictStart : prefixStart; - mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) + 4; + byte* lowMatchPtr = + matchIndex < prefixStartIndex ? dictStart : prefixStart; + mLength = + ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) + + 4; offset = curr - matchIndex; while (ip > anchor && match > lowMatchPtr && ip[-1] == match[-1]) { @@ -779,7 +1020,14 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta offset_2 = offset_1; offset_1 = offset; assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + ZSTD_storeSeq( + seqStore, + (nuint)(ip - anchor), + anchor, + iend, + offset + 3, + mLength + ); } else { @@ -804,11 +1052,25 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta { uint current2 = (uint)(ip - @base); uint repIndex2 = current2 - offset_2; - byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; - if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2) & (offset_2 <= current2 - dictStartIndex ? 1 : 0)) != 0 && MEM_read32(repMatch2) == MEM_read32(ip)) + byte* repMatch2 = + repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) + & (offset_2 <= current2 - dictStartIndex ? 1 : 0) + ) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip) + ) { byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; - nuint repLength2 = ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + nuint repLength2 = + ZSTD_count_2segments( + ip + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixStart + ) + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -833,27 +1095,85 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_MatchSta return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_doubleFast_extDict_4(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_extDict_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4 + ); } - private static nuint ZSTD_compressBlock_doubleFast_extDict_5(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_extDict_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 5 + ); } - private static nuint ZSTD_compressBlock_doubleFast_extDict_6(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_extDict_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6 + ); } - private static nuint 
ZSTD_compressBlock_doubleFast_extDict_7(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_extDict_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7 + ); } - private static nuint ZSTD_compressBlock_doubleFast_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_doubleFast_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mls = ms->cParams.minMatch; switch (mls) @@ -870,4 +1190,4 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict(ZSTD_MatchState_t* ms } } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs index e4a03f891..49aa8a7d8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs @@ -1,11 +1,15 @@ -using static ZstdSharp.UnsafeHelper; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { - private static void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm) + private static void ZSTD_fillHashTableForCDict( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -40,7 +44,11 @@ private static void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, void* end, } } - private static void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, void* end, 
ZSTD_dictTableLoadMethod_e dtlm) + private static void ZSTD_fillHashTableForCCtx( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -72,7 +80,12 @@ private static void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, void* end, } } - private static void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, void* end, ZSTD_dictTableLoadMethod_e dtlm, ZSTD_tableFillPurpose_e tfp) + private static void ZSTD_fillHashTable( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) { if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) { @@ -85,7 +98,12 @@ private static void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, void* end, ZSTD_di } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_match4Found_cmov(byte* currentPtr, byte* matchAddress, uint matchIdx, uint idxLowLimit) + private static int ZSTD_match4Found_cmov( + byte* currentPtr, + byte* matchAddress, + uint matchIdx, + uint idxLowLimit + ) { /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. * However expression below compiles into conditional move. @@ -97,7 +115,12 @@ private static int ZSTD_match4Found_cmov(byte* currentPtr, byte* matchAddress, u } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_match4Found_branch(byte* currentPtr, byte* matchAddress, uint matchIdx, uint idxLowLimit) + private static int ZSTD_match4Found_branch( + byte* currentPtr, + byte* matchAddress, + uint matchIdx, + uint idxLowLimit + ) { /* using a branch instead of a cmov, * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, @@ -162,7 +185,15 @@ private static int ZSTD_match4Found_branch(byte* currentPtr, byte* matchAddress, * This is also the work we do at the beginning to enter the loop initially. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, int useCmov) + private static nuint ZSTD_compressBlock_fast_noDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls, + int useCmov + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -184,7 +215,8 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m uint current0; uint rep_offset1 = rep[0]; uint rep_offset2 = rep[1]; - uint offsetSaved1 = 0, offsetSaved2 = 0; + uint offsetSaved1 = 0, + offsetSaved2 = 0; /* hash for ip0 */ nuint hash0; /* hash for ip1 */ @@ -201,7 +233,10 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m nuint step; byte* nextStep; const nuint kStepIncr = 1 << 8 - 1; - void* matchFound = useCmov != 0 ? (delegate* managed)(&ZSTD_match4Found_cmov) : (delegate* managed)(&ZSTD_match4Found_branch); + void* matchFound = + useCmov != 0 + ? (delegate* managed)(&ZSTD_match4Found_cmov) + : (delegate* managed)(&ZSTD_match4Found_branch); ip0 += ip0 == prefixStart ? 
1 : 0; { uint curr = (uint)(ip0 - @base); @@ -220,7 +255,7 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m } } - _start: + _start: step = stepSize; nextStep = ip0 + kStepIncr; ip1 = ip0 + 1; @@ -255,7 +290,14 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m goto _match; } - if (((delegate* managed)matchFound)(ip0, @base + matchIdx, matchIdx, prefixStartIndex) != 0) + if ( + ((delegate* managed)matchFound)( + ip0, + @base + matchIdx, + matchIdx, + prefixStartIndex + ) != 0 + ) { hashTable[hash1] = (uint)(ip1 - @base); goto _offset; @@ -269,7 +311,14 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m ip2 = ip3; current0 = (uint)(ip0 - @base); hashTable[hash0] = current0; - if (((delegate* managed)matchFound)(ip0, @base + matchIdx, matchIdx, prefixStartIndex) != 0) + if ( + ((delegate* managed)matchFound)( + ip0, + @base + matchIdx, + matchIdx, + prefixStartIndex + ) != 0 + ) { if (step <= 4) { @@ -299,14 +348,13 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m nextStep += kStepIncr; } - } - while (ip3 < ilimit); - _cleanup: + } while (ip3 < ilimit); + _cleanup: offsetSaved2 = offsetSaved1 != 0 && rep_offset1 != 0 ? offsetSaved1 : offsetSaved2; rep[0] = rep_offset1 != 0 ? rep_offset1 : offsetSaved1; rep[1] = rep_offset2 != 0 ? 
rep_offset2 : offsetSaved2; return (nuint)(iend - anchor); - _offset: + _offset: match0 = @base + matchIdx; rep_offset2 = rep_offset1; rep_offset1 = (uint)(ip0 - match0); @@ -320,7 +368,7 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m mLength++; } - _match: + _match: mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); ip0 += mLength; @@ -357,47 +405,101 @@ private static nuint ZSTD_compressBlock_fast_noDict_generic(ZSTD_MatchState_t* m goto _start; } - private static nuint ZSTD_compressBlock_fast_noDict_4_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_4_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1); } - private static nuint ZSTD_compressBlock_fast_noDict_5_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_5_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 1); } - private static nuint ZSTD_compressBlock_fast_noDict_6_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_6_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 1); } - private static nuint ZSTD_compressBlock_fast_noDict_7_1(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_7_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + 
void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 1); } - private static nuint ZSTD_compressBlock_fast_noDict_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); } - private static nuint ZSTD_compressBlock_fast_noDict_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); } - private static nuint ZSTD_compressBlock_fast_noDict_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); } - private static nuint ZSTD_compressBlock_fast_noDict_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_noDict_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); } - private static nuint ZSTD_compressBlock_fast(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mml = ms->cParams.minMatch; /* use cmov when "candidate in 
range" branch is likely unpredictable */ @@ -436,7 +538,15 @@ private static nuint ZSTD_compressBlock_fast(ZSTD_MatchState_t* ms, SeqStore_t* } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, uint hasStep) + private static nuint ZSTD_compressBlock_fast_dictMatchState_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls, + uint hasStep + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -453,7 +563,8 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt byte* prefixStart = @base + prefixStartIndex; byte* iend = istart + srcSize; byte* ilimit = iend - 8; - uint offset_1 = rep[0], offset_2 = rep[1]; + uint offset_1 = rep[0], + offset_2 = rep[1]; ZSTD_MatchState_t* dms = ms->dictMatchState; ZSTD_compressionParameters* dictCParams = &dms->cParams; uint* dictHashTable = dms->hashTable; @@ -509,14 +620,27 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt { byte* match = @base + matchIndex; uint repIndex = curr + 1 - offset_1; - byte* repMatch = repIndex < prefixStartIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; + byte* repMatch = + repIndex < prefixStartIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; nuint hash1 = ZSTD_hashPtr(ip1, hlog, mls); nuint dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); hashTable[hash0] = curr; - if (ZSTD_index_overlap_check(prefixStartIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip0 + 1)) + if ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip0 + 1) + ) { byte* repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; - mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + mLength = + ZSTD_count_2segments( + ip0 + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixStart + ) + 4; ip0++; assert(1 >= 1); assert(1 <= 3); @@ -529,13 +653,27 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt /* Found a possible dict match */ uint dictMatchIndex = dictMatchIndexAndTag >> 8; byte* dictMatch = dictBase + dictMatchIndex; - if (dictMatchIndex > dictStartIndex && MEM_read32(dictMatch) == MEM_read32(ip0)) + if ( + dictMatchIndex > dictStartIndex + && MEM_read32(dictMatch) == MEM_read32(ip0) + ) { if (matchIndex <= prefixStartIndex) { uint offset = curr - dictMatchIndex - dictIndexDelta; - mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; - while (ip0 > anchor && dictMatch > dictStart && ip0[-1] == dictMatch[-1]) + mLength = + ZSTD_count_2segments( + ip0 + 4, + dictMatch + 4, + iend, + dictEnd, + prefixStart + ) + 4; + while ( + ip0 > anchor + && dictMatch > dictStart + && ip0[-1] == dictMatch[-1] + ) { ip0--; dictMatch--; @@ -545,7 +683,14 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt offset_2 = offset_1; offset_1 = offset; assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offset + 3, mLength); + ZSTD_storeSeq( + seqStore, + (nuint)(ip0 - anchor), + anchor, + iend, + offset + 3, + mLength + ); break; } } @@ -566,7 +711,14 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt offset_2 = offset_1; offset_1 = offset; assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offset + 3, mLength); + ZSTD_storeSeq( + seqStore, + (nuint)(ip0 - anchor), + anchor, + iend, + offset + 3, + mLength + ); break; } @@ -599,11 +751,24 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt { uint current2 = (uint)(ip0 - 
@base); uint repIndex2 = current2 - offset_2; - byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : @base + repIndex2; - if (ZSTD_index_overlap_check(prefixStartIndex, repIndex2) != 0 && MEM_read32(repMatch2) == MEM_read32(ip0)) + byte* repMatch2 = + repIndex2 < prefixStartIndex + ? dictBase - dictIndexDelta + repIndex2 + : @base + repIndex2; + if ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip0) + ) { byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - nuint repLength2 = ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + nuint repLength2 = + ZSTD_count_2segments( + ip0 + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixStart + ) + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -625,33 +790,95 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState_generic(ZSTD_MatchSt ip1 = ip0 + stepSize; } - _cleanup: + _cleanup: rep[0] = offset_1; rep[1] = offset_2; return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_fast_dictMatchState_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_dictMatchState_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4, 0); + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4, + 0 + ); } - private static nuint ZSTD_compressBlock_fast_dictMatchState_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_dictMatchState_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, 
srcSize, 5, 0); + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 5, + 0 + ); } - private static nuint ZSTD_compressBlock_fast_dictMatchState_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_dictMatchState_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6, 0); + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6, + 0 + ); } - private static nuint ZSTD_compressBlock_fast_dictMatchState_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_dictMatchState_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7, 0); + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7, + 0 + ); } - private static nuint ZSTD_compressBlock_fast_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mls = ms->cParams.minMatch; assert(ms->dictMatchState != null); @@ -659,17 +886,49 @@ private static nuint ZSTD_compressBlock_fast_dictMatchState(ZSTD_MatchState_t* m { default: case 4: - return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_fast_dictMatchState_4_0( + ms, + seqStore, + rep, + src, + srcSize + ); case 5: - return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); + return 
ZSTD_compressBlock_fast_dictMatchState_5_0( + ms, + seqStore, + rep, + src, + srcSize + ); case 6: - return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_fast_dictMatchState_6_0( + ms, + seqStore, + rep, + src, + srcSize + ); case 7: - return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); + return ZSTD_compressBlock_fast_dictMatchState_7_0( + ms, + seqStore, + rep, + src, + srcSize + ); } } - private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, uint mls, uint hasStep) + private static nuint ZSTD_compressBlock_fast_extDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls, + uint hasStep + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -690,8 +949,10 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* byte* dictEnd = dictBase + prefixStartIndex; byte* iend = istart + srcSize; byte* ilimit = iend - 8; - uint offset_1 = rep[0], offset_2 = rep[1]; - uint offsetSaved1 = 0, offsetSaved2 = 0; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; byte* ip0 = istart; byte* ip1; byte* ip2; @@ -731,7 +992,7 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* } } - _start: + _start: step = stepSize; nextStep = ip0 + kStepIncr; ip1 = ip0 + 1; @@ -782,7 +1043,8 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* } { - uint mval = idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + uint mval = + idx >= dictStartIndex ? 
MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; if (MEM_read32(ip0) == mval) { goto _offset; @@ -799,7 +1061,8 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* current0 = (uint)(ip0 - @base); hashTable[hash0] = current0; { - uint mval = idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + uint mval = + idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; if (MEM_read32(ip0) == mval) { goto _offset; @@ -827,14 +1090,13 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* nextStep += kStepIncr; } - } - while (ip3 < ilimit); - _cleanup: + } while (ip3 < ilimit); + _cleanup: offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; return (nuint)(iend - anchor); - _offset: + _offset: { uint offset = current0 - idx; byte* lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; @@ -853,9 +1115,15 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* } } - _match: + _match: assert(matchEnd != null); - mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); + mLength += ZSTD_count_2segments( + ip0 + mLength, + match0 + mLength, + iend, + matchEnd, + prefixStart + ); ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); ip0 += mLength; anchor = ip0; @@ -872,11 +1140,20 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* while (ip0 <= ilimit) { uint repIndex2 = (uint)(ip0 - @base) - offset_2; - byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; - if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2) & (offset_2 > 0 ? 1 : 0)) != 0 && MEM_read32(repMatch2) == MEM_read32(ip0)) + byte* repMatch2 = + repIndex2 < prefixStartIndex ? 
dictBase + repIndex2 : @base + repIndex2; + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) + & (offset_2 > 0 ? 1 : 0) + ) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip0) + ) { byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - nuint repLength2 = ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + 4; + nuint repLength2 = + ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + + 4; { /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; @@ -900,27 +1177,57 @@ private static nuint ZSTD_compressBlock_fast_extDict_generic(ZSTD_MatchState_t* goto _start; } - private static nuint ZSTD_compressBlock_fast_extDict_4_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_extDict_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); } - private static nuint ZSTD_compressBlock_fast_extDict_5_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_extDict_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); } - private static nuint ZSTD_compressBlock_fast_extDict_6_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_extDict_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); } - private static nuint ZSTD_compressBlock_fast_extDict_7_0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint 
ZSTD_compressBlock_fast_extDict_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); } - private static nuint ZSTD_compressBlock_fast_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_fast_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { uint mls = ms->cParams.minMatch; assert(ms->dictMatchState == null); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs index 2ad2c6289..e6ccfb0ca 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs @@ -1,7 +1,7 @@ -using static ZstdSharp.UnsafeHelper; using System; -using System.Runtime.InteropServices; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics.X86; #endif @@ -14,275 +14,531 @@ namespace ZstdSharp.Unsafe public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_repStartValue => new uint[3] - { - 1, - 4, - 8 - }; - private static uint* repStartValue => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_repStartValue)); + private static ReadOnlySpan Span_repStartValue => new uint[3] { 1, 4, 8 }; + private static uint* repStartValue => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_repStartValue) + ); #else private static readonly uint* repStartValue = GetArrayPointer(new uint[3] { 1, 4, 8 }); #endif - private static readonly nuint* ZSTD_fcs_fieldSize = GetArrayPointer(new nuint[4] { 0, 2, 4, 8 }); - private static 
readonly nuint* ZSTD_did_fieldSize = GetArrayPointer(new nuint[4] { 0, 1, 2, 4 }); + private static readonly nuint* ZSTD_fcs_fieldSize = GetArrayPointer( + new nuint[4] { 0, 2, 4, 8 } + ); + private static readonly nuint* ZSTD_did_fieldSize = GetArrayPointer( + new nuint[4] { 0, 1, 2, 4 } + ); private const uint ZSTD_blockHeaderSize = 3; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_bits => new byte[36] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16 - }; - private static byte* LL_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_bits)); + private static ReadOnlySpan Span_LL_bits => + new byte[36] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + }; + private static byte* LL_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_bits) + ); #else - private static readonly byte* LL_bits = GetArrayPointer(new byte[36] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }); + private static readonly byte* LL_bits = GetArrayPointer( + new byte[36] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_defaultNorm => new short[36] - { - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1 - }; - private static short* LL_defaultNorm => 
(short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_LL_defaultNorm)); + private static ReadOnlySpan Span_LL_defaultNorm => + new short[36] + { + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + }; + private static short* LL_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_defaultNorm) + ); #else - private static readonly short* LL_defaultNorm = GetArrayPointer(new short[36] { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); + private static readonly short* LL_defaultNorm = GetArrayPointer( + new short[36] + { + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + } + ); #endif private const uint LL_defaultNormLog = 6; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_bits => new byte[53] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16 - }; - private static byte* ML_bits => (byte*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_bits)); + private static ReadOnlySpan Span_ML_bits => + new byte[53] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + }; + private static 
byte* ML_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_bits) + ); #else - private static readonly byte* ML_bits = GetArrayPointer(new byte[53] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }); + private static readonly byte* ML_bits = GetArrayPointer( + new byte[53] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_defaultNorm => new short[53] - { - 1, - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1, - -1, - -1, - -1 - }; - private static short* ML_defaultNorm => (short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ML_defaultNorm)); + private static ReadOnlySpan Span_ML_defaultNorm => + new short[53] + { + 1, + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + }; + private static short* ML_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_defaultNorm) + ); #else - private static readonly short* ML_defaultNorm = GetArrayPointer(new short[53] { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); + private static readonly short* ML_defaultNorm = GetArrayPointer( + new short[53] + { + 1, + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + } + ); #endif private const uint ML_defaultNormLog = 6; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_defaultNorm => new short[29] - { - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1, - -1 - }; - private static short* OF_defaultNorm => (short*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_OF_defaultNorm)); + private static ReadOnlySpan Span_OF_defaultNorm => + new short[29] + { + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1, + }; + private static short* OF_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_defaultNorm) + ); #else - private static readonly short* OF_defaultNorm = GetArrayPointer(new short[29] { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, (short)(-1), (short)(-1), (short)(-1), (short)(-1), (short)(-1) }); + private static readonly short* OF_defaultNorm = GetArrayPointer( + new short[29] + { + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + } + ); #endif private const uint OF_defaultNormLog = 5; + /*-******************************************* * 
Shared functions to include for inlining *********************************************/ @@ -314,7 +570,9 @@ private static void ZSTD_copy16(void* dst, void* src) #endif { var v1 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src); - var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src + 1); + var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned( + (ulong*)src + 1 + ); System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst, v1); System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst + 1, v2); } @@ -341,8 +599,7 @@ private static void ZSTD_wildcopy(void* dst, void* src, nint length, ZSTD_overla ZSTD_copy8(op, ip); op += 8; ip += 8; - } - while (op < oend); + } while (op < oend); } else { @@ -365,8 +622,7 @@ private static void ZSTD_wildcopy(void* dst, void* src, nint length, ZSTD_overla op += 16; ip += 16; } - } - while (op < oend); + } while (op < oend); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs index c537716e7..b5a3d7779 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs @@ -1,7 +1,7 @@ -using static ZstdSharp.UnsafeHelper; -using System.Runtime.CompilerServices; -using System.Numerics; using System; +using System.Numerics; +using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; @@ -49,13 +49,21 @@ private static void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, byte* ip, byte* iend, * sort one already inserted but unsorted position * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ - private static void ZSTD_insertDUBT1(ZSTD_MatchState_t* ms, uint curr, byte* inputEnd, uint nbCompares, uint btLow, ZSTD_dictMode_e dictMode) + private static void ZSTD_insertDUBT1( + ZSTD_MatchState_t* ms, + 
uint curr, + byte* inputEnd, + uint nbCompares, + uint btLow, + ZSTD_dictMode_e dictMode + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* bt = ms->chainTable; uint btLog = cParams->chainLog - 1; uint btMask = (uint)((1 << (int)btLog) - 1); - nuint commonLengthSmaller = 0, commonLengthLarger = 0; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; byte* @base = ms->window.@base; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; @@ -79,11 +87,22 @@ private static void ZSTD_insertDUBT1(ZSTD_MatchState_t* ms, uint curr, byte* inp { uint* nextPtr = bt + 2 * (matchIndex & btMask); /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? commonLengthSmaller + : commonLengthLarger; assert(matchIndex < curr); - if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit || curr < dictLimit) + if ( + dictMode != ZSTD_dictMode_e.ZSTD_extDict + || matchIndex + matchLength >= dictLimit + || curr < dictLimit + ) { - byte* mBase = dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit ? @base : dictBase; + byte* mBase = + dictMode != ZSTD_dictMode_e.ZSTD_extDict + || matchIndex + matchLength >= dictLimit + ? 
@base + : dictBase; assert(matchIndex + matchLength >= dictLimit || curr < dictLimit); match = mBase + matchIndex; matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); @@ -91,7 +110,13 @@ private static void ZSTD_insertDUBT1(ZSTD_MatchState_t* ms, uint curr, byte* inp else { match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); if (matchIndex + matchLength >= dictLimit) match = @base + matchIndex; } @@ -132,7 +157,16 @@ private static void ZSTD_insertDUBT1(ZSTD_MatchState_t* ms, uint curr, byte* inp *smallerPtr = *largerPtr = 0; } - private static nuint ZSTD_DUBT_findBetterDictMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offsetPtr, nuint bestLength, uint nbCompares, uint mls, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_DUBT_findBetterDictMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offsetPtr, + nuint bestLength, + uint nbCompares, + uint mls, + ZSTD_dictMode_e dictMode + ) { ZSTD_MatchState_t* dms = ms->dictMatchState; ZSTD_compressionParameters* dmsCParams = &dms->cParams; @@ -151,22 +185,39 @@ private static nuint ZSTD_DUBT_findBetterDictMatch(ZSTD_MatchState_t* ms, byte* uint* dictBt = dms->chainTable; uint btLog = dmsCParams->chainLog - 1; uint btMask = (uint)((1 << (int)btLog) - 1); - uint btLow = btMask >= dictHighLimit - dictLowLimit ? dictLowLimit : dictHighLimit - btMask; - nuint commonLengthSmaller = 0, commonLengthLarger = 0; + uint btLow = + btMask >= dictHighLimit - dictLowLimit ? 
dictLowLimit : dictHighLimit - btMask; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; assert(dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState); for (; nbCompares != 0 && dictMatchIndex > dictLowLimit; --nbCompares) { uint* nextPtr = dictBt + 2 * (dictMatchIndex & btMask); /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? commonLengthSmaller + : commonLengthLarger; byte* match = dictBase + dictMatchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); if (dictMatchIndex + matchLength >= dictHighLimit) match = @base + dictMatchIndex + dictIndexDelta; if (matchLength > bestLength) { uint matchIndex = dictMatchIndex + dictIndexDelta; - if (4 * (int)(matchLength - bestLength) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((uint)offsetPtr[0] + 1))) + if ( + 4 * (int)(matchLength - bestLength) + > (int)( + ZSTD_highbit32(curr - matchIndex + 1) + - ZSTD_highbit32((uint)offsetPtr[0] + 1) + ) + ) { bestLength = matchLength; assert(curr - matchIndex > 0); @@ -210,7 +261,14 @@ private static nuint ZSTD_DUBT_findBetterDictMatch(ZSTD_MatchState_t* ms, byte* return bestLength; } - private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offBasePtr, uint mls, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_DUBT_findBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offBasePtr, + uint mls, + ZSTD_dictMode_e dictMode + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -258,7 +316,8 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by } { - nuint commonLengthSmaller = 0, 
commonLengthLarger = 0; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; byte* dictEnd = dictBase + dictLimit; @@ -275,9 +334,15 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by { uint* nextPtr = bt + 2 * (matchIndex & btMask); /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? commonLengthSmaller + : commonLengthLarger; byte* match; - if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit) + if ( + dictMode != ZSTD_dictMode_e.ZSTD_extDict + || matchIndex + matchLength >= dictLimit + ) { match = @base + matchIndex; matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); @@ -285,7 +350,13 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by else { match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); if (matchIndex + matchLength >= dictLimit) match = @base + matchIndex; } @@ -294,7 +365,13 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (uint)matchLength; - if (4 * (int)(matchLength - bestLength) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((uint)*offBasePtr))) + if ( + 4 * (int)(matchLength - bestLength) + > (int)( + ZSTD_highbit32(curr - matchIndex + 1) + - ZSTD_highbit32((uint)*offBasePtr) + ) + ) { bestLength = matchLength; assert(curr - matchIndex > 0); @@ -344,7 +421,16 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by assert(nbCompares <= 
1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) { - bestLength = ZSTD_DUBT_findBetterDictMatch(ms, ip, iend, offBasePtr, bestLength, nbCompares, mls, dictMode); + bestLength = ZSTD_DUBT_findBetterDictMatch( + ms, + ip, + iend, + offBasePtr, + bestLength, + nbCompares, + mls, + dictMode + ); } assert(matchEndIdx > curr + 8); @@ -361,7 +447,14 @@ private static nuint ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, byte* ip, by /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_BtFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr, uint mls, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_BtFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr, + uint mls, + ZSTD_dictMode_e dictMode + ) { if (ip < ms->window.@base + ms->nextToUpdate) return 0; @@ -372,7 +465,10 @@ private static nuint ZSTD_BtFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* /*********************************** * Dedicated dict search ***********************************/ - private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, byte* ip) + private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary( + ZSTD_MatchState_t* ms, + byte* ip + ) { byte* @base = ms->window.@base; uint target = (uint)(ip - @base); @@ -430,7 +526,7 @@ private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState if (count == cacheSize) { - for (count = 0; count < chainLimit;) + for (count = 0; count < chainLimit; ) { if (i < minChain) { @@ -468,7 +564,7 @@ private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState assert(chainPos <= chainSize); } - for (hashIdx = (uint)(1 << (int)hashLog); hashIdx != 0;) + for (hashIdx = (uint)(1 << (int)hashLog); hashIdx != 0; ) { uint bucketIdx = --hashIdx << 2; uint chainPackedPointer = 
tmpHashTable[hashIdx]; @@ -497,7 +593,18 @@ private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState * If none are longer than the argument ml, then ml will be returned. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_dedicatedDictSearch_lazy_search(nuint* offsetPtr, nuint ml, uint nbAttempts, ZSTD_MatchState_t* dms, byte* ip, byte* iLimit, byte* prefixStart, uint curr, uint dictLimit, nuint ddsIdx) + private static nuint ZSTD_dedicatedDictSearch_lazy_search( + nuint* offsetPtr, + nuint ml, + uint nbAttempts, + ZSTD_MatchState_t* dms, + byte* ip, + byte* iLimit, + byte* prefixStart, + uint curr, + uint dictLimit, + nuint ddsIdx + ) { uint ddsLowestIndex = dms->window.dictLimit; byte* ddsBase = dms->window.@base; @@ -544,7 +651,8 @@ private static nuint ZSTD_dedicatedDictSearch_lazy_search(nuint* offsetPtr, nuin assert(match + 4 <= ddsEnd); if (MEM_read32(match) == MEM_read32(ip)) { - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; } if (currentMl > ml) @@ -586,7 +694,9 @@ private static nuint ZSTD_dedicatedDictSearch_lazy_search(nuint* offsetPtr, nuin assert(match + 4 <= ddsEnd); if (MEM_read32(match) == MEM_read32(ip)) { - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + + 4; } if (currentMl > ml) @@ -606,7 +716,13 @@ private static nuint ZSTD_dedicatedDictSearch_lazy_search(nuint* offsetPtr, nuin /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. 
not within extDict) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_insertAndFindFirstIndex_internal(ZSTD_MatchState_t* ms, ZSTD_compressionParameters* cParams, byte* ip, uint mls, uint lazySkipping) + private static uint ZSTD_insertAndFindFirstIndex_internal( + ZSTD_MatchState_t* ms, + ZSTD_compressionParameters* cParams, + byte* ip, + uint mls, + uint lazySkipping + ) { uint* hashTable = ms->hashTable; uint hashLog = cParams->hashLog; @@ -637,7 +753,14 @@ private static uint ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, byte* ip /* inlining is important to hardwire a hot branch (template emulation) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr, uint mls, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_HcFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr, + uint mls, + ZSTD_dictMode_e dictMode + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* chainTable = ms->chainTable; @@ -651,15 +774,20 @@ private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* uint curr = (uint)(ip - @base); uint maxDistance = 1U << (int)cParams->windowLog; uint lowestValid = ms->window.lowLimit; - uint withinMaxDistance = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint withinMaxDistance = + curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; uint minChain = curr > chainSize ? curr - chainSize : 0; uint nbAttempts = 1U << (int)cParams->searchLog; nuint ml = 4 - 1; ZSTD_MatchState_t* dms = ms->dictMatchState; - uint ddsHashLog = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - 2 : 0; - nuint ddsIdx = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 
ZSTD_hashPtr(ip, ddsHashLog, mls) << 2 : 0; + uint ddsHashLog = + dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - 2 : 0; + nuint ddsIdx = + dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ? ZSTD_hashPtr(ip, ddsHashLog, mls) << 2 + : 0; uint matchIndex; if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) { @@ -672,7 +800,13 @@ private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* #endif } - matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, (uint)ms->lazySkipping); + matchIndex = ZSTD_insertAndFindFirstIndex_internal( + ms, + cParams, + ip, + mls, + (uint)ms->lazySkipping + ); for (; matchIndex >= lowLimit && nbAttempts > 0; nbAttempts--) { nuint currentMl = 0; @@ -688,7 +822,9 @@ private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* byte* match = dictBase + matchIndex; assert(match + 4 <= dictEnd); if (MEM_read32(match) == MEM_read32(ip)) - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + + 4; } if (currentMl > ml) @@ -708,7 +844,18 @@ private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) { - ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + ml = ZSTD_dedicatedDictSearch_lazy_search( + offsetPtr, + ml, + nbAttempts, + dms, + ip, + iLimit, + prefixStart, + curr, + dictLimit, + ddsIdx + ); } else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) { @@ -728,7 +875,9 @@ private static nuint ZSTD_HcFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* byte* match = dmsBase + matchIndex; assert(match + 4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + + 4; if (currentMl > ml) { ml = currentMl; @@ -786,7 +935,12 @@ private static int ZSTD_isAligned(void* ptr, nuint align) * Performs prefetching for the hashTable and tagTable at a given row. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_prefetch(uint* hashTable, byte* tagTable, uint relRow, uint rowLog) + private static void ZSTD_row_prefetch( + uint* hashTable, + byte* tagTable, + uint relRow, + uint rowLog + ) { #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) @@ -832,7 +986,14 @@ private static void ZSTD_row_prefetch(uint* hashTable, byte* tagTable, uint relR * but not beyond iLimit. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, byte* @base, uint rowLog, uint mls, uint idx, byte* iLimit) + private static void ZSTD_row_fillHashCache( + ZSTD_MatchState_t* ms, + byte* @base, + uint rowLog, + uint mls, + uint idx, + byte* iLimit + ) { uint* hashTable = ms->hashTable; byte* tagTable = ms->tagTable; @@ -853,7 +1014,17 @@ private static void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, byte* @base, u * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. 
Also prefetches the appropriate rows from hashTable and tagTable. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_row_nextCachedHash(uint* cache, uint* hashTable, byte* tagTable, byte* @base, uint idx, uint hashLog, uint rowLog, uint mls, ulong hashSalt) + private static uint ZSTD_row_nextCachedHash( + uint* cache, + uint* hashTable, + byte* tagTable, + byte* @base, + uint idx, + uint hashLog, + uint rowLog, + uint mls, + ulong hashSalt + ) { uint newHash = (uint)ZSTD_hashPtrSalted(@base + idx + 8, hashLog + 8, mls, hashSalt); uint row = newHash >> 8 << (int)rowLog; @@ -869,7 +1040,15 @@ private static uint ZSTD_row_nextCachedHash(uint* cache, uint* hashTable, byte* * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, uint updateStartIdx, uint updateEndIdx, uint mls, uint rowLog, uint rowMask, uint useCache) + private static void ZSTD_row_update_internalImpl( + ZSTD_MatchState_t* ms, + uint updateStartIdx, + uint updateEndIdx, + uint mls, + uint rowLog, + uint rowMask, + uint useCache + ) { uint* hashTable = ms->hashTable; byte* tagTable = ms->tagTable; @@ -877,12 +1056,38 @@ private static void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, uint upd byte* @base = ms->window.@base; for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { - uint hash = useCache != 0 ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, @base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) : (uint)ZSTD_hashPtrSalted(@base + updateStartIdx, hashLog + 8, mls, ms->hashSalt); + uint hash = + useCache != 0 + ? 
ZSTD_row_nextCachedHash( + ms->hashCache, + hashTable, + tagTable, + @base, + updateStartIdx, + hashLog, + rowLog, + mls, + ms->hashSalt + ) + : (uint)ZSTD_hashPtrSalted( + @base + updateStartIdx, + hashLog + 8, + mls, + ms->hashSalt + ); uint relRow = hash >> 8 << (int)rowLog; uint* row = hashTable + relRow; byte* tagRow = tagTable + relRow; uint pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert(hash == ZSTD_hashPtrSalted(@base + updateStartIdx, hashLog + 8, mls, ms->hashSalt)); + assert( + hash + == ZSTD_hashPtrSalted( + @base + updateStartIdx, + hashLog + 8, + mls, + ms->hashSalt + ) + ); tagRow[pos] = (byte)(hash & (1U << 8) - 1); row[pos] = updateStartIdx; } @@ -893,7 +1098,14 @@ private static void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, uint upd * Skips sections of long matches as is necessary. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, byte* ip, uint mls, uint rowLog, uint rowMask, uint useCache) + private static void ZSTD_row_update_internal( + ZSTD_MatchState_t* ms, + byte* ip, + uint mls, + uint rowLog, + uint rowMask, + uint useCache + ) { uint idx = ms->nextToUpdate; byte* @base = ms->window.@base; @@ -923,7 +1135,10 @@ private static void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, byte* ip, ui */ private static void ZSTD_row_update(ZSTD_MatchState_t* ms, byte* ip) { - uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; uint rowMask = (1U << (int)rowLog) - 1; /* mls caps out at 6 */ uint mls = ms->cParams.minMatch < 6 ? 
ms->cParams.minMatch : 6; @@ -996,7 +1211,13 @@ private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint Vector128 chunk3 = Sse2.LoadVector128(src + 16 * 3); Vector128 equalMask3 = Sse2.CompareEqual(chunk3, comparisonMask); int matches3 = Sse2.MoveMask(equalMask3); - return BitOperations.RotateRight((ulong)matches3 << 48 | (ulong)matches2 << 32 | (ulong)matches1 << 16 | (uint)matches0, (int)head); + return BitOperations.RotateRight( + (ulong)matches3 << 48 + | (ulong)matches2 << 32 + | (ulong)matches1 << 16 + | (uint)matches0, + (int)head + ); } } #endif @@ -1008,7 +1229,12 @@ private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint * must rotate the "matches" bitfield to match up with the actual layout of the * entries within the hashTable */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrouped, uint rowEntries) + private static ulong ZSTD_row_getMatchMask( + byte* tagRow, + byte tag, + uint headGrouped, + uint rowEntries + ) { byte* src = tagRow; assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); @@ -1032,10 +1258,13 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou * 0x88 = 0b10001000. 
*/ Vector128 chunk = AdvSimd.LoadVector128(src); - Vector128 equalMask = AdvSimd.CompareEqual(chunk, AdvSimd.DuplicateToVector128(tag)).As(); + Vector128 equalMask = AdvSimd + .CompareEqual(chunk, AdvSimd.DuplicateToVector128(tag)) + .As(); Vector64 res = AdvSimd.ShiftRightLogicalNarrowingLower(equalMask, 4); ulong matches = res.As().GetElement(0); - return BitOperations.RotateRight(matches, (int)headGrouped) & 0x8888888888888888; + return BitOperations.RotateRight(matches, (int)headGrouped) + & 0x8888888888888888; } else if (rowEntries == 32) { @@ -1045,13 +1274,21 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou /* Same idea as with rowEntries == 16 but doing AND with * 0x55 = 0b01010101. */ - (Vector128 chunk0, Vector128 chunk1) = AdvSimd.Arm64.Load2xVector128AndUnzip((ushort*)src); + (Vector128 chunk0, Vector128 chunk1) = + AdvSimd.Arm64.Load2xVector128AndUnzip((ushort*)src); Vector128 dup = AdvSimd.DuplicateToVector128(tag); - Vector64 t0 = AdvSimd.ShiftRightLogicalNarrowingLower(AdvSimd.CompareEqual(chunk0.As(), dup).As(), 6); - Vector64 t1 = AdvSimd.ShiftRightLogicalNarrowingLower(AdvSimd.CompareEqual(chunk1.As(), dup).As(), 6); + Vector64 t0 = AdvSimd.ShiftRightLogicalNarrowingLower( + AdvSimd.CompareEqual(chunk0.As(), dup).As(), + 6 + ); + Vector64 t1 = AdvSimd.ShiftRightLogicalNarrowingLower( + AdvSimd.CompareEqual(chunk1.As(), dup).As(), + 6 + ); Vector64 res = AdvSimd.ShiftLeftAndInsert(t0, t1, 4); ulong matches = res.As().GetElement(0); - return BitOperations.RotateRight(matches, (int)headGrouped) & 0x5555555555555555; + return BitOperations.RotateRight(matches, (int)headGrouped) + & 0x5555555555555555; } #endif } @@ -1060,7 +1297,12 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou #if NET9_0_OR_GREATER if (AdvSimd.Arm64.IsSupported) { - (Vector128 chunk0, Vector128 chunk1, Vector128 chunk2, Vector128 chunk3) = AdvSimd.Arm64.Load4xVector128AndUnzip(src); + ( + Vector128 chunk0, + 
Vector128 chunk1, + Vector128 chunk2, + Vector128 chunk3 + ) = AdvSimd.Arm64.Load4xVector128AndUnzip(src); Vector128 dup = AdvSimd.DuplicateToVector128(tag); Vector128 cmp0 = AdvSimd.CompareEqual(chunk0, dup); Vector128 cmp1 = AdvSimd.CompareEqual(chunk1, dup); @@ -1071,9 +1313,12 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou Vector128 t1 = AdvSimd.ShiftRightAndInsert(cmp3, cmp2, 1); Vector128 t2 = AdvSimd.ShiftRightAndInsert(t1, t0, 2); Vector128 t3 = AdvSimd.ShiftRightAndInsert(t2, t2, 4); - Vector64 t4 = AdvSimd.ShiftRightLogicalNarrowingLower(t3.As(), 4); + Vector64 t4 = AdvSimd.ShiftRightLogicalNarrowingLower( + t3.As(), + 4 + ); ulong matches = t4.As().GetElement(0); - return BitOperations.RotateRight(matches, (int) headGrouped); + return BitOperations.RotateRight(matches, (int)headGrouped); } #endif } @@ -1101,8 +1346,7 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou matches <<= (int)chunkSize; matches |= chunk * extractMagic >> (int)shiftAmount; i -= (int)chunkSize; - } - while (i >= 0); + } while (i >= 0); } else { @@ -1116,8 +1360,7 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou matches <<= (int)chunkSize; matches |= (chunk >> 7) * extractMagic >> (int)shiftAmount; i -= (int)chunkSize; - } - while (i >= 0); + } while (i >= 0); } matches = ~matches; @@ -1152,7 +1395,15 @@ private static ulong ZSTD_row_getMatchMask(byte* tagRow, byte tag, uint headGrou * - Insert the tag into the equivalent row and position in the tagTable. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr, uint mls, ZSTD_dictMode_e dictMode, uint rowLog) + private static nuint ZSTD_RowFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr, + uint mls, + ZSTD_dictMode_e dictMode, + uint rowLog + ) { uint* hashTable = ms->hashTable; byte* tagTable = ms->tagTable; @@ -1167,7 +1418,8 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte uint curr = (uint)(ip - @base); uint maxDistance = 1U << (int)cParams->windowLog; uint lowestValid = ms->window.lowLimit; - uint withinMaxDistance = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint withinMaxDistance = + curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; uint rowEntries = 1U << (int)rowLog; @@ -1201,7 +1453,8 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte #endif } - ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (int)(cParams->searchLog - rowLog) : 0; + ddsExtraAttempts = + cParams->searchLog > rowLog ? 
1U << (int)(cParams->searchLog - rowLog) : 0; } if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) @@ -1220,7 +1473,17 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte if (ms->lazySkipping == 0) { ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1); - hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, @base, curr, hashLog, rowLog, mls, hashSalt); + hash = ZSTD_row_nextCachedHash( + hashCache, + hashTable, + tagTable, + @base, + curr, + hashLog, + rowLog, + mls, + hashSalt + ); } else { @@ -1241,7 +1504,8 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte ulong matches = ZSTD_row_getMatchMask(tagRow, (byte)tag, headGrouped, rowEntries); for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) { - uint matchPos = (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchPos = + (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; uint matchIndex = row[matchPos]; if (matchPos == 0) continue; @@ -1295,7 +1559,14 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte byte* match = dictBase + matchIndex; assert(match + 4 <= dictEnd); if (MEM_read32(match) == MEM_read32(ip)) - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments( + ip + 4, + match + 4, + iLimit, + dictEnd, + prefixStart + ) + 4; } if (currentMl > ml) @@ -1312,7 +1583,18 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) { - ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); + ml = ZSTD_dedicatedDictSearch_lazy_search( + offsetPtr, + ml, + nbAttempts + ddsExtraAttempts, + dms, + ip, + iLimit, + prefixStart, + curr, + dictLimit, + ddsIdx + ); } else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) { @@ -1327,10 +1609,16 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte uint* matchBuffer = stackalloc uint[64]; nuint numMatches = 0; nuint currMatch = 0; - ulong matches = ZSTD_row_getMatchMask(dmsTagRow, (byte)dmsTag, headGrouped, rowEntries); + ulong matches = ZSTD_row_getMatchMask( + dmsTagRow, + (byte)dmsTag, + headGrouped, + rowEntries + ); for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) { - uint matchPos = (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchPos = + (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; uint matchIndex = dmsRow[matchPos]; if (matchPos == 0) continue; @@ -1357,7 +1645,14 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte byte* match = dmsBase + matchIndex; assert(match + 4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) - currentMl = ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + 4; + currentMl = + ZSTD_count_2segments( + ip + 4, + match + 4, + iLimit, + dmsEnd, + prefixStart + ) + 4; } if (currentMl > ml) @@ -1377,402 +1672,1671 @@ private static nuint ZSTD_RowFindBestMatch(ZSTD_MatchState_t* ms, byte* ip, byte } /* Generate row search fns for each combination of (dictMode, mls, rowLog) */ - private static nuint ZSTD_RowFindBestMatch_noDict_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr 
+ ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_noDict_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_noDict_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_extDict_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_extDict_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 
4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 
4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 
6 : ms->cParams.searchLog) == 4); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 5); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - assert((4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 : 6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) == 6); - return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); } /* Generate binary Tree search fns for each combination of (dictMode, mls) */ - private static nuint ZSTD_BtFindBestMatch_noDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_noDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_BtFindBestMatch_noDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_noDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 5 + ); return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_BtFindBestMatch_noDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_noDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_BtFindBestMatch_extDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_extDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_extDict); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict + ); } - private static nuint ZSTD_BtFindBestMatch_extDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_extDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_extDict); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict + ); } - private static nuint ZSTD_BtFindBestMatch_extDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_extDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_extDict); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict + ); } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dictMatchState_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dictMatchState_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dictMatchState_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offBasePtr) + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } /* Generate hash chain search fns for each combination of (dictMode, mls) */ - private static nuint ZSTD_HcFindBestMatch_noDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_noDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_HcFindBestMatch_noDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_noDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 
4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_HcFindBestMatch_noDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_noDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict); } - private static nuint ZSTD_HcFindBestMatch_extDict_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_extDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict); } - private static nuint ZSTD_HcFindBestMatch_extDict_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_extDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict); } - private static nuint ZSTD_HcFindBestMatch_extDict_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_extDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict); } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dictMatchState_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dictMatchState_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) == 5); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dictMatchState_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dictMatchState); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_4(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 4); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_5(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 5); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ZSTD_MatchState_t* ms, byte* ip, byte* iLimit, nuint* offsetPtr) + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) { - assert((4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 : 6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) == 6); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } /** @@ -1800,7 +3364,16 @@ private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ZSTD_MatchState_ * If a match is found its offset is stored in @p offsetPtr. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, nuint* offsetPtr, uint mls, uint rowLog, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_searchMax( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offsetPtr, + uint mls, + uint rowLog, + searchMethod_e searchMethod, + ZSTD_dictMode_e dictMode + ) { if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) { @@ -1810,32 +3383,42 @@ private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, { if (rowLog == 4) return ZSTD_RowFindBestMatch_noDict_4_4(ms, ip, iend, offsetPtr); - return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_4_6(ms, ip, iend, offsetPtr); + return rowLog == 5 + ? ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) return ZSTD_RowFindBestMatch_noDict_5_4(ms, ip, iend, offsetPtr); - return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_5_6(ms, ip, iend, offsetPtr); + return rowLog == 5 + ? ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_5_6(ms, ip, iend, offsetPtr); } if (rowLog == 4) return ZSTD_RowFindBestMatch_noDict_6_4(ms, ip, iend, offsetPtr); - return rowLog == 5 ? ZSTD_RowFindBestMatch_noDict_6_5(ms, ip, iend, offsetPtr) : ZSTD_RowFindBestMatch_noDict_6_6(ms, ip, iend, offsetPtr); + return rowLog == 5 + ? ZSTD_RowFindBestMatch_noDict_6_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_6_6(ms, ip, iend, offsetPtr); } if (searchMethod == searchMethod_e.search_hashChain) { if (mls == 4) return ZSTD_HcFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); - return mls == 5 ? 
ZSTD_HcFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) : ZSTD_HcFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + return mls == 5 + ? ZSTD_HcFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) + : ZSTD_HcFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); } // searchMethod_e.search_binaryTree if (mls == 4) return ZSTD_BtFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); - return mls == 5 ? ZSTD_BtFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) : ZSTD_BtFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + return mls == 5 + ? ZSTD_BtFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) + : ZSTD_BtFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); } if (dictMode == ZSTD_dictMode_e.ZSTD_extDict) @@ -1894,27 +3477,57 @@ private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, if (mls == 4) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_4_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_4_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_4_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dictMatchState_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_5_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_5_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_5_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dictMatchState_5_6(ms, ip, iend, offsetPtr); } if (mls == 6) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_6_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_6_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_6_5(ms, ip, iend, offsetPtr); + return 
ZSTD_RowFindBestMatch_dictMatchState_6_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dictMatchState_6_6(ms, ip, iend, offsetPtr); } } @@ -1941,27 +3554,57 @@ private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, if (mls == 4) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ms, ip, iend, offsetPtr); } if (mls == 6) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( + ms, + ip, + iend, + offsetPtr + ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( + ms, + ip, + iend, + offsetPtr + ); return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ms, ip, iend, offsetPtr); } } @@ -1987,7 +3630,16 @@ private static nuint ZSTD_searchMax(ZSTD_MatchState_t* ms, byte* ip, byte* iend, * Common parser - lazy strategy *********************************/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* 
ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, searchMethod_e searchMethod, uint depth, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_compressBlock_lazy_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + searchMethod_e searchMethod, + uint depth, + ZSTD_dictMode_e dictMode + ) { byte* istart = (byte*)src; byte* ip = istart; @@ -1997,10 +3649,18 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS byte* @base = ms->window.@base; uint prefixLowestIndex = ms->window.dictLimit; byte* prefixLowest = @base + prefixLowestIndex; - uint mls = ms->cParams.minMatch <= 4 ? 4 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6; - uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; - uint offset_1 = rep[0], offset_2 = rep[1]; - uint offsetSaved1 = 0, offsetSaved2 = 0; + uint mls = + ms->cParams.minMatch <= 4 ? 4 + : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch + : 6; + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; int isDMS = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 1 : 0; int isDDS = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 1 : 0; int isDxS = isDMS != 0 || isDDS != 0 ? 1 : 0; @@ -2054,17 +3714,38 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS if (isDxS != 0) { uint repIndex = (uint)(ip - @base) + 1 - offset_1; - byte* repMatch = (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) && repIndex < prefixLowestIndex ? 
dictBase + (repIndex - dictIndexDelta) : @base + repIndex; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip + 1)) + byte* repMatch = + ( + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ) + && repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) + ) { byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - matchLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + matchLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; if (depth == 0) goto _storeSequence; } } - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offset_1 > 0 && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1)) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + && offset_1 > 0 + && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1) + ) { matchLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; if (depth == 0) @@ -2073,7 +3754,16 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { nuint offbaseFound = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &offbaseFound, + mls, + rowLog, + searchMethod, + dictMode + ); if (ml2 > matchLength) { matchLength = ml2; @@ -2095,7 +3785,12 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS while (ip < ilimit) { ip++; - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offBase != 0 && offset_1 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_1)) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + && offBase != 0 + && offset_1 > 0 + && MEM_read32(ip) == MEM_read32(ip - offset_1) + ) { nuint mlRep = 
ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; int gain2 = (int)(mlRep * 3); @@ -2113,13 +3808,28 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS if (isDxS != 0) { uint repIndex = (uint)(ip - @base) - offset_1; - byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip) + ) { byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - nuint mlRep = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + nuint mlRep = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; int gain2 = (int)(mlRep * 3); - int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + int gain1 = (int)( + matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + ); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -2133,7 +3843,16 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + dictMode + ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); @@ -2149,11 +3868,18 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS if (depth == 2 && ip < ilimit) { ip++; - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict && offBase != 0 && offset_1 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_1)) + if ( + 
dictMode == ZSTD_dictMode_e.ZSTD_noDict + && offBase != 0 + && offset_1 > 0 + && MEM_read32(ip) == MEM_read32(ip - offset_1) + ) { nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; int gain2 = (int)(mlRep * 4); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 + ); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -2167,13 +3893,29 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS if (isDxS != 0) { uint repIndex = (uint)(ip - @base) - offset_1; - byte* repMatch = repIndex < prefixLowestIndex ? dictBase + (repIndex - dictIndexDelta) : @base + repIndex; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip) + ) { - byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; - nuint mlRep = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + 4; + byte* repMatchEnd = + repIndex < prefixLowestIndex ? 
dictEnd : iend; + nuint mlRep = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; int gain2 = (int)(mlRep * 4); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 + ); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -2187,10 +3929,21 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + dictMode + ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 + ); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -2210,7 +3963,11 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { assert(offBase > 3); assert(offBase > 3); - while (start > anchor && start - (offBase - 3) > prefixLowest && start[-1] == (start - (offBase - 3))[-1]) + while ( + start > anchor + && start - (offBase - 3) > prefixLowest + && start[-1] == (start - (offBase - 3))[-1] + ) { start--; matchLength++; @@ -2221,7 +3978,10 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { assert(offBase > 3); uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); - byte* match = matchIndex < prefixLowestIndex ? dictBase + matchIndex - dictIndexDelta : @base + matchIndex; + byte* match = + matchIndex < prefixLowestIndex + ? dictBase + matchIndex - dictIndexDelta + : @base + matchIndex; byte* mStart = matchIndex < prefixLowestIndex ? 
dictLowest : prefixLowest; while (start > anchor && match > mStart && start[-1] == match[-1]) { @@ -2236,7 +3996,7 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS offset_1 = (uint)(offBase - 3); } - _storeSequence: + _storeSequence: { nuint litLength = (nuint)(start - anchor); ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); @@ -2259,11 +4019,24 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS { uint current2 = (uint)(ip - @base); uint repIndex = current2 - offset_2; - byte* repMatch = repIndex < prefixLowestIndex ? dictBase - dictIndexDelta + repIndex : @base + repIndex; - if (ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 && MEM_read32(repMatch) == MEM_read32(ip)) + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase - dictIndexDelta + repIndex + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip) + ) { byte* repEnd2 = repIndex < prefixLowestIndex ? 
dictEnd : iend; - matchLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd2, prefixLowest) + 4; + matchLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd2, + prefixLowest + ) + 4; offBase = offset_2; offset_2 = offset_1; offset_1 = (uint)offBase; @@ -2281,7 +4054,9 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) { - while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) + while ( + ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) + ) { matchLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; offBase = offset_2; @@ -2303,108 +4078,416 @@ private static nuint ZSTD_compressBlock_lazy_generic(ZSTD_MatchState_t* ms, SeqS return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_greedy(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_greedy_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, 
+ ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_compressBlock_greedy_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_greedy_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch_row(ZSTD_MatchState_t* 
ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_compressBlock_lazy(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_lazy_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + 
SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_compressBlock_lazy_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_lazy_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 
searchMethod_e.search_rowHash, 1, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_compressBlock_lazy2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_lazy2_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 2, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + 
srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint ZSTD_compressBlock_lazy2_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_lazy2_dictMatchState_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2, ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); } - private static nuint 
ZSTD_compressBlock_btlazy2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_binaryTree, 2, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_btlazy2_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_binaryTree, 2, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, searchMethod_e searchMethod, uint depth) + private static nuint ZSTD_compressBlock_lazy_extDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + searchMethod_e searchMethod, + uint depth + ) { byte* istart = (byte*)src; byte* ip = istart; @@ -2418,9 +4501,16 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* byte* dictEnd = dictBase + dictLimit; byte* dictStart = dictBase + ms->window.lowLimit; uint windowLog = ms->cParams.windowLog; - uint mls = ms->cParams.minMatch <= 4 ? 4 : ms->cParams.minMatch <= 6 ? 
ms->cParams.minMatch : 6; - uint rowLog = ms->cParams.searchLog <= 4 ? 4 : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; - uint offset_1 = rep[0], offset_2 = rep[1]; + uint mls = + ms->cParams.minMatch <= 4 ? 4 + : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch + : 6; + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; + uint offset_1 = rep[0], + offset_2 = rep[1]; ms->lazySkipping = 0; ip += ip == prefixStart ? 1 : 0; if (searchMethod == searchMethod_e.search_rowHash) @@ -2441,12 +4531,24 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* uint repIndex = curr + 1 - offset_1; byte* repBase = repIndex < dictLimit ? dictBase : @base; byte* repMatch = repBase + repIndex; - if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr + 1 - windowLow ? 1 : 0)) != 0) + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_1 <= curr + 1 - windowLow ? 1 : 0) + ) != 0 + ) if (MEM_read32(ip + 1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; - matchLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + matchLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; if (depth == 0) goto _storeSequence; } @@ -2454,7 +4556,16 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* { nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + ZSTD_dictMode_e.ZSTD_extDict + ); if (ml2 > matchLength) { matchLength = ml2; @@ -2482,14 +4593,28 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* uint repIndex = curr - offset_1; byte* repBase = repIndex < dictLimit ? dictBase : @base; byte* repMatch = repBase + repIndex; - if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr - windowLow ? 1 : 0)) != 0) + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_1 <= curr - windowLow ? 1 : 0) + ) != 0 + ) if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; - nuint repLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + nuint repLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; int gain2 = (int)(repLength * 3); - int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + int gain1 = (int)( + matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + ); if (repLength >= 4 && gain2 > gain1) { matchLength = repLength; @@ -2503,7 +4628,16 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* { nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + ZSTD_dictMode_e.ZSTD_extDict + ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); @@ -2526,14 +4660,28 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* uint repIndex = curr - offset_1; byte* repBase = repIndex < dictLimit ? dictBase : @base; byte* repMatch = repBase + repIndex; - if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_1 <= curr - windowLow ? 1 : 0)) != 0) + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_1 <= curr - windowLow ? 1 : 0) + ) != 0 + ) if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; - nuint repLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + nuint repLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; int gain2 = (int)(repLength * 4); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 + ); if (repLength >= 4 && gain2 > gain1) { matchLength = repLength; @@ -2547,10 +4695,21 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* { nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_dictMode_e.ZSTD_extDict); + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + ZSTD_dictMode_e.ZSTD_extDict + ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 + ); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -2568,7 +4727,8 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* { assert(offBase > 3); uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); - byte* match = matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; + byte* match = + matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; byte* mStart = matchIndex < dictLimit ? 
dictStart : prefixStart; while (start > anchor && match > mStart && start[-1] == match[-1]) { @@ -2582,7 +4742,7 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* offset_1 = (uint)(offBase - 3); } - _storeSequence: + _storeSequence: { nuint litLength = (nuint)(start - anchor); ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); @@ -2606,12 +4766,24 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* uint repIndex = repCurrent - offset_2; byte* repBase = repIndex < dictLimit ? dictBase : @base; byte* repMatch = repBase + repIndex; - if ((ZSTD_index_overlap_check(dictLimit, repIndex) & (offset_2 <= repCurrent - windowLow ? 1 : 0)) != 0) + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_2 <= repCurrent - windowLow ? 1 : 0) + ) != 0 + ) if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ byte* repEnd = repIndex < dictLimit ? dictEnd : iend; - matchLength = ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + 4; + matchLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; offBase = offset_2; offset_2 = offset_1; offset_1 = (uint)offBase; @@ -2632,39 +4804,137 @@ private static nuint ZSTD_compressBlock_lazy_extDict_generic(ZSTD_MatchState_t* return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_greedy_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 0); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0 + ); } - private static nuint 
ZSTD_compressBlock_greedy_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_greedy_extDict_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 0); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0 + ); } - private static nuint ZSTD_compressBlock_lazy_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_hashChain, 1); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1 + ); } - private static nuint ZSTD_compressBlock_lazy_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 1); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1 + ); } - private static nuint ZSTD_compressBlock_lazy2_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, 
srcSize, searchMethod_e.search_hashChain, 2); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2 + ); } - private static nuint ZSTD_compressBlock_lazy2_extDict_row(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_rowHash, 2); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2 + ); } - private static nuint ZSTD_compressBlock_btlazy2_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btlazy2_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, searchMethod_e.search_binaryTree, 2); + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2 + ); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs index 53a559439..407ec8799 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs @@ -15,7 +15,8 @@ private static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t state->rolling = ~(uint)0; if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) { - state->stopMask = ((ulong)1 << (int)hashRateLog) - 1 << (int)(maxBitsInMask - hashRateLog); + state->stopMask = + ((ulong)1 << (int)hashRateLog) - 1 << (int)(maxBitsInMask - hashRateLog); } else { @@ -28,7 +29,11 @@ private 
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t * splits. This effectively resets the hash state. This is used when skipping * over data, either at the beginning of a block, or skipping sections. */ - private static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state, byte* data, nuint minMatchLength) + private static void ZSTD_ldm_gear_reset( + ldmRollingHashState_t* state, + byte* data, + nuint minMatchLength + ) { ulong hash = state->rolling; nuint n = 0; @@ -71,10 +76,17 @@ private static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state, byte* data * * Precondition: The splits array must not be full. * Returns: The number of bytes processed. */ - private static nuint ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, byte* data, nuint size, nuint* splits, uint* numSplits) + private static nuint ZSTD_ldm_gear_feed( + ldmRollingHashState_t* state, + byte* data, + nuint size, + nuint* splits, + uint* numSplits + ) { nuint n; - ulong hash, mask; + ulong hash, + mask; hash = state->rolling; mask = state->stopMask; n = 0; @@ -142,7 +154,7 @@ private static nuint ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, byte* data } } - done: + done: state->rolling = hash; return n; } @@ -156,14 +168,26 @@ private static nuint ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, byte* data * * Ensures that the minMatchLength >= targetLength during optimal parsing. */ - private static void ZSTD_ldm_adjustParameters(ldmParams_t* @params, ZSTD_compressionParameters* cParams) + private static void ZSTD_ldm_adjustParameters( + ldmParams_t* @params, + ZSTD_compressionParameters* cParams + ) { @params->windowLog = cParams->windowLog; if (@params->hashRateLog == 0) { if (@params->hashLog > 0) { - assert(@params->hashLog <= (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30)); + assert( + @params->hashLog + <= (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 
30 + : 31 + : 30 + ) + ); if (@params->windowLog > @params->hashLog) { @params->hashRateLog = @params->windowLog - @params->hashLog; @@ -178,7 +202,24 @@ private static void ZSTD_ldm_adjustParameters(ldmParams_t* @params, ZSTD_compres if (@params->hashLog == 0) { - @params->hashLog = @params->windowLog - @params->hashRateLog <= 6 ? 6 : @params->windowLog - @params->hashRateLog <= (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30) ? @params->windowLog - @params->hashRateLog : (uint)((sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 ? 30 : 31 : 30); + @params->hashLog = + @params->windowLog - @params->hashRateLog <= 6 ? 6 + : @params->windowLog - @params->hashRateLog + <= (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ) + ? @params->windowLog - @params->hashRateLog + : (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ); } if (@params->minMatchLength == 0) @@ -191,10 +232,16 @@ private static void ZSTD_ldm_adjustParameters(ldmParams_t* @params, ZSTD_compres if (@params->bucketSizeLog == 0) { assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); - @params->bucketSizeLog = (uint)cParams->strategy <= 4 ? 4 : (uint)cParams->strategy <= 8 ? (uint)cParams->strategy : 8; + @params->bucketSizeLog = + (uint)cParams->strategy <= 4 ? 4 + : (uint)cParams->strategy <= 8 ? (uint)cParams->strategy + : 8; } - @params->bucketSizeLog = @params->bucketSizeLog < @params->hashLog ? @params->bucketSizeLog : @params->hashLog; + @params->bucketSizeLog = + @params->bucketSizeLog < @params->hashLog + ? 
@params->bucketSizeLog + : @params->hashLog; } /** ZSTD_ldm_getTableSize() : @@ -204,9 +251,12 @@ private static void ZSTD_ldm_adjustParameters(ldmParams_t* @params, ZSTD_compres private static nuint ZSTD_ldm_getTableSize(ldmParams_t @params) { nuint ldmHSize = (nuint)1 << (int)@params.hashLog; - nuint ldmBucketSizeLog = @params.bucketSizeLog < @params.hashLog ? @params.bucketSizeLog : @params.hashLog; + nuint ldmBucketSizeLog = + @params.bucketSizeLog < @params.hashLog ? @params.bucketSizeLog : @params.hashLog; nuint ldmBucketSize = (nuint)1 << (int)(@params.hashLog - ldmBucketSizeLog); - nuint totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * (nuint)sizeof(ldmEntry_t)); + nuint totalSize = + ZSTD_cwksp_alloc_size(ldmBucketSize) + + ZSTD_cwksp_alloc_size(ldmHSize * (nuint)sizeof(ldmEntry_t)); return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? totalSize : 0; } @@ -216,19 +266,30 @@ private static nuint ZSTD_ldm_getTableSize(ldmParams_t @params) */ private static nuint ZSTD_ldm_getMaxNbSeq(ldmParams_t @params, nuint maxChunkSize) { - return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? maxChunkSize / @params.minMatchLength : 0; + return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? maxChunkSize / @params.minMatchLength + : 0; } /** ZSTD_ldm_getBucket() : * Returns a pointer to the start of the bucket associated with hash. 
*/ - private static ldmEntry_t* ZSTD_ldm_getBucket(ldmState_t* ldmState, nuint hash, uint bucketSizeLog) + private static ldmEntry_t* ZSTD_ldm_getBucket( + ldmState_t* ldmState, + nuint hash, + uint bucketSizeLog + ) { return ldmState->hashTable + (hash << (int)bucketSizeLog); } /** ZSTD_ldm_insertEntry() : * Insert the entry with corresponding hash into the hash table */ - private static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, nuint hash, ldmEntry_t entry, uint bucketSizeLog) + private static void ZSTD_ldm_insertEntry( + ldmState_t* ldmState, + nuint hash, + ldmEntry_t entry, + uint bucketSizeLog + ) { byte* pOffset = ldmState->bucketOffsets + hash; uint offset = *pOffset; @@ -240,7 +301,12 @@ private static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, nuint hash, ldmEn * Returns the number of bytes that match backwards before pIn and pMatch. * * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ - private static nuint ZSTD_ldm_countBackwardsMatch(byte* pIn, byte* pAnchor, byte* pMatch, byte* pMatchBase) + private static nuint ZSTD_ldm_countBackwardsMatch( + byte* pIn, + byte* pAnchor, + byte* pMatch, + byte* pMatchBase + ) { nuint matchLength = 0; while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) @@ -258,7 +324,14 @@ private static nuint ZSTD_ldm_countBackwardsMatch(byte* pIn, byte* pAnchor, byte * even with the backwards match spanning 2 different segments. 
* * On reaching `pMatchBase`, start counting from mEnd */ - private static nuint ZSTD_ldm_countBackwardsMatch_2segments(byte* pIn, byte* pAnchor, byte* pMatch, byte* pMatchBase, byte* pExtDictStart, byte* pExtDictEnd) + private static nuint ZSTD_ldm_countBackwardsMatch_2segments( + byte* pIn, + byte* pAnchor, + byte* pMatch, + byte* pMatchBase, + byte* pExtDictStart, + byte* pExtDictEnd + ) { nuint matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) @@ -266,7 +339,12 @@ private static nuint ZSTD_ldm_countBackwardsMatch_2segments(byte* pIn, byte* pAn return matchLength; } - matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart); + matchLength += ZSTD_ldm_countBackwardsMatch( + pIn - matchLength, + pAnchor, + pExtDictEnd, + pExtDictStart + ); return matchLength; } @@ -283,10 +361,20 @@ private static nuint ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void* end) switch (ms->cParams.strategy) { case ZSTD_strategy.ZSTD_fast: - ZSTD_fillHashTable(ms, iend, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx); + ZSTD_fillHashTable( + ms, + iend, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx + ); break; case ZSTD_strategy.ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx); + ZSTD_fillDoubleHashTable( + ms, + iend, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx + ); break; case ZSTD_strategy.ZSTD_greedy: case ZSTD_strategy.ZSTD_lazy: @@ -304,7 +392,12 @@ private static nuint ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void* end) return 0; } - private static void ZSTD_ldm_fillHashTable(ldmState_t* ldmState, byte* ip, byte* iend, ldmParams_t* @params) + private static void ZSTD_ldm_fillHashTable( + ldmState_t* ldmState, + byte* ip, + 
byte* iend, + ldmParams_t* @params + ) { uint minMatchLength = @params->minMatchLength; uint bucketSizeLog = @params->bucketSizeLog; @@ -349,11 +442,19 @@ private static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, byte* ancho uint curr = (uint)(anchor - ms->window.@base); if (curr > ms->nextToUpdate + 1024) { - ms->nextToUpdate = curr - (512 < curr - ms->nextToUpdate - 1024 ? 512 : curr - ms->nextToUpdate - 1024); + ms->nextToUpdate = + curr + - (512 < curr - ms->nextToUpdate - 1024 ? 512 : curr - ms->nextToUpdate - 1024); } } - private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, RawSeqStore_t* rawSeqStore, ldmParams_t* @params, void* src, nuint srcSize) + private static nuint ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, + RawSeqStore_t* rawSeqStore, + ldmParams_t* @params, + void* src, + nuint srcSize + ) { /* LDM parameters */ int extDict = (int)ZSTD_window_hasExtDict(ldmState->window); @@ -391,7 +492,13 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R nuint hashed; uint n; numSplits = 0; - hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(ilimit - ip), splits, &numSplits); + hashed = ZSTD_ldm_gear_feed( + &hashState, + ip, + (nuint)(ilimit - ip), + splits, + &numSplits + ); for (n = 0; n < numSplits; n++) { byte* split = ip + splits[n] - minMatchLength; @@ -400,7 +507,11 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R candidates[n].split = split; candidates[n].hash = hash; candidates[n].checksum = (uint)(xxhash >> 32); - candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, @params->bucketSizeLog); + candidates[n].bucket = ZSTD_ldm_getBucket( + ldmState, + hash, + @params->bucketSizeLog + ); #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { @@ -411,7 +522,10 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R for (n = 0; n < numSplits; n++) { - nuint forwardMatchLength = 
0, backwardMatchLength = 0, bestMatchLength = 0, mLength; + nuint forwardMatchLength = 0, + backwardMatchLength = 0, + bestMatchLength = 0, + mLength; uint offset; byte* split = candidates[n].split; uint checksum = candidates[n].checksum; @@ -430,7 +544,9 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R for (cur = bucket; cur < bucket + entsPerBucket; cur++) { - nuint curForwardMatchLength, curBackwardMatchLength, curTotalMatchLength; + nuint curForwardMatchLength, + curBackwardMatchLength, + curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { continue; @@ -442,13 +558,26 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R byte* pMatch = curMatchBase + cur->offset; byte* matchEnd = cur->offset < dictLimit ? dictEnd : iend; byte* lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; - curForwardMatchLength = ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr); + curForwardMatchLength = ZSTD_count_2segments( + split, + pMatch, + iend, + matchEnd, + lowPrefixPtr + ); if (curForwardMatchLength < minMatchLength) { continue; } - curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd); + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( + split, + anchor, + pMatch, + lowMatchPtr, + dictStart, + dictEnd + ); } else { @@ -459,7 +588,12 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R continue; } - curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr); + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch( + split, + anchor, + pMatch, + lowPrefixPtr + ); } curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; @@ -483,7 +617,9 @@ private static nuint ZSTD_ldm_generateSequences_internal(ldmState_t* ldmState, R { rawSeq* seq = rawSeqStore->seq + rawSeqStore->size; if (rawSeqStore->size == 
rawSeqStore->capacity) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); seq->litLength = (uint)(split - backwardMatchLength - anchor); seq->matchLength = (uint)mLength; seq->offset = offset; @@ -534,16 +670,26 @@ private static void ZSTD_ldm_reduceTable(ldmEntry_t* table, uint size, uint redu * NOTE: This function returns an error if it runs out of space to store * sequences. */ - private static nuint ZSTD_ldm_generateSequences(ldmState_t* ldmState, RawSeqStore_t* sequences, ldmParams_t* @params, void* src, nuint srcSize) + private static nuint ZSTD_ldm_generateSequences( + ldmState_t* ldmState, + RawSeqStore_t* sequences, + ldmParams_t* @params, + void* src, + nuint srcSize + ) { uint maxDist = 1U << (int)@params->windowLog; byte* istart = (byte*)src; byte* iend = istart + srcSize; const nuint kMaxChunkSize = 1 << 20; - nuint nbChunks = srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); + nuint nbChunks = + srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); nuint chunk; nuint leftoverSize = 0; - assert(unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) >= kMaxChunkSize); + assert( + unchecked((uint)-1) - (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) + >= kMaxChunkSize + ); assert(ldmState->window.nextSrc >= (byte*)src + srcSize); assert(sequences->pos <= sequences->size); assert(sequences->size <= sequences->capacity); @@ -556,16 +702,42 @@ private static nuint ZSTD_ldm_generateSequences(ldmState_t* ldmState, RawSeqStor nuint newLeftoverSize; nuint prevSize = sequences->size; assert(chunkStart < iend); - if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd) != 0) + if ( + ZSTD_window_needOverflowCorrection( + ldmState->window, + 0, + maxDist, + ldmState->loadedDictEnd, + chunkStart, + chunkEnd + ) != 0 + ) { uint ldmHSize = 1U << (int)@params->hashLog; - uint correction = ZSTD_window_correctOverflow(&ldmState->window, 0, maxDist, chunkStart); + uint correction = ZSTD_window_correctOverflow( + &ldmState->window, + 0, + maxDist, + chunkStart + ); ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); ldmState->loadedDictEnd = 0; } - ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, null); - newLeftoverSize = ZSTD_ldm_generateSequences_internal(ldmState, sequences, @params, chunkStart, chunkSize); + ZSTD_window_enforceMaxDist( + &ldmState->window, + chunkEnd, + maxDist, + &ldmState->loadedDictEnd, + null + ); + newLeftoverSize = ZSTD_ldm_generateSequences_internal( + ldmState, + sequences, + @params, + chunkStart, + chunkSize + ); if (ERR_isError(newLeftoverSize)) return newLeftoverSize; if (prevSize < sequences->size) @@ -590,7 +762,11 @@ private static nuint ZSTD_ldm_generateSequences(ldmState_t* ldmState, RawSeqStor * Avoids emitting matches less than `minMatch` bytes. * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). 
*/ - private static void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, nuint srcSize, uint minMatch) + private static void ZSTD_ldm_skipSequences( + RawSeqStore_t* rawSeqStore, + nuint srcSize, + uint minMatch + ) { while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { @@ -632,7 +808,11 @@ private static void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, nuint src * Returns the current sequence to handle, or if the rest of the block should * be literals, it returns a sequence with offset == 0. */ - private static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore, uint remaining, uint minMatch) + private static rawSeq maybeSplitSequence( + RawSeqStore_t* rawSeqStore, + uint remaining, + uint minMatch + ) { rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; assert(sequence.offset > 0); @@ -706,11 +886,23 @@ private static void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nu * two. We handle that case correctly, and update `rawSeqStore` appropriately. * NOTE: This function does not return any errors. 
*/ - private static nuint ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, ZSTD_paramSwitch_e useRowMatchFinder, void* src, nuint srcSize) + private static nuint ZSTD_ldm_blockCompress( + RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + ZSTD_paramSwitch_e useRowMatchFinder, + void* src, + nuint srcSize + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint minMatch = cParams->minMatch; - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + cParams->strategy, + useRowMatchFinder, + ZSTD_matchState_dictMode(ms) + ); /* Input bounds */ byte* istart = (byte*)src; byte* iend = istart + srcSize; @@ -744,7 +936,14 @@ private static nuint ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ZSTD_Mat rep[i] = rep[i - 1]; rep[0] = sequence.offset; assert(sequence.offset > 0); - ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, sequence.offset + 3, sequence.matchLength); + ZSTD_storeSeq( + seqStore, + newLitLength, + ip - newLitLength, + iend, + sequence.offset + 3, + sequence.matchLength + ); ip += sequence.matchLength; } } @@ -754,4 +953,4 @@ private static nuint ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ZSTD_Mat return blockCompressor(ms, seqStore, rep, ip, (nuint)(iend - ip)); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs index 7027cb6f7..5cf4c7a43 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs @@ -1,275 +1,540 @@ -using static ZstdSharp.UnsafeHelper; using System; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace 
ZstdSharp.Unsafe { public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ZSTD_ldm_gearTab => new ulong[256] - { - 0xf5b8f72c5f77775c, - 0x84935f266b7ac412, - 0xb647ada9ca730ccc, - 0xb065bb4b114fb1de, - 0x34584e7e8c3a9fd0, - 0x4e97e17c6ae26b05, - 0x3a03d743bc99a604, - 0xcecd042422c4044f, - 0x76de76c58524259e, - 0x9c8528f65badeaca, - 0x86563706e2097529, - 0x2902475fa375d889, - 0xafb32a9739a5ebe6, - 0xce2714da3883e639, - 0x21eaf821722e69e, - 0x37b628620b628, - 0x49a8d455d88caf5, - 0x8556d711e6958140, - 0x4f7ae74fc605c1f, - 0x829f0c3468bd3a20, - 0x4ffdc885c625179e, - 0x8473de048a3daf1b, - 0x51008822b05646b2, - 0x69d75d12b2d1cc5f, - 0x8c9d4a19159154bc, - 0xc3cc10f4abbd4003, - 0xd06ddc1cecb97391, - 0xbe48e6e7ed80302e, - 0x3481db31cee03547, - 0xacc3f67cdaa1d210, - 0x65cb771d8c7f96cc, - 0x8eb27177055723dd, - 0xc789950d44cd94be, - 0x934feadc3700b12b, - 0x5e485f11edbdf182, - 0x1e2e2a46fd64767a, - 0x2969ca71d82efa7c, - 0x9d46e9935ebbba2e, - 0xe056b67e05e6822b, - 0x94d73f55739d03a0, - 0xcd7010bdb69b5a03, - 0x455ef9fcd79b82f4, - 0x869cb54a8749c161, - 0x38d1a4fa6185d225, - 0xb475166f94bbe9bb, - 0xa4143548720959f1, - 0x7aed4780ba6b26ba, - 0xd0ce264439e02312, - 0x84366d746078d508, - 0xa8ce973c72ed17be, - 0x21c323a29a430b01, - 0x9962d617e3af80ee, - 0xab0ce91d9c8cf75b, - 0x530e8ee6d19a4dbc, - 0x2ef68c0cf53f5d72, - 0xc03a681640a85506, - 0x496e4e9f9c310967, - 0x78580472b59b14a0, - 0x273824c23b388577, - 0x66bf923ad45cb553, - 0x47ae1a5a2492ba86, - 0x35e304569e229659, - 0x4765182a46870b6f, - 0x6cbab625e9099412, - 0xddac9a2e598522c1, - 0x7172086e666624f2, - 0xdf5003ca503b7837, - 0x88c0c1db78563d09, - 0x58d51865acfc289d, - 0x177671aec65224f1, - 0xfb79d8a241e967d7, - 0x2be1e101cad9a49a, - 0x6625682f6e29186b, - 0x399553457ac06e50, - 0x35dffb4c23abb74, - 0x429db2591f54aade, - 0xc52802a8037d1009, - 0x6acb27381f0b25f3, - 0xf45e2551ee4f823b, - 0x8b0ea2d99580c2f7, - 0x3bed519cbcb4e1e1, - 0xff452823dbb010a, - 0x9d42ed614f3dd267, - 0x5b9313c06257c57b, 
- 0xa114b8008b5e1442, - 0xc1fe311c11c13d4b, - 0x66e8763ea34c5568, - 0x8b982af1c262f05d, - 0xee8876faaa75fbb7, - 0x8a62a4d0d172bb2a, - 0xc13d94a3b7449a97, - 0x6dbbba9dc15d037c, - 0xc786101f1d92e0f1, - 0xd78681a907a0b79b, - 0xf61aaf2962c9abb9, - 0x2cfd16fcd3cb7ad9, - 0x868c5b6744624d21, - 0x25e650899c74ddd7, - 0xba042af4a7c37463, - 0x4eb1a539465a3eca, - 0xbe09dbf03b05d5ca, - 0x774e5a362b5472ba, - 0x47a1221229d183cd, - 0x504b0ca18ef5a2df, - 0xdffbdfbde2456eb9, - 0x46cd2b2fbee34634, - 0xf2aef8fe819d98c3, - 0x357f5276d4599d61, - 0x24a5483879c453e3, - 0x88026889192b4b9, - 0x28da96671782dbec, - 0x4ef37c40588e9aaa, - 0x8837b90651bc9fb3, - 0xc164f741d3f0e5d6, - 0xbc135a0a704b70ba, - 0x69cd868f7622ada, - 0xbc37ba89e0b9c0ab, - 0x47c14a01323552f6, - 0x4f00794bacee98bb, - 0x7107de7d637a69d5, - 0x88af793bb6f2255e, - 0xf3c6466b8799b598, - 0xc288c616aa7f3b59, - 0x81ca63cf42fca3fd, - 0x88d85ace36a2674b, - 0xd056bd3792389e7, - 0xe55c396c4e9dd32d, - 0xbefb504571e6c0a6, - 0x96ab32115e91e8cc, - 0xbf8acb18de8f38d1, - 0x66dae58801672606, - 0x833b6017872317fb, - 0xb87c16f2d1c92864, - 0xdb766a74e58b669c, - 0x89659f85c61417be, - 0xc8daad856011ea0c, - 0x76a4b565b6fe7eae, - 0xa469d085f6237312, - 0xaaf0365683a3e96c, - 0x4dbb746f8424f7b8, - 0x638755af4e4acc1, - 0x3d7807f5bde64486, - 0x17be6d8f5bbb7639, - 0x903f0cd44dc35dc, - 0x67b672eafdf1196c, - 0xa676ff93ed4c82f1, - 0x521d1004c5053d9d, - 0x37ba9ad09ccc9202, - 0x84e54d297aacfb51, - 0xa0b4b776a143445, - 0x820d471e20b348e, - 0x1874383cb83d46dc, - 0x97edeec7a1efe11c, - 0xb330e50b1bdc42aa, - 0x1dd91955ce70e032, - 0xa514cdb88f2939d5, - 0x2791233fd90db9d3, - 0x7b670a4cc50f7a9b, - 0x77c07d2a05c6dfa5, - 0xe3778b6646d0a6fa, - 0xb39c8eda47b56749, - 0x933ed448addbef28, - 0xaf846af6ab7d0bf4, - 0xe5af208eb666e49, - 0x5e6622f73534cd6a, - 0x297daeca42ef5b6e, - 0x862daef3d35539a6, - 0xe68722498f8e1ea9, - 0x981c53093dc0d572, - 0xfa09b0bfbf86fbf5, - 0x30b1e96166219f15, - 0x70e7d466bdc4fb83, - 0x5a66736e35f2a8e9, - 0xcddb59d2b7c1baef, - 0xd6c7d247d26d8996, - 
0xea4e39eac8de1ba3, - 0x539c8bb19fa3aff2, - 0x9f90e4c5fd508d8, - 0xa34e5956fbaf3385, - 0x2e2f8e151d3ef375, - 0x173691e9b83faec1, - 0xb85a8d56bf016379, - 0x8382381267408ae3, - 0xb90f901bbdc0096d, - 0x7c6ad32933bcec65, - 0x76bb5e2f2c8ad595, - 0x390f851a6cf46d28, - 0xc3e6064da1c2da72, - 0xc52a0c101cfa5389, - 0xd78eaf84a3fbc530, - 0x3781b9e2288b997e, - 0x73c2f6dea83d05c4, - 0x4228e364c5b5ed7, - 0x9d7a3edf0da43911, - 0x8edcfeda24686756, - 0x5e7667a7b7a9b3a1, - 0x4c4f389fa143791d, - 0xb08bc1023da7cddc, - 0x7ab4be3ae529b1cc, - 0x754e6132dbe74ff9, - 0x71635442a839df45, - 0x2f6fb1643fbe52de, - 0x961e0a42cf7a8177, - 0xf3b45d83d89ef2ea, - 0xee3de4cf4a6e3e9b, - 0xcd6848542c3295e7, - 0xe4cee1664c78662f, - 0x9947548b474c68c4, - 0x25d73777a5ed8b0b, - 0xc915b1d636b7fc, - 0x21c2ba75d9b0d2da, - 0x5f6b5dcf608a64a1, - 0xdcf333255ff9570c, - 0x633b922418ced4ee, - 0xc136dde0b004b34a, - 0x58cc83b05d4b2f5a, - 0x5eb424dda28e42d2, - 0x62df47369739cd98, - 0xb4e0b42485e4ce17, - 0x16e1f0c1f9a8d1e7, - 0x8ec3916707560ebf, - 0x62ba6e2df2cc9db3, - 0xcbf9f4ff77d83a16, - 0x78d9d7d07d2bbcc4, - 0xef554ce1e02c41f4, - 0x8d7581127eccf94d, - 0xa9b53336cb3c8a05, - 0x38c42c0bf45c4f91, - 0x640893cdf4488863, - 0x80ec34bc575ea568, - 0x39f324f5b48eaa40, - 0xe9d9ed1f8eff527f, - 0x9224fc058cc5a214, - 0xbaba00b04cfe7741, - 0x309a9f120fcf52af, - 0xa558f3ec65626212, - 0x424bec8b7adabe2f, - 0x41622513a6aea433, - 0xb88da2d5324ca798, - 0xd287733b245528a4, - 0x9a44697e6d68aec3, - 0x7b1093be2f49bb28, - 0x50bbec632e3d8aad, - 0x6cd90723e1ea8283, - 0x897b9e7431b02bf3, - 0x219efdcb338a7047, - 0x3b0311f0a27c0656, - 0xdb17bf91c0db96e7, - 0x8cd4fd6b4e85a5b2, - 0xfab071054ba6409d, - 0x40d6fe831fa9dfd9, - 0xaf358debad7d791e, - 0xeb8d0e25a65e3e58, - 0xbbcbd3df14e08580, - 0xcf751f27ecdab2b, - 0x2b4da14f2613d8f4 - }; - private static ulong* ZSTD_ldm_gearTab => (ulong*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab)); + private static ReadOnlySpan Span_ZSTD_ldm_gearTab => + new 
ulong[256] + { + 0xf5b8f72c5f77775c, + 0x84935f266b7ac412, + 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, + 0x34584e7e8c3a9fd0, + 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, + 0xcecd042422c4044f, + 0x76de76c58524259e, + 0x9c8528f65badeaca, + 0x86563706e2097529, + 0x2902475fa375d889, + 0xafb32a9739a5ebe6, + 0xce2714da3883e639, + 0x21eaf821722e69e, + 0x37b628620b628, + 0x49a8d455d88caf5, + 0x8556d711e6958140, + 0x4f7ae74fc605c1f, + 0x829f0c3468bd3a20, + 0x4ffdc885c625179e, + 0x8473de048a3daf1b, + 0x51008822b05646b2, + 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, + 0xc3cc10f4abbd4003, + 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, + 0x3481db31cee03547, + 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, + 0x8eb27177055723dd, + 0xc789950d44cd94be, + 0x934feadc3700b12b, + 0x5e485f11edbdf182, + 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, + 0x9d46e9935ebbba2e, + 0xe056b67e05e6822b, + 0x94d73f55739d03a0, + 0xcd7010bdb69b5a03, + 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, + 0x38d1a4fa6185d225, + 0xb475166f94bbe9bb, + 0xa4143548720959f1, + 0x7aed4780ba6b26ba, + 0xd0ce264439e02312, + 0x84366d746078d508, + 0xa8ce973c72ed17be, + 0x21c323a29a430b01, + 0x9962d617e3af80ee, + 0xab0ce91d9c8cf75b, + 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, + 0xc03a681640a85506, + 0x496e4e9f9c310967, + 0x78580472b59b14a0, + 0x273824c23b388577, + 0x66bf923ad45cb553, + 0x47ae1a5a2492ba86, + 0x35e304569e229659, + 0x4765182a46870b6f, + 0x6cbab625e9099412, + 0xddac9a2e598522c1, + 0x7172086e666624f2, + 0xdf5003ca503b7837, + 0x88c0c1db78563d09, + 0x58d51865acfc289d, + 0x177671aec65224f1, + 0xfb79d8a241e967d7, + 0x2be1e101cad9a49a, + 0x6625682f6e29186b, + 0x399553457ac06e50, + 0x35dffb4c23abb74, + 0x429db2591f54aade, + 0xc52802a8037d1009, + 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, + 0x8b0ea2d99580c2f7, + 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, + 0x9d42ed614f3dd267, + 0x5b9313c06257c57b, + 0xa114b8008b5e1442, + 0xc1fe311c11c13d4b, + 0x66e8763ea34c5568, + 0x8b982af1c262f05d, + 0xee8876faaa75fbb7, + 0x8a62a4d0d172bb2a, + 
0xc13d94a3b7449a97, + 0x6dbbba9dc15d037c, + 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, + 0xf61aaf2962c9abb9, + 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, + 0x25e650899c74ddd7, + 0xba042af4a7c37463, + 0x4eb1a539465a3eca, + 0xbe09dbf03b05d5ca, + 0x774e5a362b5472ba, + 0x47a1221229d183cd, + 0x504b0ca18ef5a2df, + 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, + 0xf2aef8fe819d98c3, + 0x357f5276d4599d61, + 0x24a5483879c453e3, + 0x88026889192b4b9, + 0x28da96671782dbec, + 0x4ef37c40588e9aaa, + 0x8837b90651bc9fb3, + 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, + 0x69cd868f7622ada, + 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, + 0x4f00794bacee98bb, + 0x7107de7d637a69d5, + 0x88af793bb6f2255e, + 0xf3c6466b8799b598, + 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, + 0x88d85ace36a2674b, + 0xd056bd3792389e7, + 0xe55c396c4e9dd32d, + 0xbefb504571e6c0a6, + 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, + 0x66dae58801672606, + 0x833b6017872317fb, + 0xb87c16f2d1c92864, + 0xdb766a74e58b669c, + 0x89659f85c61417be, + 0xc8daad856011ea0c, + 0x76a4b565b6fe7eae, + 0xa469d085f6237312, + 0xaaf0365683a3e96c, + 0x4dbb746f8424f7b8, + 0x638755af4e4acc1, + 0x3d7807f5bde64486, + 0x17be6d8f5bbb7639, + 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, + 0xa676ff93ed4c82f1, + 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, + 0x84e54d297aacfb51, + 0xa0b4b776a143445, + 0x820d471e20b348e, + 0x1874383cb83d46dc, + 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, + 0x1dd91955ce70e032, + 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, + 0x7b670a4cc50f7a9b, + 0x77c07d2a05c6dfa5, + 0xe3778b6646d0a6fa, + 0xb39c8eda47b56749, + 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, + 0xe5af208eb666e49, + 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, + 0x862daef3d35539a6, + 0xe68722498f8e1ea9, + 0x981c53093dc0d572, + 0xfa09b0bfbf86fbf5, + 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, + 0x5a66736e35f2a8e9, + 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, + 0xea4e39eac8de1ba3, + 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, + 0xa34e5956fbaf3385, + 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, + 
0xb85a8d56bf016379, + 0x8382381267408ae3, + 0xb90f901bbdc0096d, + 0x7c6ad32933bcec65, + 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, + 0xc3e6064da1c2da72, + 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, + 0x3781b9e2288b997e, + 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, + 0x9d7a3edf0da43911, + 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, + 0x4c4f389fa143791d, + 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, + 0x754e6132dbe74ff9, + 0x71635442a839df45, + 0x2f6fb1643fbe52de, + 0x961e0a42cf7a8177, + 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, + 0xcd6848542c3295e7, + 0xe4cee1664c78662f, + 0x9947548b474c68c4, + 0x25d73777a5ed8b0b, + 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, + 0x5f6b5dcf608a64a1, + 0xdcf333255ff9570c, + 0x633b922418ced4ee, + 0xc136dde0b004b34a, + 0x58cc83b05d4b2f5a, + 0x5eb424dda28e42d2, + 0x62df47369739cd98, + 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, + 0x8ec3916707560ebf, + 0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, + 0x78d9d7d07d2bbcc4, + 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, + 0xa9b53336cb3c8a05, + 0x38c42c0bf45c4f91, + 0x640893cdf4488863, + 0x80ec34bc575ea568, + 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, + 0x9224fc058cc5a214, + 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, + 0xa558f3ec65626212, + 0x424bec8b7adabe2f, + 0x41622513a6aea433, + 0xb88da2d5324ca798, + 0xd287733b245528a4, + 0x9a44697e6d68aec3, + 0x7b1093be2f49bb28, + 0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, + 0x897b9e7431b02bf3, + 0x219efdcb338a7047, + 0x3b0311f0a27c0656, + 0xdb17bf91c0db96e7, + 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, + 0x40d6fe831fa9dfd9, + 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, + 0xbbcbd3df14e08580, + 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4, + }; + private static ulong* ZSTD_ldm_gearTab => + (ulong*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab) + ); #else - private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer(new ulong[256] { 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, 0xb065bb4b114fb1de, 
0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889, 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e, 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140, 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e, 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f, 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391, 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210, 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be, 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a, 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b, 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4, 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb, 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312, 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01, 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc, 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967, 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553, 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f, 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2, 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d, 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a, 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74, 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3, 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1, 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b, 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568, 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a, 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1, 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9, 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463, 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba, 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 
0xdffbdfbde2456eb9, 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61, 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec, 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6, 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab, 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5, 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59, 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7, 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc, 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb, 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be, 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312, 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1, 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc, 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d, 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445, 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c, 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5, 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5, 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28, 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a, 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9, 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15, 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef, 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2, 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375, 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3, 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595, 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389, 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4, 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756, 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc, 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45, 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea, 
0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f, 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc, 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c, 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a, 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17, 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3, 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4, 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91, 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40, 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741, 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f, 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4, 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad, 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047, 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2, 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e, 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b, 0x2b4da14f2613d8f4 }); + private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer( + new ulong[256] + { + 0xf5b8f72c5f77775c, + 0x84935f266b7ac412, + 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, + 0x34584e7e8c3a9fd0, + 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, + 0xcecd042422c4044f, + 0x76de76c58524259e, + 0x9c8528f65badeaca, + 0x86563706e2097529, + 0x2902475fa375d889, + 0xafb32a9739a5ebe6, + 0xce2714da3883e639, + 0x21eaf821722e69e, + 0x37b628620b628, + 0x49a8d455d88caf5, + 0x8556d711e6958140, + 0x4f7ae74fc605c1f, + 0x829f0c3468bd3a20, + 0x4ffdc885c625179e, + 0x8473de048a3daf1b, + 0x51008822b05646b2, + 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, + 0xc3cc10f4abbd4003, + 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, + 0x3481db31cee03547, + 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, + 0x8eb27177055723dd, + 0xc789950d44cd94be, + 0x934feadc3700b12b, + 0x5e485f11edbdf182, + 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, + 0x9d46e9935ebbba2e, + 0xe056b67e05e6822b, + 
0x94d73f55739d03a0, + 0xcd7010bdb69b5a03, + 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, + 0x38d1a4fa6185d225, + 0xb475166f94bbe9bb, + 0xa4143548720959f1, + 0x7aed4780ba6b26ba, + 0xd0ce264439e02312, + 0x84366d746078d508, + 0xa8ce973c72ed17be, + 0x21c323a29a430b01, + 0x9962d617e3af80ee, + 0xab0ce91d9c8cf75b, + 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, + 0xc03a681640a85506, + 0x496e4e9f9c310967, + 0x78580472b59b14a0, + 0x273824c23b388577, + 0x66bf923ad45cb553, + 0x47ae1a5a2492ba86, + 0x35e304569e229659, + 0x4765182a46870b6f, + 0x6cbab625e9099412, + 0xddac9a2e598522c1, + 0x7172086e666624f2, + 0xdf5003ca503b7837, + 0x88c0c1db78563d09, + 0x58d51865acfc289d, + 0x177671aec65224f1, + 0xfb79d8a241e967d7, + 0x2be1e101cad9a49a, + 0x6625682f6e29186b, + 0x399553457ac06e50, + 0x35dffb4c23abb74, + 0x429db2591f54aade, + 0xc52802a8037d1009, + 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, + 0x8b0ea2d99580c2f7, + 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, + 0x9d42ed614f3dd267, + 0x5b9313c06257c57b, + 0xa114b8008b5e1442, + 0xc1fe311c11c13d4b, + 0x66e8763ea34c5568, + 0x8b982af1c262f05d, + 0xee8876faaa75fbb7, + 0x8a62a4d0d172bb2a, + 0xc13d94a3b7449a97, + 0x6dbbba9dc15d037c, + 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, + 0xf61aaf2962c9abb9, + 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, + 0x25e650899c74ddd7, + 0xba042af4a7c37463, + 0x4eb1a539465a3eca, + 0xbe09dbf03b05d5ca, + 0x774e5a362b5472ba, + 0x47a1221229d183cd, + 0x504b0ca18ef5a2df, + 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, + 0xf2aef8fe819d98c3, + 0x357f5276d4599d61, + 0x24a5483879c453e3, + 0x88026889192b4b9, + 0x28da96671782dbec, + 0x4ef37c40588e9aaa, + 0x8837b90651bc9fb3, + 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, + 0x69cd868f7622ada, + 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, + 0x4f00794bacee98bb, + 0x7107de7d637a69d5, + 0x88af793bb6f2255e, + 0xf3c6466b8799b598, + 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, + 0x88d85ace36a2674b, + 0xd056bd3792389e7, + 0xe55c396c4e9dd32d, + 0xbefb504571e6c0a6, + 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, + 
0x66dae58801672606, + 0x833b6017872317fb, + 0xb87c16f2d1c92864, + 0xdb766a74e58b669c, + 0x89659f85c61417be, + 0xc8daad856011ea0c, + 0x76a4b565b6fe7eae, + 0xa469d085f6237312, + 0xaaf0365683a3e96c, + 0x4dbb746f8424f7b8, + 0x638755af4e4acc1, + 0x3d7807f5bde64486, + 0x17be6d8f5bbb7639, + 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, + 0xa676ff93ed4c82f1, + 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, + 0x84e54d297aacfb51, + 0xa0b4b776a143445, + 0x820d471e20b348e, + 0x1874383cb83d46dc, + 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, + 0x1dd91955ce70e032, + 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, + 0x7b670a4cc50f7a9b, + 0x77c07d2a05c6dfa5, + 0xe3778b6646d0a6fa, + 0xb39c8eda47b56749, + 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, + 0xe5af208eb666e49, + 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, + 0x862daef3d35539a6, + 0xe68722498f8e1ea9, + 0x981c53093dc0d572, + 0xfa09b0bfbf86fbf5, + 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, + 0x5a66736e35f2a8e9, + 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, + 0xea4e39eac8de1ba3, + 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, + 0xa34e5956fbaf3385, + 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, + 0xb85a8d56bf016379, + 0x8382381267408ae3, + 0xb90f901bbdc0096d, + 0x7c6ad32933bcec65, + 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, + 0xc3e6064da1c2da72, + 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, + 0x3781b9e2288b997e, + 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, + 0x9d7a3edf0da43911, + 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, + 0x4c4f389fa143791d, + 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, + 0x754e6132dbe74ff9, + 0x71635442a839df45, + 0x2f6fb1643fbe52de, + 0x961e0a42cf7a8177, + 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, + 0xcd6848542c3295e7, + 0xe4cee1664c78662f, + 0x9947548b474c68c4, + 0x25d73777a5ed8b0b, + 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, + 0x5f6b5dcf608a64a1, + 0xdcf333255ff9570c, + 0x633b922418ced4ee, + 0xc136dde0b004b34a, + 0x58cc83b05d4b2f5a, + 0x5eb424dda28e42d2, + 0x62df47369739cd98, + 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, + 0x8ec3916707560ebf, + 
0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, + 0x78d9d7d07d2bbcc4, + 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, + 0xa9b53336cb3c8a05, + 0x38c42c0bf45c4f91, + 0x640893cdf4488863, + 0x80ec34bc575ea568, + 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, + 0x9224fc058cc5a214, + 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, + 0xa558f3ec65626212, + 0x424bec8b7adabe2f, + 0x41622513a6aea433, + 0xb88da2d5324ca798, + 0xd287733b245528a4, + 0x9a44697e6d68aec3, + 0x7b1093be2f49bb28, + 0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, + 0x897b9e7431b02bf3, + 0x219efdcb338a7047, + 0x3b0311f0a27c0656, + 0xdb17bf91c0db96e7, + 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, + 0x40d6fe831fa9dfd9, + 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, + 0xbbcbd3df14e08580, + 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4, + } + ); #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs index c001858d8..d5ebe36ad 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs @@ -1,7 +1,7 @@ -using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; using System; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -41,10 +41,22 @@ private static int ZSTD_compressedLiterals(optState_t* optPtr) private static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) { if (ZSTD_compressedLiterals(optPtr) != 0) - optPtr->litSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->litSum) : ZSTD_bitWeight(optPtr->litSum); - optPtr->litLengthSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->litLengthSum) : ZSTD_bitWeight(optPtr->litLengthSum); - optPtr->matchLengthSumBasePrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->matchLengthSum) : ZSTD_bitWeight(optPtr->matchLengthSum); - optPtr->offCodeSumBasePrice = optLevel != 0 ? 
ZSTD_fracWeight(optPtr->offCodeSum) : ZSTD_bitWeight(optPtr->offCodeSum); + optPtr->litSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litSum) + : ZSTD_bitWeight(optPtr->litSum); + optPtr->litLengthSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litLengthSum) + : ZSTD_bitWeight(optPtr->litLengthSum); + optPtr->matchLengthSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->matchLengthSum) + : ZSTD_bitWeight(optPtr->matchLengthSum); + optPtr->offCodeSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->offCodeSum) + : ZSTD_bitWeight(optPtr->offCodeSum); } private static uint sum_u32(uint* table, nuint nbElts) @@ -59,13 +71,23 @@ private static uint sum_u32(uint* table, nuint nbElts) return total; } - private static uint ZSTD_downscaleStats(uint* table, uint lastEltIndex, uint shift, base_directive_e base1) + private static uint ZSTD_downscaleStats( + uint* table, + uint lastEltIndex, + uint shift, + base_directive_e base1 + ) { - uint s, sum = 0; + uint s, + sum = 0; assert(shift < 30); for (s = 0; s < lastEltIndex + 1; s++) { - uint @base = (uint)(base1 != default ? 1 : table[s] > 0 ? 1 : 0); + uint @base = (uint)( + base1 != default ? 1 + : table[s] > 0 ? 
1 + : 0 + ); uint newStat = @base + (table[s] >> (int)shift); sum += newStat; table[s] = newStat; @@ -84,94 +106,185 @@ private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarg assert(logTarget < 30); if (factor <= 1) return prevsum; - return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_directive_e.base_1guaranteed); + return ZSTD_downscaleStats( + table, + lastEltIndex, + ZSTD_highbit32(factor), + base_directive_e.base_1guaranteed + ); } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_baseLLfreqs => new uint[36] - { - 4, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - }; - private static uint* baseLLfreqs => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_baseLLfreqs)); + private static ReadOnlySpan Span_baseLLfreqs => + new uint[36] + { + 4, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + }; + private static uint* baseLLfreqs => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_baseLLfreqs) + ); #else - private static readonly uint* baseLLfreqs = GetArrayPointer(new uint[36] { 4, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); + private static readonly uint* baseLLfreqs = GetArrayPointer( + new uint[36] + { + 4, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + } + ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_baseOFCfreqs => new uint[32] - { - 6, - 2, - 1, - 1, - 2, - 3, - 4, - 4, - 4, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 
1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1 - }; - private static uint* baseOFCfreqs => (uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_baseOFCfreqs)); + private static ReadOnlySpan Span_baseOFCfreqs => + new uint[32] + { + 6, + 2, + 1, + 1, + 2, + 3, + 4, + 4, + 4, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + }; + private static uint* baseOFCfreqs => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_baseOFCfreqs) + ); #else - private static readonly uint* baseOFCfreqs = GetArrayPointer(new uint[32] { 6, 2, 1, 1, 2, 3, 4, 4, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); + private static readonly uint* baseOFCfreqs = GetArrayPointer( + new uint[32] + { + 6, + 2, + 1, + 1, + 2, + 3, + 4, + 4, + 4, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + } + ); #endif /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics @@ -180,7 +293,12 @@ private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarg * using src for literals stats, and baseline stats for sequence symbols * otherwise downscale existing stats, to be used as seed for next block. 
*/ - private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSize, int optLevel) + private static void ZSTD_rescaleFreqs( + optState_t* optPtr, + byte* src, + nuint srcSize, + int optLevel + ) { int compressedLiterals = ZSTD_compressedLiterals(optPtr); optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; @@ -205,9 +323,14 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi { /* scale to 2K */ const uint scaleLog = 11; - uint bitCost = HUF_getNbBitsFromCTable(&optPtr->symbolCosts->huf.CTable.e0, lit); + uint bitCost = HUF_getNbBitsFromCTable( + &optPtr->symbolCosts->huf.CTable.e0, + lit + ); assert(bitCost <= scaleLog); - optPtr->litFreq[lit] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->litFreq[lit] = (uint)( + bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 + ); optPtr->litSum += optPtr->litFreq[lit]; } } @@ -223,7 +346,9 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi const uint scaleLog = 10; uint bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); assert(bitCost < scaleLog); - optPtr->litLengthFreq[ll] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->litLengthFreq[ll] = (uint)( + bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 + ); optPtr->litLengthSum += optPtr->litLengthFreq[ll]; } } @@ -238,7 +363,9 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi const uint scaleLog = 10; uint bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); assert(bitCost < scaleLog); - optPtr->matchLengthFreq[ml] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->matchLengthFreq[ml] = (uint)( + bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1 + ); optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; } } @@ -253,7 +380,9 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi const uint scaleLog = 10; uint bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); assert(bitCost < scaleLog); - optPtr->offCodeFreq[of] = (uint)(bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1); + optPtr->offCodeFreq[of] = (uint)( + bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 + ); optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } @@ -266,7 +395,12 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi /* base initial cost of literals on direct frequency within src */ uint lit = (1 << 8) - 1; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); - optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, (1 << 8) - 1, 8, base_directive_e.base_0possible); + optPtr->litSum = ZSTD_downscaleStats( + optPtr->litFreq, + (1 << 8) - 1, + 8, + base_directive_e.base_0possible + ); } { @@ -302,7 +436,12 @@ private static void ZSTD_rescaleFreqs(optState_t* optPtr, byte* src, nuint srcSi /* ZSTD_rawLiteralsCost() : * price of literals (only) in specified segment (which length can be 0). * does not include price of literalLength symbol */ - private static uint ZSTD_rawLiteralsCost(byte* literals, uint litLength, optState_t* optPtr, int optLevel) + private static uint ZSTD_rawLiteralsCost( + byte* literals, + uint litLength, + optState_t* optPtr, + int optLevel + ) { if (litLength == 0) return 0; @@ -317,7 +456,10 @@ private static uint ZSTD_rawLiteralsCost(byte* literals, uint litLength, optStat assert(optPtr->litSumBasePrice >= 1 << 8); for (u = 0; u < litLength; u++) { - uint litPrice = optLevel != 0 ? ZSTD_fracWeight(optPtr->litFreq[literals[u]]) : ZSTD_bitWeight(optPtr->litFreq[literals[u]]); + uint litPrice = + optLevel != 0 + ? 
ZSTD_fracWeight(optPtr->litFreq[literals[u]]) + : ZSTD_bitWeight(optPtr->litFreq[literals[u]]); if (litPrice > litPriceMax) litPrice = litPriceMax; price -= litPrice; @@ -338,7 +480,13 @@ private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int return (1 << 8) + ZSTD_litLengthPrice((1 << 17) - 1, optPtr, optLevel); { uint llCode = ZSTD_LLcode(litLength); - return (uint)(LL_bits[llCode] * (1 << 8)) + optPtr->litLengthSumBasePrice - (optLevel != 0 ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) : ZSTD_bitWeight(optPtr->litLengthFreq[llCode])); + return (uint)(LL_bits[llCode] * (1 << 8)) + + optPtr->litLengthSumBasePrice + - ( + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) + : ZSTD_bitWeight(optPtr->litLengthFreq[llCode]) + ); } } @@ -349,20 +497,44 @@ private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getMatchPrice(uint offBase, uint matchLength, optState_t* optPtr, int optLevel) + private static uint ZSTD_getMatchPrice( + uint offBase, + uint matchLength, + optState_t* optPtr, + int optLevel + ) { uint price; uint offCode = ZSTD_highbit32(offBase); uint mlBase = matchLength - 3; assert(matchLength >= 3); if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) - return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) + (16 + offCode) * (1 << 8); - price = offCode * (1 << 8) + (optPtr->offCodeSumBasePrice - (optLevel != 0 ? ZSTD_fracWeight(optPtr->offCodeFreq[offCode]) : ZSTD_bitWeight(optPtr->offCodeFreq[offCode]))); + return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) + + (16 + offCode) * (1 << 8); + price = + offCode * (1 << 8) + + ( + optPtr->offCodeSumBasePrice + - ( + optLevel != 0 + ? 
ZSTD_fracWeight(optPtr->offCodeFreq[offCode]) + : ZSTD_bitWeight(optPtr->offCodeFreq[offCode]) + ) + ); if (optLevel < 2 && offCode >= 20) price += (offCode - 19) * 2 * (1 << 8); { uint mlCode = ZSTD_MLcode(mlBase); - price += (uint)(ML_bits[mlCode] * (1 << 8)) + (optPtr->matchLengthSumBasePrice - (optLevel != 0 ? ZSTD_fracWeight(optPtr->matchLengthFreq[mlCode]) : ZSTD_bitWeight(optPtr->matchLengthFreq[mlCode]))); + price += + (uint)(ML_bits[mlCode] * (1 << 8)) + + ( + optPtr->matchLengthSumBasePrice + - ( + optLevel != 0 + ? ZSTD_fracWeight(optPtr->matchLengthFreq[mlCode]) + : ZSTD_bitWeight(optPtr->matchLengthFreq[mlCode]) + ) + ); } price += (1 << 8) / 5; @@ -371,7 +543,13 @@ private static uint ZSTD_getMatchPrice(uint offBase, uint matchLength, optState_ /* ZSTD_updateStats() : * assumption : literals + litLength <= iend */ - private static void ZSTD_updateStats(optState_t* optPtr, uint litLength, byte* literals, uint offBase, uint matchLength) + private static void ZSTD_updateStats( + optState_t* optPtr, + uint litLength, + byte* literals, + uint offBase, + uint matchLength + ) { if (ZSTD_compressedLiterals(optPtr) != 0) { @@ -423,7 +601,11 @@ private static uint ZSTD_readMINMATCH(void* memPtr, uint length) /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ - private static uint ZSTD_insertAndFindFirstIndexHash3(ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip) + private static uint ZSTD_insertAndFindFirstIndexHash3( + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip + ) { uint* hashTable3 = ms->hashTable3; uint hashLog3 = ms->hashLog3; @@ -449,7 +631,14 @@ private static uint ZSTD_insertAndFindFirstIndexHash3(ZSTD_MatchState_t* ms, uin * @param ip assumed <= iend-8 . 
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ - private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint target, uint mls, int extDict) + private static uint ZSTD_insertBt1( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + uint target, + uint mls, + int extDict + ) { ZSTD_compressionParameters* cParams = &ms->cParams; uint* hashTable = ms->hashTable; @@ -459,7 +648,8 @@ private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint btLog = cParams->chainLog - 1; uint btMask = (uint)((1 << (int)btLog) - 1); uint matchIndex = hashTable[h]; - nuint commonLengthSmaller = 0, commonLengthLarger = 0; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; byte* @base = ms->window.@base; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; @@ -487,7 +677,10 @@ private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, { uint* nextPtr = bt + 2 * (matchIndex & btMask); /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; assert(matchIndex < curr); if (extDict == 0 || matchIndex + matchLength >= dictLimit) { @@ -498,7 +691,13 @@ private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, else { match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); if (matchIndex + matchLength >= dictLimit) match = @base + matchIndex; } @@ -554,14 +753,27 @@ private static uint ZSTD_insertBt1(ZSTD_MatchState_t* ms, byte* ip, byte* iend, } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateTree_internal(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint mls, ZSTD_dictMode_e dictMode) + private static void ZSTD_updateTree_internal( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + uint mls, + ZSTD_dictMode_e dictMode + ) { byte* @base = ms->window.@base; uint target = (uint)(ip - @base); uint idx = ms->nextToUpdate; while (idx < target) { - uint forward = ZSTD_insertBt1(ms, @base + idx, iend, target, mls, dictMode == ZSTD_dictMode_e.ZSTD_extDict ? 1 : 0); + uint forward = ZSTD_insertBt1( + ms, + @base + idx, + iend, + target, + mls, + dictMode == ZSTD_dictMode_e.ZSTD_extDict ? 
1 : 0 + ); assert(idx < idx + forward); idx += forward; } @@ -574,14 +786,32 @@ private static void ZSTD_updateTree_internal(ZSTD_MatchState_t* ms, byte* ip, by /* used in ZSTD_loadDictionaryContent() */ private static void ZSTD_updateTree(ZSTD_MatchState_t* ms, byte* ip, byte* iend) { - ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_dictMode_e.ZSTD_noDict); + ZSTD_updateTree_internal( + ms, + ip, + iend, + ms->cParams.minMatch, + ZSTD_dictMode_e.ZSTD_noDict + ); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iLimit, ZSTD_dictMode_e dictMode, uint* rep, uint ll0, uint lengthToBeat, uint mls) + private static uint ZSTD_insertBtAndGetAllMatches( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iLimit, + ZSTD_dictMode_e dictMode, + uint* rep, + uint ll0, + uint lengthToBeat, + uint mls + ) { ZSTD_compressionParameters* cParams = &ms->cParams; - uint sufficient_len = cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + uint sufficient_len = + cParams->targetLength < (1 << 12) - 1 ? 
cParams->targetLength : (1 << 12) - 1; byte* @base = ms->window.@base; uint curr = (uint)(ip - @base); uint hashLog = cParams->hashLog; @@ -592,7 +822,8 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma uint* bt = ms->chainTable; uint btLog = cParams->chainLog - 1; uint btMask = (1U << (int)btLog) - 1; - nuint commonLengthSmaller = 0, commonLengthLarger = 0; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; byte* dictEnd = dictBase + dictLimit; @@ -608,17 +839,31 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma uint dummy32; uint mnum = 0; uint nbCompares = 1U << (int)cParams->searchLog; - ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? ms->dictMatchState : null; - ZSTD_compressionParameters* dmsCParams = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; - byte* dmsBase = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; - byte* dmsEnd = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; - uint dmsHighLimit = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; - uint dmsLowLimit = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.lowLimit : 0; - uint dmsIndexDelta = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; - uint dmsHashLog = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; - uint dmsBtLog = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; - uint dmsBtMask = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (1U << (int)dmsBtLog) - 1 : 0; - uint dmsBtLow = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; + ZSTD_MatchState_t* dms = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 
ms->dictMatchState : null; + ZSTD_compressionParameters* dmsCParams = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; + byte* dmsBase = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; + byte* dmsEnd = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; + uint dmsHighLimit = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; + uint dmsLowLimit = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.lowLimit : 0; + uint dmsIndexDelta = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; + uint dmsHashLog = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; + uint dmsBtLog = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; + uint dmsBtMask = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (1U << (int)dmsBtLog) - 1 : 0; + uint dmsBtLow = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + && dmsBtMask < dmsHighLimit - dmsLowLimit + ? dmsHighLimit - dmsBtMask + : dmsLowLimit; nuint bestLength = lengthToBeat - 1; assert(ll0 <= 1); { @@ -632,23 +877,62 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma assert(curr >= dictLimit); if (repOffset - 1 < curr - dictLimit) { - if (repIndex >= windowLow && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) + if ( + repIndex >= windowLow + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(ip - repOffset, minMatch) + ) { - repLen = (uint)ZSTD_count(ip + minMatch, ip + minMatch - repOffset, iLimit) + minMatch; + repLen = + (uint)ZSTD_count(ip + minMatch, ip + minMatch - repOffset, iLimit) + + minMatch; } } else { - byte* repMatch = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsBase + repIndex - dmsIndexDelta : dictBase + repIndex; + byte* repMatch = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + ? 
dmsBase + repIndex - dmsIndexDelta + : dictBase + repIndex; assert(curr >= windowLow); - if (dictMode == ZSTD_dictMode_e.ZSTD_extDict && ((repOffset - 1 < curr - windowLow ? 1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex)) != 0 && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_extDict + && ( + (repOffset - 1 < curr - windowLow ? 1 : 0) + & ZSTD_index_overlap_check(dictLimit, repIndex) + ) != 0 + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(repMatch, minMatch) + ) { - repLen = (uint)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iLimit, dictEnd, prefixStart) + minMatch; + repLen = + (uint)ZSTD_count_2segments( + ip + minMatch, + repMatch + minMatch, + iLimit, + dictEnd, + prefixStart + ) + minMatch; } - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && ((repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex)) != 0 && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + && ( + (repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 
1 : 0) + & ZSTD_index_overlap_check(dictLimit, repIndex) + ) != 0 + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(repMatch, minMatch) + ) { - repLen = (uint)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iLimit, dmsEnd, prefixStart) + minMatch; + repLen = + (uint)ZSTD_count_2segments( + ip + minMatch, + repMatch + minMatch, + iLimit, + dmsEnd, + prefixStart + ) + minMatch; } } @@ -674,7 +958,11 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma if (matchIndex3 >= matchLow && curr - matchIndex3 < 1 << 18) { nuint mlen; - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || matchIndex3 >= dictLimit) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || matchIndex3 >= dictLimit + ) { byte* match = @base + matchIndex3; mlen = ZSTD_count(ip, match, iLimit); @@ -709,9 +997,16 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma uint* nextPtr = bt + 2 * (matchIndex & btMask); byte* match; /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; assert(curr > matchIndex); - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState || matchIndex + matchLength >= dictLimit) + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || matchIndex + matchLength >= dictLimit + ) { assert(matchIndex + matchLength >= dictLimit); match = @base + matchIndex; @@ -725,7 +1020,13 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma { match = dictBase + matchIndex; assert(memcmp(match, ip, matchLength) == 0); - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iLimit, + dictEnd, + prefixStart + ); if (matchIndex + matchLength >= dictLimit) match = @base + matchIndex; } @@ -788,9 +1089,18 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma { uint* nextPtr = dmsBt + 2 * (dictMatchIndex & dmsBtMask); /* guaranteed minimum nb of common bytes */ - nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; byte* match = dmsBase + dictMatchIndex; - matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dmsEnd, prefixStart); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iLimit, + dmsEnd, + prefixStart + ); if (dictMatchIndex + matchLength >= dmsHighLimit) match = @base + dictMatchIndex + dmsIndexDelta; if (matchLength > bestLength) @@ -833,102 +1143,378 @@ private static uint ZSTD_insertBtAndGetAllMatches(ZSTD_match_t* matches, ZSTD_Ma } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_btGetAllMatches_internal(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat, ZSTD_dictMode_e dictMode, uint mls) + private static uint ZSTD_btGetAllMatches_internal( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat, + ZSTD_dictMode_e dictMode, + uint mls + ) { - assert((ms->cParams.minMatch <= 3 ? 3 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6) == mls); + assert( + ( + ms->cParams.minMatch <= 3 ? 3 + : ms->cParams.minMatch <= 6 ? 
ms->cParams.minMatch + : 6 + ) == mls + ); if (ip < ms->window.@base + ms->nextToUpdate) return 0; ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); - return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls); + return ZSTD_insertBtAndGetAllMatches( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + dictMode, + rep, + ll0, + lengthToBeat, + mls + ); } - private static uint ZSTD_btGetAllMatches_noDict_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_noDict_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 3); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 3 + ); } - private static uint ZSTD_btGetAllMatches_noDict_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_noDict_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 4); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); } - private static uint ZSTD_btGetAllMatches_noDict_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint 
ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_noDict_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 5); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); } - private static uint ZSTD_btGetAllMatches_noDict_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_noDict_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_noDict, 6); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); } - private static uint ZSTD_btGetAllMatches_extDict_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_extDict_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 3); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 3 + ); } - private static uint 
ZSTD_btGetAllMatches_extDict_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_extDict_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 4); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); } - private static uint ZSTD_btGetAllMatches_extDict_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_extDict_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 5); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); } - private static uint ZSTD_btGetAllMatches_extDict_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_extDict_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_extDict, 6); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + 
nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); } - private static uint ZSTD_btGetAllMatches_dictMatchState_3(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_dictMatchState_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 3); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 3 + ); } - private static uint ZSTD_btGetAllMatches_dictMatchState_4(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_dictMatchState_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 4); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); } - private static uint ZSTD_btGetAllMatches_dictMatchState_5(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_dictMatchState_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return 
ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 5); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); } - private static uint ZSTD_btGetAllMatches_dictMatchState_6(ZSTD_match_t* matches, ZSTD_MatchState_t* ms, uint* nextToUpdate3, byte* ip, byte* iHighLimit, uint* rep, uint ll0, uint lengthToBeat) + private static uint ZSTD_btGetAllMatches_dictMatchState_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) { - return ZSTD_btGetAllMatches_internal(matches, ms, nextToUpdate3, ip, iHighLimit, rep, ll0, lengthToBeat, ZSTD_dictMode_e.ZSTD_dictMatchState, 6); + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); } - private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = new ZSTD_getAllMatchesFn[3][] - { - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_noDict_3, - ZSTD_btGetAllMatches_noDict_4, - ZSTD_btGetAllMatches_noDict_5, - ZSTD_btGetAllMatches_noDict_6 - }, - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_extDict_3, - ZSTD_btGetAllMatches_extDict_4, - ZSTD_btGetAllMatches_extDict_5, - ZSTD_btGetAllMatches_extDict_6 - }, - new ZSTD_getAllMatchesFn[4] + private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = + new ZSTD_getAllMatchesFn[3][] { - ZSTD_btGetAllMatches_dictMatchState_3, - ZSTD_btGetAllMatches_dictMatchState_4, - ZSTD_btGetAllMatches_dictMatchState_5, - ZSTD_btGetAllMatches_dictMatchState_6 - } - }; - private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t* ms, ZSTD_dictMode_e dictMode) + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_noDict_3, + 
ZSTD_btGetAllMatches_noDict_4, + ZSTD_btGetAllMatches_noDict_5, + ZSTD_btGetAllMatches_noDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_extDict_3, + ZSTD_btGetAllMatches_extDict_4, + ZSTD_btGetAllMatches_extDict_5, + ZSTD_btGetAllMatches_extDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_dictMatchState_3, + ZSTD_btGetAllMatches_dictMatchState_4, + ZSTD_btGetAllMatches_dictMatchState_5, + ZSTD_btGetAllMatches_dictMatchState_6, + }, + }; + + private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches( + ZSTD_MatchState_t* ms, + ZSTD_dictMode_e dictMode + ) { - uint mls = ms->cParams.minMatch <= 3 ? 3 : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch : 6; + uint mls = + ms->cParams.minMatch <= 3 ? 3 + : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch + : 6; assert((uint)dictMode < 3); assert(mls - 3 < 4); return getAllMatchesFns[(int)dictMode][mls - 3]; @@ -938,7 +1524,10 @@ private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t * Moves forward in @rawSeqStore by @nbBytes, * which will update the fields 'pos' and 'posInSequence'. */ - private static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) + private static void ZSTD_optLdm_skipRawSeqStoreBytes( + RawSeqStore_t* rawSeqStore, + nuint nbBytes + ) { uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) @@ -966,7 +1555,11 @@ private static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, * Calculates the beginning and end of the next match in the current block. * Updates 'pos' and 'posInSequence' of the ldmSeqStore. 
*/ - private static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, uint currPosInBlock, uint blockBytesRemaining) + private static void ZSTD_opt_getNextMatchAndUpdateSeqStore( + ZSTD_optLdm_t* optLdm, + uint currPosInBlock, + uint blockBytesRemaining + ) { rawSeq currSeq; uint currBlockEndPos; @@ -982,8 +1575,15 @@ private static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); currBlockEndPos = currPosInBlock + blockBytesRemaining; - literalsBytesRemaining = optLdm->seqStore.posInSequence < currSeq.litLength ? currSeq.litLength - (uint)optLdm->seqStore.posInSequence : 0; - matchBytesRemaining = literalsBytesRemaining == 0 ? currSeq.matchLength - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) : currSeq.matchLength; + literalsBytesRemaining = + optLdm->seqStore.posInSequence < currSeq.litLength + ? currSeq.litLength - (uint)optLdm->seqStore.posInSequence + : 0; + matchBytesRemaining = + literalsBytesRemaining == 0 + ? 
currSeq.matchLength + - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) + : currSeq.matchLength; if (literalsBytesRemaining >= blockBytesRemaining) { optLdm->startPosInBlock = 0xffffffff; @@ -998,11 +1598,17 @@ private static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm if (optLdm->endPosInBlock > currBlockEndPos) { optLdm->endPosInBlock = currBlockEndPos; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); + ZSTD_optLdm_skipRawSeqStoreBytes( + &optLdm->seqStore, + currBlockEndPos - currPosInBlock + ); } else { - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining); + ZSTD_optLdm_skipRawSeqStoreBytes( + &optLdm->seqStore, + literalsBytesRemaining + matchBytesRemaining + ); } } @@ -1011,17 +1617,30 @@ private static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock', * into 'matches'. Maintains the correct ordering of 'matches'. 
*/ - private static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, uint* nbMatches, ZSTD_optLdm_t* optLdm, uint currPosInBlock, uint minMatch) + private static void ZSTD_optLdm_maybeAddMatch( + ZSTD_match_t* matches, + uint* nbMatches, + ZSTD_optLdm_t* optLdm, + uint currPosInBlock, + uint minMatch + ) { uint posDiff = currPosInBlock - optLdm->startPosInBlock; /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ uint candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; - if (currPosInBlock < optLdm->startPosInBlock || currPosInBlock >= optLdm->endPosInBlock || candidateMatchLength < minMatch) + if ( + currPosInBlock < optLdm->startPosInBlock + || currPosInBlock >= optLdm->endPosInBlock + || candidateMatchLength < minMatch + ) { return; } - if (*nbMatches == 0 || candidateMatchLength > matches[*nbMatches - 1].len && *nbMatches < 1 << 12) + if ( + *nbMatches == 0 + || candidateMatchLength > matches[*nbMatches - 1].len && *nbMatches < 1 << 12 + ) { assert(optLdm->offset > 0); uint candidateOffBase = optLdm->offset + 3; @@ -1034,7 +1653,14 @@ private static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, uint* nbMat /* ZSTD_optLdm_processMatchCandidate(): * Wrapper function to update ldm seq store and call ldm functions as necessary. 
*/ - private static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, uint* nbMatches, uint currPosInBlock, uint remainingBytes, uint minMatch) + private static void ZSTD_optLdm_processMatchCandidate( + ZSTD_optLdm_t* optLdm, + ZSTD_match_t* matches, + uint* nbMatches, + uint currPosInBlock, + uint remainingBytes, + uint minMatch + ) { if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { @@ -1060,7 +1686,15 @@ private static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZST } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, int optLevel, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_compressBlock_opt_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + int optLevel, + ZSTD_dictMode_e dictMode + ) { optState_t* optStatePtr = &ms->opt; byte* istart = (byte*)src; @@ -1072,7 +1706,8 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt byte* prefixStart = @base + ms->window.dictLimit; ZSTD_compressionParameters* cParams = &ms->cParams; ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); - uint sufficient_len = cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + uint sufficient_len = + cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; uint minMatch = (uint)(cParams->minMatch == 3 ? 3 : 4); uint nextToUpdate3 = ms->nextToUpdate; ZSTD_optimal_t* opt = optStatePtr->priceTable; @@ -1088,12 +1723,29 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt ip += ip == prefixStart ? 1 : 0; while (ip < ilimit) { - uint cur, last_pos = 0; + uint cur, + last_pos = 0; { uint litlen = (uint)(ip - anchor); uint ll0 = litlen == 0 ? 
1U : 0U; - uint nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); - ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (uint)(ip - istart), (uint)(iend - ip), minMatch); + uint nbMatches = getAllMatches( + matches, + ms, + &nextToUpdate3, + ip, + iend, + rep, + ll0, + minMatch + ); + ZSTD_optLdm_processMatchCandidate( + &optLdm, + matches, + &nbMatches, + (uint)(ip - istart), + (uint)(iend - ip), + minMatch + ); if (nbMatches == 0) { ip++; @@ -1135,12 +1787,19 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt uint end = matches[matchNb].len; for (; pos <= end; pos++) { - int matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); + int matchPrice = (int)ZSTD_getMatchPrice( + offBase, + pos, + optStatePtr, + optLevel + ); int sequencePrice = opt[0].price + matchPrice; opt[pos].mlen = pos; opt[pos].off = offBase; opt[pos].litlen = 0; - opt[pos].price = sequencePrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + opt[pos].price = + sequencePrice + + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); } } @@ -1155,7 +1814,13 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt assert(cur <= 1 << 12); { uint litlen = opt[cur - 1].litlen + 1; - int price = opt[cur - 1].price + (int)ZSTD_rawLiteralsCost(ip + cur - 1, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(litlen - 1, optStatePtr, optLevel)); + int price = + opt[cur - 1].price + + (int)ZSTD_rawLiteralsCost(ip + cur - 1, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(litlen - 1, optStatePtr, optLevel) + ); assert(price < 1000000000); if (price <= opt[cur].price) { @@ -1163,16 +1828,46 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt opt[cur] = opt[cur - 1]; opt[cur].litlen = litlen; opt[cur].price = price; - 
if (optLevel >= 1 && prevMatch.litlen == 0 && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) < 0 && ip + cur < iend) + if ( + optLevel >= 1 + && prevMatch.litlen == 0 + && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) + < 0 + && ip + cur < iend + ) { /* check next position, in case it would be cheaper */ - int with1literal = prevMatch.price + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel)); - int withMoreLiterals = price + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + ((int)ZSTD_litLengthPrice(litlen + 1, optStatePtr, optLevel) - (int)ZSTD_litLengthPrice(litlen + 1 - 1, optStatePtr, optLevel)); - if (with1literal < withMoreLiterals && with1literal < opt[cur + 1].price) + int with1literal = + prevMatch.price + + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) + ); + int withMoreLiterals = + price + + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(litlen + 1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice( + litlen + 1 - 1, + optStatePtr, + optLevel + ) + ); + if ( + with1literal < withMoreLiterals + && with1literal < opt[cur + 1].price + ) { /* update offset history - before it disappears */ uint prev = cur - prevMatch.mlen; - repcodes_s newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen == 0 ? 1U : 0U); + repcodes_s newReps = ZSTD_newRep( + opt[prev].rep, + prevMatch.off, + opt[prev].litlen == 0 ? 
1U : 0U + ); assert(cur >= prevMatch.mlen); opt[cur + 1] = prevMatch; memcpy(opt[cur + 1].rep, &newReps, (uint)sizeof(repcodes_s)); @@ -1190,7 +1885,11 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt { /* just finished a match => alter offset history */ uint prev = cur - opt[cur].mlen; - repcodes_s newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen == 0 ? 1U : 0U); + repcodes_s newReps = ZSTD_newRep( + opt[prev].rep, + opt[cur].off, + opt[prev].litlen == 0 ? 1U : 0U + ); memcpy(opt[cur].rep, &newReps, (uint)sizeof(repcodes_s)); } @@ -1207,10 +1906,27 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt { uint ll0 = opt[cur].litlen == 0 ? 1U : 0U; int previousPrice = opt[cur].price; - int basePrice = previousPrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); - uint nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); + int basePrice = + previousPrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + uint nbMatches = getAllMatches( + matches, + ms, + &nextToUpdate3, + inr, + iend, + opt[cur].rep, + ll0, + minMatch + ); uint matchNb; - ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (uint)(inr - istart), (uint)(iend - inr), minMatch); + ZSTD_optLdm_processMatchCandidate( + &optLdm, + matches, + &nbMatches, + (uint)(inr - istart), + (uint)(iend - inr), + minMatch + ); if (nbMatches == 0) { continue; @@ -1218,7 +1934,11 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt { uint longestML = matches[nbMatches - 1].len; - if (longestML > sufficient_len || cur + longestML >= 1 << 12 || ip + cur + longestML >= iend) + if ( + longestML > sufficient_len + || cur + longestML >= 1 << 12 + || ip + cur + longestML >= iend + ) { lastStretch.mlen = longestML; lastStretch.off = matches[nbMatches - 1].off; @@ -1237,7 +1957,9 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* 
ms, SeqSt for (mlen = lastML; mlen >= startML; mlen--) { uint pos = cur + mlen; - int price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + int price = + basePrice + + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if (pos > last_pos || price < opt[pos].price) { while (last_pos < pos) @@ -1267,7 +1989,7 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt lastStretch = opt[last_pos]; assert(cur >= lastStretch.mlen); cur = last_pos - lastStretch.mlen; - _shortestPath: + _shortestPath: assert(opt[0].mlen == 0); assert(last_pos >= lastStretch.mlen); assert(cur == last_pos - lastStretch.mlen); @@ -1282,7 +2004,11 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt if (lastStretch.litlen == 0) { /* finishing on a match : update offset history */ - repcodes_s reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen == 0 ? 1U : 0U); + repcodes_s reps = ZSTD_newRep( + opt[cur].rep, + lastStretch.off, + opt[cur].litlen == 0 ? 
1U : 0U + ); memcpy(rep, &reps, (uint)sizeof(repcodes_s)); } else @@ -1355,19 +2081,46 @@ private static nuint ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, SeqSt return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_opt0(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_compressBlock_opt0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + ZSTD_dictMode_e dictMode + ) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0, dictMode); } - private static nuint ZSTD_compressBlock_opt2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize, ZSTD_dictMode_e dictMode) + private static nuint ZSTD_compressBlock_opt2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + ZSTD_dictMode_e dictMode + ) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2, dictMode); } - private static nuint ZSTD_compressBlock_btopt(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btopt( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); } /* ZSTD_initStats_ultra(): @@ -1375,7 +2128,13 @@ private static nuint ZSTD_compressBlock_btopt(ZSTD_MatchState_t* ms, SeqStore_t* * only works on first block, with no dictionary and no ldm. * this function cannot error out, its narrow contract must be respected. 
*/ - private static void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static void ZSTD_initStats_ultra( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { /* updated rep codes will sink here */ uint* tmpRep = stackalloc uint[3]; @@ -1384,7 +2143,14 @@ private static void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, SeqStore_t* seqS assert(seqStore->sequences == seqStore->sequencesStart); assert(ms->window.dictLimit == ms->window.lowLimit); assert(ms->window.dictLimit - ms->nextToUpdate <= 1); - ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + ZSTD_compressBlock_opt2( + ms, + seqStore, + tmpRep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); ZSTD_resetSeqStore(seqStore); ms->window.@base -= srcSize; ms->window.dictLimit += (uint)srcSize; @@ -1392,44 +2158,128 @@ private static void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, SeqStore_t* seqS ms->nextToUpdate = ms->window.dictLimit; } - private static nuint ZSTD_compressBlock_btultra(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btultra( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); } /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ - private static nuint ZSTD_compressBlock_btultra2(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btultra2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint 
srcSize + ) { uint curr = (uint)((byte*)src - ms->window.@base); assert(srcSize <= 1 << 17); - if (ms->opt.litLengthSum == 0 && seqStore->sequences == seqStore->sequencesStart && ms->window.dictLimit == ms->window.lowLimit && curr == ms->window.dictLimit && srcSize > 8) + if ( + ms->opt.litLengthSum == 0 + && seqStore->sequences == seqStore->sequencesStart + && ms->window.dictLimit == ms->window.lowLimit + && curr == ms->window.dictLimit + && srcSize > 8 + ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); } - private static nuint ZSTD_compressBlock_btopt_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_btopt_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_extDict); + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_extDict + ); } - private static nuint ZSTD_compressBlock_btultra_dictMatchState(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btultra_dictMatchState( + 
ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_dictMatchState); + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); } - private static nuint ZSTD_compressBlock_btultra_extDict(ZSTD_MatchState_t* ms, SeqStore_t* seqStore, uint* rep, void* src, nuint srcSize) + private static nuint ZSTD_compressBlock_btultra_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) { - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMode_e.ZSTD_extDict); + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_extDict + ); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs index 22a13a59a..96ba2eab0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs @@ -1,7 +1,7 @@ -using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; using System; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { @@ -26,7 +26,13 @@ private static void initStats(FPStats* fpstats) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void addEvents_generic(Fingerprint* fp, void* src, nuint srcSize, nuint samplingRate, uint hashLog) + private static void addEvents_generic( + Fingerprint* fp, + void* src, + nuint srcSize, + nuint samplingRate, + uint hashLog + ) { sbyte* p = (sbyte*)src; nuint limit = srcSize - 2 + 1; @@ -41,7 +47,13 @@ private static void addEvents_generic(Fingerprint* fp, void* src, nuint srcSize, } 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void recordFingerprint_generic(Fingerprint* fp, void* src, nuint srcSize, nuint samplingRate, uint hashLog) + private static void recordFingerprint_generic( + Fingerprint* fp, + void* src, + nuint srcSize, + nuint samplingRate, + uint hashLog + ) { memset(fp, 0, (uint)(sizeof(uint) * ((nuint)1 << (int)hashLog))); fp->nbEvents = 0; @@ -80,7 +92,9 @@ private static ulong fpDistance(Fingerprint* fp1, Fingerprint* fp2, uint hashLog assert(hashLog <= 10); for (n = 0; n < (nuint)1 << (int)hashLog; n++) { - distance += abs64(fp1->events[n] * (long)fp2->nbEvents - fp2->events[n] * (long)fp1->nbEvents); + distance += abs64( + fp1->events[n] * (long)fp2->nbEvents - fp2->events[n] * (long)fp1->nbEvents + ); } return distance; @@ -89,7 +103,12 @@ private static ulong fpDistance(Fingerprint* fp1, Fingerprint* fp2, uint hashLog /* Compare newEvents with pastEvents * return 1 when considered "too different" */ - private static int compareFingerprints(Fingerprint* @ref, Fingerprint* newfp, int penalty, uint hashLog) + private static int compareFingerprints( + Fingerprint* @ref, + Fingerprint* newfp, + int penalty, + uint hashLog + ) { assert(@ref->nbEvents > 0); assert(newfp->nbEvents > 0); @@ -138,25 +157,30 @@ private static void removeEvents(Fingerprint* acc, Fingerprint* slice) private static readonly void*[] records_fs = new void*[4] { - (delegate* managed )(&ZSTD_recordFingerprint_43), - (delegate* managed )(&ZSTD_recordFingerprint_11), - (delegate* managed )(&ZSTD_recordFingerprint_5), - (delegate* managed )(&ZSTD_recordFingerprint_1) + (delegate* managed)(&ZSTD_recordFingerprint_43), + (delegate* managed)(&ZSTD_recordFingerprint_11), + (delegate* managed)(&ZSTD_recordFingerprint_5), + (delegate* managed)(&ZSTD_recordFingerprint_1), }; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_hashParams => new uint[4] - { - 8, - 9, - 10, - 10 - }; - private static uint* hashParams => 
(uint*)System.Runtime.CompilerServices.Unsafe.AsPointer(ref MemoryMarshal.GetReference(Span_hashParams)); + private static ReadOnlySpan Span_hashParams => new uint[4] { 8, 9, 10, 10 }; + private static uint* hashParams => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_hashParams) + ); #else private static readonly uint* hashParams = GetArrayPointer(new uint[4] { 8, 9, 10, 10 }); #endif - private static nuint ZSTD_splitBlock_byChunks(void* blockStart, nuint blockSize, int level, void* workspace, nuint wkspSize) + + private static nuint ZSTD_splitBlock_byChunks( + void* blockStart, + nuint blockSize, + int level, + void* workspace, + nuint wkspSize + ) { assert(0 <= level && level <= 3); void* record_f = records_fs[level]; @@ -169,11 +193,26 @@ private static nuint ZSTD_splitBlock_byChunks(void* blockStart, nuint blockSize, assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); assert(wkspSize >= (nuint)sizeof(FPStats)); initStats(fpstats); - ((delegate* managed)record_f)(&fpstats->pastEvents, p, 8 << 10); + ((delegate* managed)record_f)( + &fpstats->pastEvents, + p, + 8 << 10 + ); for (pos = 8 << 10; pos <= blockSize - (8 << 10); pos += 8 << 10) { - ((delegate* managed)record_f)(&fpstats->newEvents, p + pos, 8 << 10); - if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level]) != 0) + ((delegate* managed)record_f)( + &fpstats->newEvents, + p + pos, + 8 << 10 + ); + if ( + compareFingerprints( + &fpstats->pastEvents, + &fpstats->newEvents, + penalty, + hashParams[level] + ) != 0 + ) { return pos; } @@ -198,10 +237,16 @@ private static nuint ZSTD_splitBlock_byChunks(void* blockStart, nuint blockSize, * More accurate splitting saves more, but speed impact is also more perceptible. * For better accuracy, use more elaborate variant *_byChunks. 
*/ - private static nuint ZSTD_splitBlock_fromBorders(void* blockStart, nuint blockSize, void* workspace, nuint wkspSize) + private static nuint ZSTD_splitBlock_fromBorders( + void* blockStart, + nuint blockSize, + void* workspace, + nuint wkspSize + ) { FPStats* fpstats = (FPStats*)workspace; - Fingerprint* middleEvents = (Fingerprint*)(void*)((sbyte*)workspace + 512 * sizeof(uint)); + Fingerprint* middleEvents = (Fingerprint*) + (void*)((sbyte*)workspace + 512 * sizeof(uint)); assert(blockSize == 128 << 10); assert(workspace != null); assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); @@ -235,7 +280,13 @@ private static nuint ZSTD_splitBlock_fromBorders(void* blockStart, nuint blockSi * While this could be extended to smaller sizes in the future, * it is not yet clear if this would be useful. TBD. */ - private static nuint ZSTD_splitBlock(void* blockStart, nuint blockSize, int level, void* workspace, nuint wkspSize) + private static nuint ZSTD_splitBlock( + void* blockStart, + nuint blockSize, + int level, + void* workspace, + nuint wkspSize + ) { assert(0 <= level && level <= 4); if (level == 0) @@ -243,4 +294,4 @@ private static nuint ZSTD_splitBlock(void* blockStart, nuint blockSize, int leve return ZSTD_splitBlock_byChunks(blockStart, blockSize, level - 1, workspace, wkspSize); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs index b379f7ebd..bcf2da7bd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs @@ -1,11 +1,12 @@ -using static ZstdSharp.UnsafeHelper; using System.Runtime.CompilerServices; +using static ZstdSharp.UnsafeHelper; namespace ZstdSharp.Unsafe { public static unsafe partial class Methods { private static readonly buffer_s g_nullBuffer = new buffer_s(start: null, capacity: 0); + private 
static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool_s* bufPool) { if (bufPool == null) @@ -25,13 +26,22 @@ private static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool_s* bufPool) ZSTD_customFree(bufPool, bufPool->cMem); } - private static ZSTDMT_bufferPool_s* ZSTDMT_createBufferPool(uint maxNbBuffers, ZSTD_customMem cMem) + private static ZSTDMT_bufferPool_s* ZSTDMT_createBufferPool( + uint maxNbBuffers, + ZSTD_customMem cMem + ) { - ZSTDMT_bufferPool_s* bufPool = (ZSTDMT_bufferPool_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_bufferPool_s), cMem); + ZSTDMT_bufferPool_s* bufPool = (ZSTDMT_bufferPool_s*)ZSTD_customCalloc( + (nuint)sizeof(ZSTDMT_bufferPool_s), + cMem + ); if (bufPool == null) return null; SynchronizationWrapper.Init(&bufPool->poolMutex); - bufPool->buffers = (buffer_s*)ZSTD_customCalloc(maxNbBuffers * (uint)sizeof(buffer_s), cMem); + bufPool->buffers = (buffer_s*)ZSTD_customCalloc( + maxNbBuffers * (uint)sizeof(buffer_s), + cMem + ); if (bufPool->buffers == null) { ZSTDMT_freeBufferPool(bufPool); @@ -70,7 +80,10 @@ private static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool_s* bufPool, nuint bSi SynchronizationWrapper.Exit(&bufPool->poolMutex); } - private static ZSTDMT_bufferPool_s* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool_s* srcBufPool, uint maxNbBuffers) + private static ZSTDMT_bufferPool_s* ZSTDMT_expandBufferPool( + ZSTDMT_bufferPool_s* srcBufPool, + uint maxNbBuffers + ) { if (srcBufPool == null) return null; @@ -180,7 +193,10 @@ private static void ZSTDMT_setNbSeq(ZSTDMT_bufferPool_s* seqPool, nuint nbSeq) ZSTDMT_setBufferSize(seqPool, nbSeq * (nuint)sizeof(rawSeq)); } - private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool(uint nbWorkers, ZSTD_customMem cMem) + private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool( + uint nbWorkers, + ZSTD_customMem cMem + ) { ZSTDMT_bufferPool_s* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (seqPool == null) @@ -194,7 +210,10 @@ private static void ZSTDMT_freeSeqPool(ZSTDMT_bufferPool_s* 
seqPool) ZSTDMT_freeBufferPool(seqPool); } - private static ZSTDMT_bufferPool_s* ZSTDMT_expandSeqPool(ZSTDMT_bufferPool_s* pool, uint nbWorkers) + private static ZSTDMT_bufferPool_s* ZSTDMT_expandSeqPool( + ZSTDMT_bufferPool_s* pool, + uint nbWorkers + ) { return ZSTDMT_expandBufferPool(pool, nbWorkers); } @@ -220,13 +239,19 @@ private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ private static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) { - ZSTDMT_CCtxPool* cctxPool = (ZSTDMT_CCtxPool*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtxPool), cMem); + ZSTDMT_CCtxPool* cctxPool = (ZSTDMT_CCtxPool*)ZSTD_customCalloc( + (nuint)sizeof(ZSTDMT_CCtxPool), + cMem + ); assert(nbWorkers > 0); if (cctxPool == null) return null; SynchronizationWrapper.Init(&cctxPool->poolMutex); cctxPool->totalCCtx = nbWorkers; - cctxPool->cctxs = (ZSTD_CCtx_s**)ZSTD_customCalloc((nuint)(nbWorkers * sizeof(ZSTD_CCtx_s*)), cMem); + cctxPool->cctxs = (ZSTD_CCtx_s**)ZSTD_customCalloc( + (nuint)(nbWorkers * sizeof(ZSTD_CCtx_s*)), + cMem + ); if (cctxPool->cctxs == null) { ZSTDMT_freeCCtxPool(cctxPool); @@ -245,7 +270,10 @@ private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) return cctxPool; } - private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, int nbWorkers) + private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool( + ZSTDMT_CCtxPool* srcPool, + int nbWorkers + ) { if (srcPool == null) return null; @@ -311,7 +339,15 @@ private static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx_s* cctx) SynchronizationWrapper.Exit(&pool->poolMutex); } - private static int ZSTDMT_serialState_reset(SerialState* serialState, ZSTDMT_bufferPool_s* seqPool, ZSTD_CCtx_params_s @params, nuint jobSize, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType) + private static int ZSTDMT_serialState_reset( + SerialState* serialState, + ZSTDMT_bufferPool_s* 
seqPool, + ZSTD_CCtx_params_s @params, + nuint jobSize, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType + ) { if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { @@ -333,23 +369,37 @@ private static int ZSTDMT_serialState_reset(SerialState* serialState, ZSTDMT_buf uint hashLog = @params.ldmParams.hashLog; nuint hashSize = ((nuint)1 << (int)hashLog) * (nuint)sizeof(ldmEntry_t); uint bucketLog = @params.ldmParams.hashLog - @params.ldmParams.bucketSizeLog; - uint prevBucketLog = serialState->@params.ldmParams.hashLog - serialState->@params.ldmParams.bucketSizeLog; + uint prevBucketLog = + serialState->@params.ldmParams.hashLog + - serialState->@params.ldmParams.bucketSizeLog; nuint numBuckets = (nuint)1 << (int)bucketLog; ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(@params.ldmParams, jobSize)); ZSTD_window_init(&serialState->ldmState.window); - if (serialState->ldmState.hashTable == null || serialState->@params.ldmParams.hashLog < hashLog) + if ( + serialState->ldmState.hashTable == null + || serialState->@params.ldmParams.hashLog < hashLog + ) { ZSTD_customFree(serialState->ldmState.hashTable, cMem); - serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc( + hashSize, + cMem + ); } if (serialState->ldmState.bucketOffsets == null || prevBucketLog < bucketLog) { ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); - serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc(numBuckets, cMem); + serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc( + numBuckets, + cMem + ); } - if (serialState->ldmState.hashTable == null || serialState->ldmState.bucketOffsets == null) + if ( + serialState->ldmState.hashTable == null + || serialState->ldmState.bucketOffsets == null + ) return 1; memset(serialState->ldmState.hashTable, 0, (uint)hashSize); memset(serialState->ldmState.bucketOffsets, 0, (uint)numBuckets); @@ 
-360,8 +410,16 @@ private static int ZSTDMT_serialState_reset(SerialState* serialState, ZSTDMT_buf { byte* dictEnd = (byte*)dict + dictSize; ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, 0); - ZSTD_ldm_fillHashTable(&serialState->ldmState, (byte*)dict, dictEnd, &@params.ldmParams); - serialState->ldmState.loadedDictEnd = @params.forceWindow != 0 ? 0 : (uint)(dictEnd - serialState->ldmState.window.@base); + ZSTD_ldm_fillHashTable( + &serialState->ldmState, + (byte*)dict, + dictEnd, + &@params.ldmParams + ); + serialState->ldmState.loadedDictEnd = + @params.forceWindow != 0 + ? 0 + : (uint)(dictEnd - serialState->ldmState.window.@base); } } @@ -395,7 +453,12 @@ private static void ZSTDMT_serialState_free(SerialState* serialState) ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); } - private static void ZSTDMT_serialState_genSequences(SerialState* serialState, RawSeqStore_t* seqStore, Range src, uint jobID) + private static void ZSTDMT_serialState_genSequences( + SerialState* serialState, + RawSeqStore_t* seqStore, + Range src, + uint jobID + ) { SynchronizationWrapper.Enter(&serialState->mutex); while (serialState->nextJobID < jobID) @@ -408,10 +471,21 @@ private static void ZSTDMT_serialState_genSequences(SerialState* serialState, Ra if (serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { nuint error; - assert(seqStore->seq != null && seqStore->pos == 0 && seqStore->size == 0 && seqStore->capacity > 0); + assert( + seqStore->seq != null + && seqStore->pos == 0 + && seqStore->size == 0 + && seqStore->capacity > 0 + ); assert(src.size <= serialState->@params.jobSize); ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, 0); - error = ZSTD_ldm_generateSequences(&serialState->ldmState, seqStore, &serialState->@params.ldmParams, src.start, src.size); + error = ZSTD_ldm_generateSequences( + &serialState->ldmState, + seqStore, + &serialState->@params.ldmParams, + src.start, + src.size + ); 
assert(!ERR_isError(error)); SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); serialState->ldmWindow = serialState->ldmState.window; @@ -428,17 +502,27 @@ private static void ZSTDMT_serialState_genSequences(SerialState* serialState, Ra SynchronizationWrapper.Exit(&serialState->mutex); } - private static void ZSTDMT_serialState_applySequences(SerialState* serialState, ZSTD_CCtx_s* jobCCtx, RawSeqStore_t* seqStore) + private static void ZSTDMT_serialState_applySequences( + SerialState* serialState, + ZSTD_CCtx_s* jobCCtx, + RawSeqStore_t* seqStore + ) { if (seqStore->size > 0) { - assert(serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable); + assert( + serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ); assert(jobCCtx != null); ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); } } - private static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, uint jobID, nuint cSize) + private static void ZSTDMT_serialState_ensureFinished( + SerialState* serialState, + uint jobID, + nuint cSize + ) { SynchronizationWrapper.Enter(&serialState->mutex); if (serialState->nextJobID <= jobID) @@ -456,6 +540,7 @@ private static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, } private static readonly Range kNullRange = new Range(start: null, size: 0); + /* ZSTDMT_compressionJob() is a POOL_function type */ private static void ZSTDMT_compressionJob(void* jobDescription) { @@ -480,7 +565,9 @@ private static void ZSTDMT_compressionJob(void* jobDescription) if (dstBuff.start == null) { SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + job->cSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); SynchronizationWrapper.Exit(&job->job_mutex); goto _endJob; } @@ -488,7 +575,10 @@ private static void ZSTDMT_compressionJob(void* jobDescription) job->dstBuff = 
dstBuff; } - if (jobParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && rawSeqStore.seq == null) + if ( + jobParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + && rawSeqStore.seq == null + ) { SynchronizationWrapper.Enter(&job->job_mutex); job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); @@ -503,7 +593,16 @@ private static void ZSTDMT_compressionJob(void* jobDescription) ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); if (job->cdict != null) { - nuint initError = ZSTD_compressBegin_advanced_internal(cctx, null, 0, ZSTD_dictContentType_e.ZSTD_dct_auto, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); + nuint initError = ZSTD_compressBegin_advanced_internal( + cctx, + null, + 0, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + job->cdict, + &jobParams, + job->fullFrameSize + ); assert(job->firstJob != 0); if (ERR_isError(initError)) { @@ -517,7 +616,11 @@ private static void ZSTDMT_compressionJob(void* jobDescription) { ulong pledgedSrcSize = job->firstJob != 0 ? job->fullFrameSize : job->src.size; { - nuint forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_cParameter.ZSTD_c_experimentalParam3, job->firstJob == 0 ? 1 : 0); + nuint forceWindowError = ZSTD_CCtxParams_setParameter( + &jobParams, + ZSTD_cParameter.ZSTD_c_experimentalParam3, + job->firstJob == 0 ? 
1 : 0 + ); if (ERR_isError(forceWindowError)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -529,7 +632,11 @@ private static void ZSTDMT_compressionJob(void* jobDescription) if (job->firstJob == 0) { - nuint err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_cParameter.ZSTD_c_experimentalParam15, 0); + nuint err = ZSTD_CCtxParams_setParameter( + &jobParams, + ZSTD_cParameter.ZSTD_c_experimentalParam15, + 0 + ); if (ERR_isError(err)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -540,7 +647,16 @@ private static void ZSTDMT_compressionJob(void* jobDescription) } { - nuint initError = ZSTD_compressBegin_advanced_internal(cctx, job->prefix.start, job->prefix.size, ZSTD_dictContentType_e.ZSTD_dct_rawContent, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, null, &jobParams, pledgedSrcSize); + nuint initError = ZSTD_compressBegin_advanced_internal( + cctx, + job->prefix.start, + job->prefix.size, + ZSTD_dictContentType_e.ZSTD_dct_rawContent, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + &jobParams, + pledgedSrcSize + ); if (ERR_isError(initError)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -554,7 +670,13 @@ private static void ZSTDMT_compressionJob(void* jobDescription) ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); if (job->firstJob == 0) { - nuint hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + nuint hSize = ZSTD_compressContinue_public( + cctx, + dstBuff.start, + dstBuff.capacity, + job->src.start, + 0 + ); if (ERR_isError(hSize)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -581,7 +703,13 @@ private static void ZSTDMT_compressionJob(void* jobDescription) assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - nuint cSize = ZSTD_compressContinue_public(cctx, op, (nuint)(oend - op), ip, chunkSize); + nuint cSize = ZSTD_compressContinue_public( + cctx, + op, + (nuint)(oend - op), + ip, + chunkSize + ); if (ERR_isError(cSize)) { 
SynchronizationWrapper.Enter(&job->job_mutex); @@ -605,8 +733,26 @@ private static void ZSTDMT_compressionJob(void* jobDescription) if (((uint)(nbChunks > 0 ? 1 : 0) | job->lastJob) != 0) { nuint lastBlockSize1 = job->src.size & chunkSize - 1; - nuint lastBlockSize = lastBlockSize1 == 0 && job->src.size >= chunkSize ? chunkSize : lastBlockSize1; - nuint cSize = job->lastJob != 0 ? ZSTD_compressEnd_public(cctx, op, (nuint)(oend - op), ip, lastBlockSize) : ZSTD_compressContinue_public(cctx, op, (nuint)(oend - op), ip, lastBlockSize); + nuint lastBlockSize = + lastBlockSize1 == 0 && job->src.size >= chunkSize + ? chunkSize + : lastBlockSize1; + nuint cSize = + job->lastJob != 0 + ? ZSTD_compressEnd_public( + cctx, + op, + (nuint)(oend - op), + ip, + lastBlockSize + ) + : ZSTD_compressContinue_public( + cctx, + op, + (nuint)(oend - op), + ip, + lastBlockSize + ); if (ERR_isError(cSize)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -627,7 +773,7 @@ private static void ZSTDMT_compressionJob(void* jobDescription) #endif ZSTD_CCtx_trace(cctx, 0); - _endJob: + _endJob: ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); ZSTDMT_releaseCCtx(job->cctxPool, cctx); @@ -640,8 +786,17 @@ private static void ZSTDMT_compressionJob(void* jobDescription) SynchronizationWrapper.Exit(&job->job_mutex); } - private static readonly RoundBuff_t kNullRoundBuff = new RoundBuff_t(buffer: null, capacity: 0, pos: 0); - private static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, uint nbJobs, ZSTD_customMem cMem) + private static readonly RoundBuff_t kNullRoundBuff = new RoundBuff_t( + buffer: null, + capacity: 0, + pos: 0 + ); + + private static void ZSTDMT_freeJobsTable( + ZSTDMT_jobDescription* jobTable, + uint nbJobs, + ZSTD_customMem cMem + ) { uint jobNb; if (jobTable == null) @@ -657,12 +812,18 @@ private static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, uint n /* ZSTDMT_allocJobsTable() * 
allocate and init a job table. * update *nbJobsPtr to next power of 2 value, as size of table */ - private static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(uint* nbJobsPtr, ZSTD_customMem cMem) + private static ZSTDMT_jobDescription* ZSTDMT_createJobsTable( + uint* nbJobsPtr, + ZSTD_customMem cMem + ) { uint nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; uint nbJobs = (uint)(1 << (int)nbJobsLog2); uint jobNb; - ZSTDMT_jobDescription* jobTable = (ZSTDMT_jobDescription*)ZSTD_customCalloc(nbJobs * (uint)sizeof(ZSTDMT_jobDescription), cMem); + ZSTDMT_jobDescription* jobTable = (ZSTDMT_jobDescription*)ZSTD_customCalloc( + nbJobs * (uint)sizeof(ZSTDMT_jobDescription), + cMem + ); int initError = 0; if (jobTable == null) return null; @@ -702,20 +863,34 @@ private static nuint ZSTDMT_expandJobsTable(ZSTDMT_CCtx_s* mtctx, uint nbWorkers /* ZSTDMT_CCtxParam_setNbWorkers(): * Internal use only */ - private static nuint ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params_s* @params, uint nbWorkers) + private static nuint ZSTDMT_CCtxParam_setNbWorkers( + ZSTD_CCtx_params_s* @params, + uint nbWorkers + ) { - return ZSTD_CCtxParams_setParameter(@params, ZSTD_cParameter.ZSTD_c_nbWorkers, (int)nbWorkers); + return ZSTD_CCtxParams_setParameter( + @params, + ZSTD_cParameter.ZSTD_c_nbWorkers, + (int)nbWorkers + ); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced_internal(uint nbWorkers, ZSTD_customMem cMem, void* pool) + private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced_internal( + uint nbWorkers, + ZSTD_customMem cMem, + void* pool + ) { ZSTDMT_CCtx_s* mtctx; uint nbJobs = nbWorkers + 2; int initError; if (nbWorkers < 1) return null; - nbWorkers = nbWorkers < (uint)(sizeof(void*) == 4 ? 64 : 256) ? nbWorkers : (uint)(sizeof(void*) == 4 ? 64 : 256); + nbWorkers = + nbWorkers < (uint)(sizeof(void*) == 4 ? 64 : 256) + ? nbWorkers + : (uint)(sizeof(void*) == 4 ? 64 : 256); if (((cMem.customAlloc != null ? 
1 : 0) ^ (cMem.customFree != null ? 1 : 0)) != 0) return null; mtctx = (ZSTDMT_CCtx_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtx_s), cMem); @@ -744,7 +919,19 @@ private static nuint ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params_s* @params, mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); initError = ZSTDMT_serialState_init(&mtctx->serial); mtctx->roundBuff = kNullRoundBuff; - if (((mtctx->factory == null || mtctx->jobs == null || mtctx->bufPool == null || mtctx->cctxPool == null || mtctx->seqPool == null ? 1 : 0) | initError) != 0) + if ( + ( + ( + mtctx->factory == null + || mtctx->jobs == null + || mtctx->bufPool == null + || mtctx->cctxPool == null + || mtctx->seqPool == null + ? 1 + : 0 + ) | initError + ) != 0 + ) { ZSTDMT_freeCCtx(mtctx); return null; @@ -754,7 +941,11 @@ private static nuint ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params_s* @params, } /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ - private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced(uint nbWorkers, ZSTD_customMem cMem, void* pool) + private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced( + uint nbWorkers, + ZSTD_customMem cMem, + void* pool + ) { return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); } @@ -773,7 +964,7 @@ private static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx_s* mtctx) mtctx->jobs[jobID] = new ZSTDMT_jobDescription { job_mutex = mutex, - job_cond = cond + job_cond = cond, }; } @@ -821,7 +1012,14 @@ private static nuint ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx_s* mtctx) { if (mtctx == null) return 0; - return (nuint)sizeof(ZSTDMT_CCtx_s) + POOL_sizeof(mtctx->factory) + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + ZSTD_sizeof_CDict(mtctx->cdictLocal) + mtctx->roundBuff.capacity; + return (nuint)sizeof(ZSTDMT_CCtx_s) + + POOL_sizeof(mtctx->factory) + + 
ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) + + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + + ZSTD_sizeof_CDict(mtctx->cdictLocal) + + mtctx->roundBuff.capacity; } /* ZSTDMT_resize() : @@ -854,14 +1052,22 @@ private static nuint ZSTDMT_resize(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) /*! ZSTDMT_updateCParams_whileCompressing() : * Updates a selected set of compression parameters, remaining compatible with currently active frame. * New parameters will be applied to next compression job. */ - private static void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx_s* mtctx, ZSTD_CCtx_params_s* cctxParams) + private static void ZSTDMT_updateCParams_whileCompressing( + ZSTDMT_CCtx_s* mtctx, + ZSTD_CCtx_params_s* cctxParams + ) { /* Do not modify windowLog while compressing */ uint saved_wlog = mtctx->@params.cParams.windowLog; int compressionLevel = cctxParams->compressionLevel; mtctx->@params.compressionLevel = compressionLevel; { - ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, unchecked(0UL - 1), 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict); + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( + cctxParams, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); cParams.windowLog = saved_wlog; mtctx->@params.cParams = cParams; } @@ -953,7 +1159,10 @@ private static uint ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params_s* @params) uint jobLog; if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - jobLog = 21 > ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3 ? 21 : ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3; + jobLog = + 21 > ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3 + ? 
21 + : ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3; } else { @@ -997,11 +1206,19 @@ private static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) { int overlapRLog = 9 - ZSTDMT_overlapLog(@params->overlapLog, @params->cParams.strategy); - int ovLog = (int)(overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog); + int ovLog = (int)( + overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog + ); assert(0 <= overlapRLog && overlapRLog <= 8); if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - ovLog = (int)((@params->cParams.windowLog < ZSTDMT_computeTargetJobLog(@params) - 2 ? @params->cParams.windowLog : ZSTDMT_computeTargetJobLog(@params) - 2) - (uint)overlapRLog); + ovLog = (int)( + ( + @params->cParams.windowLog < ZSTDMT_computeTargetJobLog(@params) - 2 + ? @params->cParams.windowLog + : ZSTDMT_computeTargetJobLog(@params) - 2 + ) - (uint)overlapRLog + ); } assert(0 <= ovLog && ovLog <= (sizeof(nuint) == 4 ? 
30 : 31)); @@ -1011,7 +1228,15 @@ private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) /* ====================================== */ /* ======= Streaming API ======= */ /* ====================================== */ - private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dict, nuint dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_CDict_s* cdict, ZSTD_CCtx_params_s @params, ulong pledgedSrcSize) + private static nuint ZSTDMT_initCStream_internal( + ZSTDMT_CCtx_s* mtctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize + ) { assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); assert(!(dict != null && cdict != null)); @@ -1041,7 +1266,14 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic ZSTD_freeCDict(mtctx->cdictLocal); if (dict != null) { - mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, dictContentType, @params.cParams, mtctx->cMem); + mtctx->cdictLocal = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + dictContentType, + @params.cParams, + mtctx->cMem + ); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == null) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); @@ -1056,10 +1288,14 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic mtctx->targetSectionSize = @params.jobSize; if (mtctx->targetSectionSize == 0) { - mtctx->targetSectionSize = (nuint)(1UL << (int)ZSTDMT_computeTargetJobLog(&@params)); + mtctx->targetSectionSize = (nuint)( + 1UL << (int)ZSTDMT_computeTargetJobLog(&@params) + ); } - assert(mtctx->targetSectionSize <= (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20))); + assert( + mtctx->targetSectionSize <= (nuint)(MEM_32bits ? 
512 * (1 << 20) : 1024 * (1 << 20)) + ); if (@params.rsyncable != 0) { /* Aim for the targetsectionSize as the average job size. */ @@ -1077,7 +1313,10 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ - nuint windowSize = mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1U << (int)mtctx->@params.cParams.windowLog : 0; + nuint windowSize = + mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 1U << (int)mtctx->@params.cParams.windowLog + : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra @@ -1087,9 +1326,12 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic nuint nbSlackBuffers = (nuint)(2 + (mtctx->targetPrefixSize > 0 ? 1 : 0)); nuint slackSize = mtctx->targetSectionSize * nbSlackBuffers; /* Compute the total size, and always have enough slack */ - nuint nbWorkers = (nuint)(mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1); + nuint nbWorkers = (nuint)( + mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1 + ); nuint sectionsSize = mtctx->targetSectionSize * nbWorkers; - nuint capacity = (windowSize > sectionsSize ? windowSize : sectionsSize) + slackSize; + nuint capacity = + (windowSize > sectionsSize ? 
windowSize : sectionsSize) + slackSize; if (mtctx->roundBuff.capacity < capacity) { if (mtctx->roundBuff.buffer != null) @@ -1098,7 +1340,9 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic if (mtctx->roundBuff.buffer == null) { mtctx->roundBuff.capacity = 0; - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } mtctx->roundBuff.capacity = capacity; @@ -1127,10 +1371,19 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic } else { - mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType, @params.cParams, mtctx->cMem); + mtctx->cdictLocal = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dictContentType, + @params.cParams, + mtctx->cMem + ); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } } else @@ -1138,7 +1391,17 @@ private static nuint ZSTDMT_initCStream_internal(ZSTDMT_CCtx_s* mtctx, void* dic mtctx->cdict = cdict; } - if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, @params, mtctx->targetSectionSize, dict, dictSize, dictContentType) != 0) + if ( + ZSTDMT_serialState_reset( + &mtctx->serial, + mtctx->seqPool, + @params, + mtctx->targetSectionSize, + dict, + dictSize, + dictContentType + ) != 0 + ) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); return 0; } @@ -1168,13 +1431,19 @@ private static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) assert(job->consumed == 0); } - private static nuint ZSTDMT_createCompressionJob(ZSTDMT_CCtx_s* mtctx, nuint srcSize, ZSTD_EndDirective endOp) + private static nuint ZSTDMT_createCompressionJob( + ZSTDMT_CCtx_s* 
mtctx, + nuint srcSize, + ZSTD_EndDirective endOp + ) { uint jobID = mtctx->nextJobID & mtctx->jobIDMask; int endFrame = endOp == ZSTD_EndDirective.ZSTD_e_end ? 1 : 0; if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { - assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); + assert( + (mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask) + ); return 0; } @@ -1198,14 +1467,20 @@ private static nuint ZSTDMT_createCompressionJob(ZSTDMT_CCtx_s* mtctx, nuint src mtctx->jobs[jobID].jobID = mtctx->nextJobID; mtctx->jobs[jobID].firstJob = mtctx->nextJobID == 0 ? 1U : 0U; mtctx->jobs[jobID].lastJob = (uint)endFrame; - mtctx->jobs[jobID].frameChecksumNeeded = mtctx->@params.fParams.checksumFlag != 0 && endFrame != 0 && mtctx->nextJobID > 0 ? 1U : 0U; + mtctx->jobs[jobID].frameChecksumNeeded = + mtctx->@params.fParams.checksumFlag != 0 + && endFrame != 0 + && mtctx->nextJobID > 0 + ? 1U + : 0U; mtctx->jobs[jobID].dstFlushed = 0; mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; if (endFrame == 0) { - nuint newPrefixSize = srcSize < mtctx->targetPrefixSize ? srcSize : mtctx->targetPrefixSize; + nuint newPrefixSize = + srcSize < mtctx->targetPrefixSize ? srcSize : mtctx->targetPrefixSize; mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; mtctx->inBuff.prefix.size = newPrefixSize; } @@ -1228,7 +1503,13 @@ private static nuint ZSTDMT_createCompressionJob(ZSTDMT_CCtx_s* mtctx, nuint src } } - if (POOL_tryAdd(mtctx->factory, (delegate* managed)(&ZSTDMT_compressionJob), &mtctx->jobs[jobID]) != 0) + if ( + POOL_tryAdd( + mtctx->factory, + (delegate* managed)(&ZSTDMT_compressionJob), + &mtctx->jobs[jobID] + ) != 0 + ) { mtctx->nextJobID++; mtctx->jobReady = 0; @@ -1247,7 +1528,12 @@ private static nuint ZSTDMT_createCompressionJob(ZSTDMT_CCtx_s* mtctx, nuint src * `output` : `pos` will be updated with amount of data flushed . 
* `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ - private static nuint ZSTDMT_flushProduced(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s* output, uint blockToFlush, ZSTD_EndDirective end) + private static nuint ZSTDMT_flushProduced( + ZSTDMT_CCtx_s* mtctx, + ZSTD_outBuffer_s* output, + uint blockToFlush, + ZSTD_EndDirective end + ) { uint wJobID = mtctx->doneJobID & mtctx->jobIDMask; assert(output->size >= output->pos); @@ -1285,7 +1571,10 @@ private static nuint ZSTDMT_flushProduced(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s if (srcConsumed == srcSize && mtctx->jobs[wJobID].frameChecksumNeeded != 0) { uint checksum = (uint)ZSTD_XXH64_digest(&mtctx->serial.xxhState); - MEM_writeLE32((sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); + MEM_writeLE32( + (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, + checksum + ); cSize += 4; mtctx->jobs[wJobID].cSize += 4; mtctx->jobs[wJobID].frameChecksumNeeded = 0; @@ -1293,13 +1582,21 @@ private static nuint ZSTDMT_flushProduced(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s if (cSize > 0) { - nuint toFlush = cSize - mtctx->jobs[wJobID].dstFlushed < output->size - output->pos ? cSize - mtctx->jobs[wJobID].dstFlushed : output->size - output->pos; + nuint toFlush = + cSize - mtctx->jobs[wJobID].dstFlushed < output->size - output->pos + ? 
cSize - mtctx->jobs[wJobID].dstFlushed + : output->size - output->pos; assert(mtctx->doneJobID < mtctx->nextJobID); assert(cSize >= mtctx->jobs[wJobID].dstFlushed); assert(mtctx->jobs[wJobID].dstBuff.start != null); if (toFlush > 0) { - memcpy((sbyte*)output->dst + output->pos, (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, (uint)toFlush); + memcpy( + (sbyte*)output->dst + output->pos, + (sbyte*)mtctx->jobs[wJobID].dstBuff.start + + mtctx->jobs[wJobID].dstFlushed, + (uint)toFlush + ); } output->pos += toFlush; @@ -1397,7 +1694,11 @@ private static int ZSTDMT_doesOverlapWindow(buffer_s buffer, ZSTD_window_t windo extDict.size = window.dictLimit - window.lowLimit; prefix.start = window.@base + window.dictLimit; prefix.size = (nuint)(window.nextSrc - (window.@base + window.dictLimit)); - return ZSTDMT_isOverlapped(buffer, extDict) != 0 || ZSTDMT_isOverlapped(buffer, prefix) != 0 ? 1 : 0; + return + ZSTDMT_isOverlapped(buffer, extDict) != 0 + || ZSTDMT_isOverlapped(buffer, prefix) != 0 + ? 1 + : 0; } private static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx_s* mtctx, buffer_s buffer) @@ -1469,7 +1770,10 @@ private static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx_s* mtctx) * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. */ - private static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx_s* mtctx, ZSTD_inBuffer_s input) + private static SyncPoint findSynchronizationPoint( + ZSTDMT_CCtx_s* mtctx, + ZSTD_inBuffer_s input + ) { byte* istart = (byte*)input.src + input.pos; ulong primePower = mtctx->rsync.primePower; @@ -1478,7 +1782,10 @@ private static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx_s* mtctx, ZSTD_inB ulong hash; byte* prev; nuint pos; - syncPoint.toLoad = input.size - input.pos < mtctx->targetSectionSize - mtctx->inBuff.filled ? 
input.size - input.pos : mtctx->targetSectionSize - mtctx->inBuff.filled; + syncPoint.toLoad = + input.size - input.pos < mtctx->targetSectionSize - mtctx->inBuff.filled + ? input.size - input.pos + : mtctx->targetSectionSize - mtctx->inBuff.filled; syncPoint.flush = 0; if (mtctx->@params.rsyncable == 0) return syncPoint; @@ -1549,7 +1856,12 @@ private static nuint ZSTDMT_nextInputSizeHint(ZSTDMT_CCtx_s* mtctx) * internal use only - exposed to be invoked from zstd_compress.c * assumption : output and input are valid (pos <= size) * @return : minimum amount of data remaining to flush, 0 if none */ - private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_outBuffer_s* output, ZSTD_inBuffer_s* input, ZSTD_EndDirective endOp) + private static nuint ZSTDMT_compressStream_generic( + ZSTDMT_CCtx_s* mtctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) { uint forwardInputProgress = 0; assert(output->pos <= output->size); @@ -1579,7 +1891,11 @@ private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_ou } assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); - memcpy((sbyte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (sbyte*)input->src + input->pos, (uint)syncPoint.toLoad); + memcpy( + (sbyte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, + (sbyte*)input->src + input->pos, + (uint)syncPoint.toLoad + ); input->pos += syncPoint.toLoad; mtctx->inBuff.filled += syncPoint.toLoad; forwardInputProgress = syncPoint.toLoad > 0 ? 
1U : 0U; @@ -1588,11 +1904,20 @@ private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_ou if (input->pos < input->size && endOp == ZSTD_EndDirective.ZSTD_e_end) { - assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->@params.rsyncable != 0); + assert( + mtctx->inBuff.filled == 0 + || mtctx->inBuff.filled == mtctx->targetSectionSize + || mtctx->@params.rsyncable != 0 + ); endOp = ZSTD_EndDirective.ZSTD_e_flush; } - if (mtctx->jobReady != 0 || mtctx->inBuff.filled >= mtctx->targetSectionSize || endOp != ZSTD_EndDirective.ZSTD_e_continue && mtctx->inBuff.filled > 0 || endOp == ZSTD_EndDirective.ZSTD_e_end && mtctx->frameEnded == 0) + if ( + mtctx->jobReady != 0 + || mtctx->inBuff.filled >= mtctx->targetSectionSize + || endOp != ZSTD_EndDirective.ZSTD_e_continue && mtctx->inBuff.filled > 0 + || endOp == ZSTD_EndDirective.ZSTD_e_end && mtctx->frameEnded == 0 + ) { nuint jobSize = mtctx->inBuff.filled; assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); @@ -1607,7 +1932,12 @@ private static nuint ZSTDMT_compressStream_generic(ZSTDMT_CCtx_s* mtctx, ZSTD_ou { /* block if there was no forward input progress */ - nuint remainingToFlush = ZSTDMT_flushProduced(mtctx, output, forwardInputProgress == 0 ? 1U : 0U, endOp); + nuint remainingToFlush = ZSTDMT_flushProduced( + mtctx, + output, + forwardInputProgress == 0 ? 1U : 0U, + endOp + ); if (input->pos < input->size) return remainingToFlush > 1 ? 
remainingToFlush : 1; return remainingToFlush; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs index d13d9b12b..c0da19f62 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs @@ -7,9 +7,11 @@ public unsafe struct _wksps_e__Union { [FieldOffset(0)] public HUF_buildCTable_wksp_tables buildCTable_wksp; + [FieldOffset(0)] public HUF_WriteCTableWksp writeCTable_wksp; + [FieldOffset(0)] public fixed uint hist_wksp[1024]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs index e54af2070..5ea98f181 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs @@ -4,10 +4,11 @@ public struct algo_time_t { public uint tableTime; public uint decode256Time; + public algo_time_t(uint tableTime, uint decode256Time) { this.tableTime = tableTime; this.decode256Time = decode256Time; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs index a77fea3bd..1e95f5e00 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs @@ -3,6 +3,6 @@ namespace ZstdSharp.Unsafe public enum base_directive_e { base_0possible = 0, - base_1guaranteed = 1 + base_1guaranteed = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs index 058ddffd2..4413c5d4d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs @@ -6,4 +6,4 @@ public struct blockProperties_t public uint lastBlock; public uint origSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs index baa27390a..44f7af6b6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs @@ -5,6 +5,6 @@ public enum blockType_e bt_raw, bt_rle, bt_compressed, - bt_reserved + bt_reserved, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs index 093a0b8d2..332edb1f9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs @@ -6,10 +6,11 @@ public unsafe struct buffer_s { public void* start; public nuint capacity; + public buffer_s(void* start, nuint capacity) { this.start = start; this.capacity = capacity; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs index a4b1abb30..b18b0c719 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs @@ -6,4 +6,4 @@ public struct dictItem public uint length; public uint savings; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs index 8fc651685..1c9e85244 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs @@ -10,4 +10,4 @@ public struct InBuff_t public buffer_s buffer; public nuint filled; } -} \ No newline at end of file +} 
diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs index 391fa98ec..8b308fd58 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs @@ -5,4 +5,4 @@ public struct ldmEntry_t public uint offset; public uint checksum; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs index f3e41eddb..a5958c318 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs @@ -7,4 +7,4 @@ public unsafe struct ldmMatchCandidate_t public uint checksum; public ldmEntry_t* bucket; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs index 253e3bfee..4558d0c55 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs @@ -4,15 +4,20 @@ public struct ldmParams_t { /* ZSTD_ps_enable to enable LDM. 
ZSTD_ps_auto by default */ public ZSTD_paramSwitch_e enableLdm; + /* Log size of hashTable */ public uint hashLog; + /* Log bucket size for collision resolution, at most 8 */ public uint bucketSizeLog; + /* Minimum match length */ public uint minMatchLength; + /* Log number of entries to skip */ public uint hashRateLog; + /* Window log for the LDM */ public uint windowLog; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs index 810380075..d4fa28ec4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs @@ -5,4 +5,4 @@ public struct ldmRollingHashState_t public ulong rolling; public ulong stopMask; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs index a02441936..db4484e84 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs @@ -8,10 +8,12 @@ public unsafe struct ldmState_t public ZSTD_window_t window; public ldmEntry_t* hashTable; public uint loadedDictEnd; + /* Next position in bucket to insert entry */ public byte* bucketOffsets; public _splitIndices_e__FixedBuffer splitIndices; public _matchCandidates_e__FixedBuffer matchCandidates; + #if NET8_0_OR_GREATER [InlineArray(64)] public unsafe struct _splitIndices_e__FixedBuffer @@ -166,4 +168,4 @@ public unsafe struct _matchCandidates_e__FixedBuffer } #endif } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs index 3e30dffc7..6ec0c8030 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs @@ -10,4 +10,4 @@ public struct nodeElt_s public byte @byte; public byte nbBits; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs index 7c652776f..a29f87e27 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs @@ -5,4 +5,4 @@ public struct offsetCount_t public uint offset; public uint count; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs index 5f08de1a0..b88e4d555 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs @@ -4,36 +4,51 @@ public unsafe struct optState_t { /* table of literals statistics, of size 256 */ public uint* litFreq; + /* table of litLength statistics, of size (MaxLL+1) */ public uint* litLengthFreq; + /* table of matchLength statistics, of size (MaxML+1) */ public uint* matchLengthFreq; + /* table of offCode statistics, of size (MaxOff+1) */ public uint* offCodeFreq; + /* list of found matches, of size ZSTD_OPT_SIZE */ public ZSTD_match_t* matchTable; + /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ public ZSTD_optimal_t* priceTable; + /* nb of literals */ public uint litSum; + /* nb of litLength codes */ public uint litLengthSum; + /* nb of matchLength codes */ public uint matchLengthSum; + /* nb of offset codes */ public uint offCodeSum; + /* to compare to log2(litfreq) */ public uint litSumBasePrice; + /* to compare to log2(llfreq) */ public uint litLengthSumBasePrice; + /* to compare to log2(mlfreq) */ public uint matchLengthSumBasePrice; + /* to compare to log2(offreq) */ public uint offCodeSumBasePrice; + /* prices can be 
determined dynamically, or follow a pre-defined cost structure */ public ZSTD_OptPrice_e priceType; + /* pre-calculated dictionary statistics */ public ZSTD_entropyCTables_t* symbolCosts; public ZSTD_paramSwitch_e literalCompressionMode; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs index f78f027d2..874302f61 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs @@ -5,4 +5,4 @@ public struct rankPos public ushort @base; public ushort curr; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs index d6c4fb5d7..be14d6624 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs @@ -4,4 +4,4 @@ public unsafe struct rankValCol_t { public fixed uint Body[13]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs index adefb2625..1161bd217 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs @@ -4,9 +4,11 @@ public struct rawSeq { /* Offset of sequence */ public uint offset; + /* Length of literals prior to match */ public uint litLength; + /* Raw length of match */ public uint matchLength; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs index c63ef35b3..d0e18b12a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs @@ -4,4 +4,4 @@ public unsafe struct repcodes_s { 
public fixed uint rep[3]; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs index fb38e0f68..6823088c2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs @@ -4,6 +4,6 @@ public enum searchMethod_e { search_hashChain = 0, search_binaryTree = 1, - search_rowHash = 2 + search_rowHash = 2, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs index 2743a146f..5c7496ec1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs @@ -7,6 +7,7 @@ public unsafe struct seqState_t public ZSTD_fseState stateOffb; public ZSTD_fseState stateML; public _prevOffset_e__FixedBuffer prevOffset; + public unsafe struct _prevOffset_e__FixedBuffer { public nuint e0; @@ -14,4 +15,4 @@ public unsafe struct _prevOffset_e__FixedBuffer public nuint e2; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs index b4b7a4d40..b227d69f7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs @@ -5,7 +5,8 @@ public unsafe struct seqStoreSplits { /* Array of split indices */ public uint* splitLocations; + /* The current index within splitLocations being worked on */ public nuint idx; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs index d286898c4..5dac7aad1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs @@ -6,4 +6,4 @@ public struct seq_t public nuint matchLength; public nuint offset; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs index facf809c8..6c320a04b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs @@ -4,4 +4,4 @@ public struct sortedSymbol_t { public byte symbol; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs index fae3973f0..db838c61c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs @@ -4,6 +4,6 @@ namespace ZstdSharp.Unsafe public enum streaming_operation { not_streaming = 0, - is_streaming = 1 + is_streaming = 1, } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs index 09aef626d..3a3722da1 100644 --- a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs +++ b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs @@ -3,7 +3,6 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; - namespace ZstdSharp { public static unsafe class UnsafeHelper @@ -18,9 +17,9 @@ public static unsafe class UnsafeHelper public static void* malloc(ulong size) { #if NET6_0_OR_GREATER - var ptr = NativeMemory.Alloc((nuint) size); + var ptr = NativeMemory.Alloc((nuint)size); #else - var ptr = (void*) Marshal.AllocHGlobal((nint) size); + var ptr = (void*)Marshal.AllocHGlobal((nint)size); #endif #if DEBUG return PoisonMemory(ptr, size); @@ -33,7 +32,7 @@ public static unsafe class UnsafeHelper public 
static void* calloc(ulong num, ulong size) { #if NET6_0_OR_GREATER - return NativeMemory.AllocZeroed((nuint) num, (nuint) size); + return NativeMemory.AllocZeroed((nuint)num, (nuint)size); #else var total = num * size; assert(total <= uint.MaxValue); @@ -44,12 +43,12 @@ public static unsafe class UnsafeHelper } [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memcpy(void* destination, void* source, uint size) - => System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); + public static void memcpy(void* destination, void* source, uint size) => + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memset(void* memPtr, byte val, uint size) - => System.Runtime.CompilerServices.Unsafe.InitBlockUnaligned(memPtr, val, size); + public static void memset(void* memPtr, byte val, uint size) => + System.Runtime.CompilerServices.Unsafe.InitBlockUnaligned(memPtr, val, size); [MethodImpl(MethodImplOptions.AggressiveInlining)] public static void free(void* ptr) @@ -57,12 +56,13 @@ public static void free(void* ptr) #if NET6_0_OR_GREATER NativeMemory.Free(ptr); #else - Marshal.FreeHGlobal((IntPtr) ptr); + Marshal.FreeHGlobal((IntPtr)ptr); #endif } [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static T* GetArrayPointer(T[] array) where T : unmanaged + public static T* GetArrayPointer(T[] array) + where T : unmanaged { var size = (uint)(sizeof(T) * array.Length); #if NET9_0_OR_GREATER @@ -72,12 +72,17 @@ public static void free(void* ptr) // loading the assembly in an unloadable AssemblyLoadContext. // While introduced in .NET 5, we call this only in .NET 9+, because // it's not implemented in the Mono runtime until then. 
- var destination = (T*)RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); + var destination = (T*) + RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); #else var destination = (T*)malloc(size); #endif fixed (void* source = &array[0]) - System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned( + destination, + source, + size + ); return destination; } @@ -91,16 +96,17 @@ public static void assert(bool condition, string? message = null) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memmove(void* destination, void* source, ulong size) - => Buffer.MemoryCopy(source, destination, size, size); + public static void memmove(void* destination, void* source, ulong size) => + Buffer.MemoryCopy(source, destination, size, size); [MethodImpl(MethodImplOptions.AggressiveInlining)] public static int memcmp(void* buf1, void* buf2, ulong size) { assert(size <= int.MaxValue); var intSize = (int)size; - return new ReadOnlySpan(buf1, intSize) - .SequenceCompareTo(new ReadOnlySpan(buf2, intSize)); + return new ReadOnlySpan(buf1, intSize).SequenceCompareTo( + new ReadOnlySpan(buf2, intSize) + ); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/ZstdException.cs b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs index d16ce18b7..1427bd84f 100644 --- a/src/SharpCompress/Compressors/ZStandard/ZstdException.cs +++ b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs @@ -4,12 +4,10 @@ namespace ZstdSharp { - - public class ZstdException : SharpCompressException { - public ZstdException(ZSTD_ErrorCode code, string message) : base(message) - => Code = code; + public ZstdException(ZSTD_ErrorCode code, string message) + : base(message) => Code = code; public ZSTD_ErrorCode Code { get; } } From 906baf18d253ac682a6621d860faa41917dfe968 Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 
17:02:21 +0100 Subject: [PATCH 5/6] fix namespaces --- src/SharpCompress/Common/Arc/ArcEntry.cs | 59 +- .../Common/Arc/ArcEntryHeader.cs | 115 +- src/SharpCompress/Common/Arc/ArcFilePart.cs | 99 +- src/SharpCompress/Common/Arc/ArcVolume.cs | 11 +- src/SharpCompress/Common/Zip/ZipFilePart.cs | 2 +- .../Compressors/Filters/DeltaFilter.cs | 51 +- .../Compressors/LZMA/Registry.cs | 2 +- .../Compressors/Lzw/LzwConstants.cs | 103 +- .../Compressors/Lzw/LzwStream.cs | 1017 +- src/SharpCompress/Compressors/RLE90/RLE.cs | 69 +- .../Compressors/RLE90/RunLength90Stream.cs | 113 +- .../Compressors/Shrink/BitStream.cs | 123 +- .../Compressors/Shrink/HwUnshrink.cs | 663 +- .../Compressors/Squeezed/SqueezedStream.cs | 193 +- .../Compressors/ZStandard/BitOperations.cs | 2 +- .../ZStandard/CompressionStream.cs | 449 +- .../Compressors/ZStandard/Compressor.cs | 333 +- .../Compressors/ZStandard/Constants.cs | 15 +- .../ZStandard/DecompressionStream.cs | 405 +- .../Compressors/ZStandard/Decompressor.cs | 293 +- .../Compressors/ZStandard/JobThreadPool.cs | 203 +- .../Compressors/ZStandard/Pool.cs | 122 - .../Compressors/ZStandard/SafeHandles.cs | 263 +- .../ZStandard/SynchronizationWrapper.cs | 25 +- .../Compressors/ZStandard/ThrowHelper.cs | 87 +- .../Compressors/ZStandard/UnmanagedObject.cs | 21 +- .../ZStandard/Unsafe/Allocations.cs | 79 +- .../ZStandard/Unsafe/BIT_CStream_t.cs | 27 +- .../ZStandard/Unsafe/BIT_DStream_status.cs | 25 +- .../ZStandard/Unsafe/BIT_DStream_t.cs | 23 +- .../Compressors/ZStandard/Unsafe/Bits.cs | 89 +- .../Compressors/ZStandard/Unsafe/Bitstream.cs | 1181 +- .../ZStandard/Unsafe/BlockSummary.cs | 15 +- .../ZStandard/Unsafe/COVER_best_s.cs | 39 +- .../ZStandard/Unsafe/COVER_ctx_t.cs | 37 +- .../ZStandard/Unsafe/COVER_dictSelection.cs | 19 +- .../ZStandard/Unsafe/COVER_epoch_info_t.cs | 17 +- .../ZStandard/Unsafe/COVER_map_pair_t_s.cs | 11 +- .../ZStandard/Unsafe/COVER_map_s.cs | 17 +- .../ZStandard/Unsafe/COVER_segment_t.cs | 21 +- 
.../Unsafe/COVER_tryParameters_data_s.cs | 21 +- .../Compressors/ZStandard/Unsafe/Clevels.cs | 1693 +- .../Compressors/ZStandard/Unsafe/Compiler.cs | 103 +- .../Compressors/ZStandard/Unsafe/Cover.cs | 687 +- .../ZStandard/Unsafe/DTableDesc.cs | 23 +- .../ZStandard/Unsafe/EStats_ress_t.cs | 21 +- .../ZStandard/Unsafe/EntropyCommon.cs | 799 +- .../ZStandard/Unsafe/ErrorPrivate.cs | 199 +- .../ZStandard/Unsafe/EstimatedBlockSize.cs | 13 +- .../ZStandard/Unsafe/FASTCOVER_accel_t.cs | 29 +- .../ZStandard/Unsafe/FASTCOVER_ctx_t.cs | 37 +- .../Unsafe/FASTCOVER_tryParameters_data_s.cs | 23 +- .../Compressors/ZStandard/Unsafe/FPStats.cs | 11 +- .../ZStandard/Unsafe/FSE_CState_t.cs | 29 +- .../ZStandard/Unsafe/FSE_DState_t.cs | 19 +- .../ZStandard/Unsafe/FSE_DTableHeader.cs | 15 +- .../ZStandard/Unsafe/FSE_DecompressWksp.cs | 11 +- .../ZStandard/Unsafe/FSE_decode_t.cs | 13 +- .../ZStandard/Unsafe/FSE_repeat.cs | 19 +- .../Unsafe/FSE_symbolCompressionTransform.cs | 17 +- .../Compressors/ZStandard/Unsafe/Fastcover.cs | 1303 +- .../ZStandard/Unsafe/Fingerprint.cs | 11 +- .../Compressors/ZStandard/Unsafe/Fse.cs | 365 +- .../ZStandard/Unsafe/FseCompress.cs | 1309 +- .../ZStandard/Unsafe/FseDecompress.cs | 797 +- .../ZStandard/Unsafe/HIST_checkInput_e.cs | 13 +- .../ZStandard/Unsafe/HUF_CStream_t.cs | 35 +- .../ZStandard/Unsafe/HUF_CTableHeader.cs | 13 +- .../Unsafe/HUF_CompressWeightsWksp.cs | 17 +- .../ZStandard/Unsafe/HUF_DEltX1.cs | 19 +- .../ZStandard/Unsafe/HUF_DEltX2.cs | 19 +- .../Unsafe/HUF_DecompressFastArgs.cs | 89 +- .../Unsafe/HUF_ReadDTableX1_Workspace.cs | 19 +- .../Unsafe/HUF_ReadDTableX2_Workspace.cs | 567 +- .../ZStandard/Unsafe/HUF_WriteCTableWksp.cs | 15 +- .../Unsafe/HUF_buildCTable_wksp_tables.cs | 1433 +- .../ZStandard/Unsafe/HUF_compress_tables_t.cs | 533 +- .../ZStandard/Unsafe/HUF_flags_e.cs | 71 +- .../ZStandard/Unsafe/HUF_nbStreams_e.cs | 11 +- .../ZStandard/Unsafe/HUF_repeat.cs | 19 +- .../Compressors/ZStandard/Unsafe/Hist.cs | 487 +- 
.../ZStandard/Unsafe/HufCompress.cs | 3193 ++- .../ZStandard/Unsafe/HufDecompress.cs | 4269 ++-- .../Compressors/ZStandard/Unsafe/Mem.cs | 213 +- .../Compressors/ZStandard/Unsafe/Pool.cs | 122 + .../ZStandard/Unsafe/RSyncState_t.cs | 15 +- .../Compressors/ZStandard/Unsafe/Range.cs | 21 +- .../ZStandard/Unsafe/RawSeqStore_t.cs | 55 +- .../ZStandard/Unsafe/RoundBuff_t.cs | 47 +- .../ZStandard/Unsafe/SeqCollector.cs | 15 +- .../Compressors/ZStandard/Unsafe/SeqDef_s.cs | 25 +- .../ZStandard/Unsafe/SeqStore_t.cs | 43 +- .../ZStandard/Unsafe/SerialState.cs | 37 +- .../ZStandard/Unsafe/SymbolEncodingType_e.cs | 15 +- .../Compressors/ZStandard/Unsafe/SyncPoint.cs | 15 +- .../ZStandard/Unsafe/XXH32_canonical_t.cs | 19 +- .../ZStandard/Unsafe/XXH32_state_s.cs | 55 +- .../ZStandard/Unsafe/XXH64_canonical_t.cs | 15 +- .../ZStandard/Unsafe/XXH64_state_s.cs | 55 +- .../ZStandard/Unsafe/XXH_alignment.cs | 23 +- .../ZStandard/Unsafe/XXH_errorcode.cs | 21 +- .../Compressors/ZStandard/Unsafe/Xxhash.cs | 1085 +- .../ZStandard/Unsafe/ZDICT_cover_params_t.cs | 47 +- .../Unsafe/ZDICT_fastCover_params_t.cs | 45 +- .../ZStandard/Unsafe/ZDICT_legacy_params_t.cs | 13 +- .../ZStandard/Unsafe/ZDICT_params_t.cs | 33 +- .../ZStandard/Unsafe/ZSTDMT_CCtxPool.cs | 21 +- .../ZStandard/Unsafe/ZSTDMT_CCtx_s.cs | 59 +- .../ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs | 21 +- .../ZStandard/Unsafe/ZSTDMT_jobDescription.cs | 83 +- .../Unsafe/ZSTD_BlockCompressor_f.cs | 21 +- .../ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs | 11 +- .../ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs | 11 +- .../ZStandard/Unsafe/ZSTD_CCtx_params_s.cs | 169 +- .../ZStandard/Unsafe/ZSTD_CCtx_s.cs | 179 +- .../ZStandard/Unsafe/ZSTD_CDict_s.cs | 51 +- .../ZStandard/Unsafe/ZSTD_CParamMode_e.cs | 49 +- .../ZStandard/Unsafe/ZSTD_DCtx_s.cs | 187 +- .../ZStandard/Unsafe/ZSTD_DDictHashSet.cs | 17 +- .../ZStandard/Unsafe/ZSTD_DDict_s.cs | 29 +- .../ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs | 13 +- .../ZStandard/Unsafe/ZSTD_EndDirective.cs | 35 +- 
.../ZStandard/Unsafe/ZSTD_ErrorCode.cs | 113 +- .../ZStandard/Unsafe/ZSTD_MatchState_t.cs | 103 +- .../ZStandard/Unsafe/ZSTD_OffsetInfo.cs | 11 +- .../ZStandard/Unsafe/ZSTD_OptPrice_e.cs | 13 +- .../ZStandard/Unsafe/ZSTD_ResetDirective.cs | 13 +- .../ZStandard/Unsafe/ZSTD_Sequence.cs | 71 +- .../ZStandard/Unsafe/ZSTD_SequenceLength.cs | 11 +- .../ZStandard/Unsafe/ZSTD_SequencePosition.cs | 21 +- .../ZStandard/Unsafe/ZSTD_blockSplitCtx.cs | 21 +- .../ZStandard/Unsafe/ZSTD_blockState_t.cs | 13 +- .../ZStandard/Unsafe/ZSTD_bounds.cs | 13 +- .../ZStandard/Unsafe/ZSTD_bufferMode_e.cs | 17 +- .../Unsafe/ZSTD_buffered_policy_e.cs | 23 +- .../ZStandard/Unsafe/ZSTD_cParameter.cs | 433 +- .../ZStandard/Unsafe/ZSTD_cStreamStage.cs | 15 +- .../Unsafe/ZSTD_compResetPolicy_e.cs | 27 +- .../Unsafe/ZSTD_compressedBlockState_t.cs | 11 +- .../Unsafe/ZSTD_compressionParameters.cs | 81 +- .../Unsafe/ZSTD_compressionStage_e.cs | 23 +- .../ZStandard/Unsafe/ZSTD_customMem.cs | 25 +- .../ZStandard/Unsafe/ZSTD_cwksp.cs | 217 +- .../Unsafe/ZSTD_cwksp_alloc_phase_e.cs | 21 +- .../Unsafe/ZSTD_cwksp_static_alloc_e.cs | 23 +- .../ZStandard/Unsafe/ZSTD_dParameter.cs | 71 +- .../ZStandard/Unsafe/ZSTD_dStage.cs | 23 +- .../ZStandard/Unsafe/ZSTD_dStreamStage.cs | 19 +- .../ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs | 23 +- .../Unsafe/ZSTD_dictContentType_e.cs | 21 +- .../ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs | 15 +- .../ZStandard/Unsafe/ZSTD_dictMode_e.cs | 15 +- .../Unsafe/ZSTD_dictTableLoadMethod_e.cs | 11 +- .../ZStandard/Unsafe/ZSTD_dictUses_e.cs | 21 +- .../Unsafe/ZSTD_entropyCTablesMetadata_t.cs | 11 +- .../ZStandard/Unsafe/ZSTD_entropyCTables_t.cs | 11 +- .../ZStandard/Unsafe/ZSTD_entropyDTables_t.cs | 2613 +- .../Unsafe/ZSTD_forceIgnoreChecksum_e.cs | 15 +- .../ZStandard/Unsafe/ZSTD_format_e.cs | 19 +- .../ZStandard/Unsafe/ZSTD_frameHeader.cs | 35 +- .../ZStandard/Unsafe/ZSTD_frameParameters.cs | 19 +- .../ZStandard/Unsafe/ZSTD_frameProgression.cs | 33 +- 
.../ZStandard/Unsafe/ZSTD_frameSizeInfo.cs | 25 +- .../ZStandard/Unsafe/ZSTD_frameType_e.cs | 13 +- .../Unsafe/ZSTD_fseCTablesMetadata_t.cs | 31 +- .../ZStandard/Unsafe/ZSTD_fseCTables_t.cs | 21 +- .../ZStandard/Unsafe/ZSTD_fseState.cs | 11 +- .../ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs | 27 +- .../Unsafe/ZSTD_hufCTablesMetadata_t.cs | 29 +- .../ZStandard/Unsafe/ZSTD_hufCTables_t.cs | 531 +- .../ZStandard/Unsafe/ZSTD_inBuffer_s.cs | 27 +- .../Unsafe/ZSTD_indexResetPolicy_e.cs | 23 +- .../ZStandard/Unsafe/ZSTD_litLocation_e.cs | 19 +- .../Unsafe/ZSTD_literalCompressionMode_e.cs | 25 +- .../ZStandard/Unsafe/ZSTD_localDict.cs | 19 +- .../ZStandard/Unsafe/ZSTD_longLengthType_e.cs | 23 +- .../ZStandard/Unsafe/ZSTD_longOffset_e.cs | 11 +- .../ZStandard/Unsafe/ZSTD_match_t.cs | 23 +- .../ZStandard/Unsafe/ZSTD_nextInputType_e.cs | 19 +- .../ZStandard/Unsafe/ZSTD_optLdm_t.cs | 25 +- .../ZStandard/Unsafe/ZSTD_optimal_t.cs | 27 +- .../ZStandard/Unsafe/ZSTD_outBuffer_s.cs | 21 +- .../ZStandard/Unsafe/ZSTD_overlap_e.cs | 11 +- .../ZStandard/Unsafe/ZSTD_paramSwitch_e.cs | 19 +- .../ZStandard/Unsafe/ZSTD_parameters.cs | 13 +- .../ZStandard/Unsafe/ZSTD_prefixDict_s.cs | 13 +- .../Unsafe/ZSTD_refMultipleDDicts_e.cs | 13 +- .../ZStandard/Unsafe/ZSTD_resetTarget_e.cs | 13 +- .../ZStandard/Unsafe/ZSTD_seqSymbol.cs | 29 +- .../ZStandard/Unsafe/ZSTD_seqSymbol_header.cs | 17 +- .../ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs | 17 +- .../ZStandard/Unsafe/ZSTD_strategy.cs | 27 +- .../Unsafe/ZSTD_symbolEncodingTypeStats_t.cs | 27 +- .../Unsafe/ZSTD_tableFillPurpose_e.cs | 13 +- .../ZStandard/Unsafe/ZSTD_window_t.cs | 37 +- .../Compressors/ZStandard/Unsafe/Zdict.cs | 1113 +- .../Compressors/ZStandard/Unsafe/Zstd.cs | 17 +- .../ZStandard/Unsafe/ZstdCommon.cs | 79 +- .../ZStandard/Unsafe/ZstdCompress.cs | 19625 ++++++++-------- .../ZStandard/Unsafe/ZstdCompressInternal.cs | 2165 +- .../ZStandard/Unsafe/ZstdCompressLiterals.cs | 541 +- .../ZStandard/Unsafe/ZstdCompressSequences.cs | 1610 +- 
.../Unsafe/ZstdCompressSuperblock.cs | 1705 +- .../Compressors/ZStandard/Unsafe/ZstdCwksp.cs | 1001 +- .../Compressors/ZStandard/Unsafe/ZstdDdict.cs | 509 +- .../ZStandard/Unsafe/ZstdDecompress.cs | 5921 +++-- .../ZStandard/Unsafe/ZstdDecompressBlock.cs | 5657 +++-- .../Unsafe/ZstdDecompressInternal.cs | 363 +- .../ZStandard/Unsafe/ZstdDoubleFast.cs | 2001 +- .../Compressors/ZStandard/Unsafe/ZstdFast.cs | 2167 +- .../ZStandard/Unsafe/ZstdInternal.cs | 637 +- .../Compressors/ZStandard/Unsafe/ZstdLazy.cs | 8141 ++++--- .../Compressors/ZStandard/Unsafe/ZstdLdm.cs | 1573 +- .../ZStandard/Unsafe/ZstdLdmGeartab.cs | 531 +- .../Compressors/ZStandard/Unsafe/ZstdOpt.cs | 3821 ++- .../ZStandard/Unsafe/ZstdPresplit.cs | 477 +- .../ZStandard/Unsafe/ZstdmtCompress.cs | 3171 ++- .../ZStandard/Unsafe/_wksps_e__Union.cs | 21 +- .../ZStandard/Unsafe/algo_time_t.cs | 19 +- .../ZStandard/Unsafe/base_directive_e.cs | 11 +- .../ZStandard/Unsafe/blockProperties_t.cs | 13 +- .../ZStandard/Unsafe/blockType_e.cs | 15 +- .../Compressors/ZStandard/Unsafe/buffer_s.cs | 23 +- .../Compressors/ZStandard/Unsafe/dictItem.cs | 13 +- .../Compressors/ZStandard/Unsafe/inBuff_t.cs | 23 +- .../ZStandard/Unsafe/ldmEntry_t.cs | 11 +- .../ZStandard/Unsafe/ldmMatchCandidate_t.cs | 15 +- .../ZStandard/Unsafe/ldmParams_t.cs | 31 +- .../ZStandard/Unsafe/ldmRollingHashState_t.cs | 13 +- .../ZStandard/Unsafe/ldmState_t.cs | 291 +- .../Compressors/ZStandard/Unsafe/nodeElt_s.cs | 21 +- .../ZStandard/Unsafe/offsetCount_t.cs | 13 +- .../ZStandard/Unsafe/optState_t.cs | 73 +- .../Compressors/ZStandard/Unsafe/rankPos.cs | 11 +- .../ZStandard/Unsafe/rankValCol_t.cs | 11 +- .../Compressors/ZStandard/Unsafe/rawSeq.cs | 19 +- .../ZStandard/Unsafe/repcodes_s.cs | 11 +- .../ZStandard/Unsafe/searchMethod_e.cs | 13 +- .../ZStandard/Unsafe/seqState_t.cs | 27 +- .../ZStandard/Unsafe/seqStoreSplits.cs | 17 +- .../Compressors/ZStandard/Unsafe/seq_t.cs | 13 +- .../ZStandard/Unsafe/sortedSymbol_t.cs | 11 +- 
.../ZStandard/Unsafe/streaming_operation.cs | 13 +- .../Compressors/ZStandard/UnsafeHelper.cs | 123 +- .../Compressors/ZStandard/ZStandardStream.cs | 2 +- .../Compressors/ZStandard/ZstdException.cs | 18 +- src/SharpCompress/Factories/ArcFactory.cs | 57 +- src/SharpCompress/IO/IStreamStack.cs | 379 +- src/SharpCompress/Readers/Arc/ArcReader.cs | 49 +- src/SharpCompress/Writers/Zip/ZipWriter.cs | 3 +- 250 files changed, 48637 insertions(+), 48881 deletions(-) delete mode 100644 src/SharpCompress/Compressors/ZStandard/Pool.cs create mode 100644 src/SharpCompress/Compressors/ZStandard/Unsafe/Pool.cs diff --git a/src/SharpCompress/Common/Arc/ArcEntry.cs b/src/SharpCompress/Common/Arc/ArcEntry.cs index a67f10d11..0a94ae0c8 100644 --- a/src/SharpCompress/Common/Arc/ArcEntry.cs +++ b/src/SharpCompress/Common/Arc/ArcEntry.cs @@ -7,54 +7,53 @@ using SharpCompress.Common.GZip; using SharpCompress.Common.Tar; -namespace SharpCompress.Common.Arc +namespace SharpCompress.Common.Arc; + +public class ArcEntry : Entry { - public class ArcEntry : Entry - { - private readonly ArcFilePart? _filePart; + private readonly ArcFilePart? _filePart; - internal ArcEntry(ArcFilePart? filePart) - { - _filePart = filePart; - } + internal ArcEntry(ArcFilePart? filePart) + { + _filePart = filePart; + } - public override long Crc + public override long Crc + { + get { - get + if (_filePart == null) { - if (_filePart == null) - { - return 0; - } - return _filePart.Header.Crc16; + return 0; } + return _filePart.Header.Crc16; } + } - public override string? Key => _filePart?.Header.Name; + public override string? Key => _filePart?.Header.Name; - public override string? LinkTarget => null; + public override string? LinkTarget => null; - public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0; + public override long CompressedSize => _filePart?.Header.CompressedSize ?? 0; - public override CompressionType CompressionType => - _filePart?.Header.CompressionMethod ?? 
CompressionType.Unknown; + public override CompressionType CompressionType => + _filePart?.Header.CompressionMethod ?? CompressionType.Unknown; - public override long Size => throw new NotImplementedException(); + public override long Size => throw new NotImplementedException(); - public override DateTime? LastModifiedTime => null; + public override DateTime? LastModifiedTime => null; - public override DateTime? CreatedTime => null; + public override DateTime? CreatedTime => null; - public override DateTime? LastAccessedTime => null; + public override DateTime? LastAccessedTime => null; - public override DateTime? ArchivedTime => null; + public override DateTime? ArchivedTime => null; - public override bool IsEncrypted => false; + public override bool IsEncrypted => false; - public override bool IsDirectory => false; + public override bool IsDirectory => false; - public override bool IsSplitAfter => false; + public override bool IsSplitAfter => false; - internal override IEnumerable Parts => _filePart.Empty(); - } + internal override IEnumerable Parts => _filePart.Empty(); } diff --git a/src/SharpCompress/Common/Arc/ArcEntryHeader.cs b/src/SharpCompress/Common/Arc/ArcEntryHeader.cs index 137b190a6..b37303479 100644 --- a/src/SharpCompress/Common/Arc/ArcEntryHeader.cs +++ b/src/SharpCompress/Common/Arc/ArcEntryHeader.cs @@ -3,74 +3,73 @@ using System.Linq; using System.Text; -namespace SharpCompress.Common.Arc +namespace SharpCompress.Common.Arc; + +public class ArcEntryHeader { - public class ArcEntryHeader - { - public ArchiveEncoding ArchiveEncoding { get; } - public CompressionType CompressionMethod { get; private set; } - public string? 
Name { get; private set; } - public long CompressedSize { get; private set; } - public DateTime DateTime { get; private set; } - public int Crc16 { get; private set; } - public long OriginalSize { get; private set; } - public long DataStartPosition { get; private set; } + public ArchiveEncoding ArchiveEncoding { get; } + public CompressionType CompressionMethod { get; private set; } + public string? Name { get; private set; } + public long CompressedSize { get; private set; } + public DateTime DateTime { get; private set; } + public int Crc16 { get; private set; } + public long OriginalSize { get; private set; } + public long DataStartPosition { get; private set; } - public ArcEntryHeader(ArchiveEncoding archiveEncoding) - { - this.ArchiveEncoding = archiveEncoding; - } + public ArcEntryHeader(ArchiveEncoding archiveEncoding) + { + this.ArchiveEncoding = archiveEncoding; + } - public ArcEntryHeader? ReadHeader(Stream stream) + public ArcEntryHeader? ReadHeader(Stream stream) + { + byte[] headerBytes = new byte[29]; + if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length) { - byte[] headerBytes = new byte[29]; - if (stream.Read(headerBytes, 0, headerBytes.Length) != headerBytes.Length) - { - return null; - } - DataStartPosition = stream.Position; - return LoadFrom(headerBytes); + return null; } + DataStartPosition = stream.Position; + return LoadFrom(headerBytes); + } - public ArcEntryHeader LoadFrom(byte[] headerBytes) - { - CompressionMethod = GetCompressionType(headerBytes[1]); + public ArcEntryHeader LoadFrom(byte[] headerBytes) + { + CompressionMethod = GetCompressionType(headerBytes[1]); - // Read name - int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator - Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? nameEnd - 2 : 12); + // Read name + int nameEnd = Array.IndexOf(headerBytes, (byte)0, 1); // Find null terminator + Name = Encoding.UTF8.GetString(headerBytes, 2, nameEnd > 0 ? 
nameEnd - 2 : 12); - int offset = 15; - CompressedSize = BitConverter.ToUInt32(headerBytes, offset); - offset += 4; - uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset); - DateTime = ConvertToDateTime(rawDateTime); - offset += 4; - Crc16 = BitConverter.ToUInt16(headerBytes, offset); - offset += 2; - OriginalSize = BitConverter.ToUInt32(headerBytes, offset); - return this; - } + int offset = 15; + CompressedSize = BitConverter.ToUInt32(headerBytes, offset); + offset += 4; + uint rawDateTime = BitConverter.ToUInt32(headerBytes, offset); + DateTime = ConvertToDateTime(rawDateTime); + offset += 4; + Crc16 = BitConverter.ToUInt16(headerBytes, offset); + offset += 2; + OriginalSize = BitConverter.ToUInt32(headerBytes, offset); + return this; + } - private CompressionType GetCompressionType(byte value) + private CompressionType GetCompressionType(byte value) + { + return value switch { - return value switch - { - 1 or 2 => CompressionType.None, - 3 => CompressionType.RLE90, - 4 => CompressionType.Squeezed, - 5 or 6 or 7 or 8 => CompressionType.Crunched, - 9 => CompressionType.Squashed, - 10 => CompressionType.Crushed, - 11 => CompressionType.Distilled, - _ => CompressionType.Unknown, - }; - } + 1 or 2 => CompressionType.None, + 3 => CompressionType.RLE90, + 4 => CompressionType.Squeezed, + 5 or 6 or 7 or 8 => CompressionType.Crunched, + 9 => CompressionType.Squashed, + 10 => CompressionType.Crushed, + 11 => CompressionType.Distilled, + _ => CompressionType.Unknown, + }; + } - public static DateTime ConvertToDateTime(long rawDateTime) - { - // Convert Unix timestamp to DateTime (UTC) - return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime; - } + public static DateTime ConvertToDateTime(long rawDateTime) + { + // Convert Unix timestamp to DateTime (UTC) + return DateTimeOffset.FromUnixTimeSeconds(rawDateTime).UtcDateTime; } } diff --git a/src/SharpCompress/Common/Arc/ArcFilePart.cs b/src/SharpCompress/Common/Arc/ArcFilePart.cs index 
d1ff2cfc5..231733857 100644 --- a/src/SharpCompress/Common/Arc/ArcFilePart.cs +++ b/src/SharpCompress/Common/Arc/ArcFilePart.cs @@ -13,63 +13,62 @@ using SharpCompress.Compressors.Squeezed; using SharpCompress.IO; -namespace SharpCompress.Common.Arc +namespace SharpCompress.Common.Arc; + +public class ArcFilePart : FilePart { - public class ArcFilePart : FilePart - { - private readonly Stream? _stream; + private readonly Stream? _stream; - internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream) - : base(localArcHeader.ArchiveEncoding) - { - _stream = seekableStream; - Header = localArcHeader; - } + internal ArcFilePart(ArcEntryHeader localArcHeader, Stream? seekableStream) + : base(localArcHeader.ArchiveEncoding) + { + _stream = seekableStream; + Header = localArcHeader; + } - internal ArcEntryHeader Header { get; set; } + internal ArcEntryHeader Header { get; set; } - internal override string? FilePartName => Header.Name; + internal override string? FilePartName => Header.Name; - internal override Stream GetCompressedStream() + internal override Stream GetCompressedStream() + { + if (_stream != null) { - if (_stream != null) + Stream compressedStream; + switch (Header.CompressionMethod) { - Stream compressedStream; - switch (Header.CompressionMethod) - { - case CompressionType.None: - compressedStream = new ReadOnlySubStream( - _stream, - Header.DataStartPosition, - Header.CompressedSize - ); - break; - case CompressionType.RLE90: - compressedStream = new RunLength90Stream( - _stream, - (int)Header.CompressedSize - ); - break; - case CompressionType.Squeezed: - compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize); - break; - case CompressionType.Crunched: - compressedStream = new ArcLzwStream( - _stream, - (int)Header.CompressedSize, - true - ); - break; - default: - throw new NotSupportedException( - "CompressionMethod: " + Header.CompressionMethod - ); - } - return compressedStream; + case CompressionType.None: + 
compressedStream = new ReadOnlySubStream( + _stream, + Header.DataStartPosition, + Header.CompressedSize + ); + break; + case CompressionType.RLE90: + compressedStream = new RunLength90Stream( + _stream, + (int)Header.CompressedSize + ); + break; + case CompressionType.Squeezed: + compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize); + break; + case CompressionType.Crunched: + compressedStream = new ArcLzwStream( + _stream, + (int)Header.CompressedSize, + true + ); + break; + default: + throw new NotSupportedException( + "CompressionMethod: " + Header.CompressionMethod + ); } - return _stream.NotNull(); + return compressedStream; } - - internal override Stream? GetRawStream() => _stream; + return _stream.NotNull(); } -} + + internal override Stream? GetRawStream() => _stream; +} \ No newline at end of file diff --git a/src/SharpCompress/Common/Arc/ArcVolume.cs b/src/SharpCompress/Common/Arc/ArcVolume.cs index 8ebd11ea9..99fb56eee 100644 --- a/src/SharpCompress/Common/Arc/ArcVolume.cs +++ b/src/SharpCompress/Common/Arc/ArcVolume.cs @@ -6,11 +6,10 @@ using System.Threading.Tasks; using SharpCompress.Readers; -namespace SharpCompress.Common.Arc +namespace SharpCompress.Common.Arc; + +public class ArcVolume : Volume { - public class ArcVolume : Volume - { - public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0) - : base(stream, readerOptions, index) { } - } + public ArcVolume(Stream stream, ReaderOptions readerOptions, int index = 0) + : base(stream, readerOptions, index) { } } diff --git a/src/SharpCompress/Common/Zip/ZipFilePart.cs b/src/SharpCompress/Common/Zip/ZipFilePart.cs index 77dc4abba..16eb8e1a9 100644 --- a/src/SharpCompress/Common/Zip/ZipFilePart.cs +++ b/src/SharpCompress/Common/Zip/ZipFilePart.cs @@ -13,8 +13,8 @@ using SharpCompress.Compressors.Reduce; using SharpCompress.Compressors.Shrink; using SharpCompress.Compressors.Xz; +using SharpCompress.Compressors.ZStandard; using SharpCompress.IO; -using ZstdSharp; 
namespace SharpCompress.Common.Zip; diff --git a/src/SharpCompress/Compressors/Filters/DeltaFilter.cs b/src/SharpCompress/Compressors/Filters/DeltaFilter.cs index a6954116e..2f1c2b2eb 100644 --- a/src/SharpCompress/Compressors/Filters/DeltaFilter.cs +++ b/src/SharpCompress/Compressors/Filters/DeltaFilter.cs @@ -1,36 +1,35 @@ using System.IO; -namespace SharpCompress.Compressors.Filters +namespace SharpCompress.Compressors.Filters; + +internal class DeltaFilter : Filter { - internal class DeltaFilter : Filter + private const int DISTANCE_MIN = 1; + private const int DISTANCE_MAX = 256; + private const int DISTANCE_MASK = DISTANCE_MAX - 1; + + private int _distance; + private byte[] _history; + private int _position; + + public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info) + : base(isEncoder, baseStream, 1) { - private const int DISTANCE_MIN = 1; - private const int DISTANCE_MAX = 256; - private const int DISTANCE_MASK = DISTANCE_MAX - 1; + _distance = info[0]; + _history = new byte[DISTANCE_MAX]; + _position = 0; + } - private int _distance; - private byte[] _history; - private int _position; + protected override int Transform(byte[] buffer, int offset, int count) + { + var end = offset + count; - public DeltaFilter(bool isEncoder, Stream baseStream, byte[] info) - : base(isEncoder, baseStream, 1) + for (var i = offset; i < end; i++) { - _distance = info[0]; - _history = new byte[DISTANCE_MAX]; - _position = 0; + buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK]; + _history[_position & DISTANCE_MASK] = buffer[i]; } - protected override int Transform(byte[] buffer, int offset, int count) - { - var end = offset + count; - - for (var i = offset; i < end; i++) - { - buffer[i] += _history[(_distance + _position--) & DISTANCE_MASK]; - _history[_position & DISTANCE_MASK] = buffer[i]; - } - - return count; - } + return count; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/LZMA/Registry.cs 
b/src/SharpCompress/Compressors/LZMA/Registry.cs index eb3e3bdd6..d71abded2 100644 --- a/src/SharpCompress/Compressors/LZMA/Registry.cs +++ b/src/SharpCompress/Compressors/LZMA/Registry.cs @@ -7,7 +7,7 @@ using SharpCompress.Compressors.Filters; using SharpCompress.Compressors.LZMA.Utilites; using SharpCompress.Compressors.PPMd; -using ZstdSharp; +using SharpCompress.Compressors.ZStandard; namespace SharpCompress.Compressors.LZMA; diff --git a/src/SharpCompress/Compressors/Lzw/LzwConstants.cs b/src/SharpCompress/Compressors/Lzw/LzwConstants.cs index 0325adbbd..7e8a63997 100644 --- a/src/SharpCompress/Compressors/Lzw/LzwConstants.cs +++ b/src/SharpCompress/Compressors/Lzw/LzwConstants.cs @@ -1,65 +1,64 @@ -namespace SharpCompress.Compressors.Lzw +namespace SharpCompress.Compressors.Lzw; + +/// +/// This class contains constants used for LZW +/// +[System.Diagnostics.CodeAnalysis.SuppressMessage( + "Naming", + "CA1707:Identifiers should not contain underscores", + Justification = "kept for backwards compatibility" +)] +public sealed class LzwConstants { /// - /// This class contains constants used for LZW + /// Magic number found at start of LZW header: 0x1f 0x9d /// - [System.Diagnostics.CodeAnalysis.SuppressMessage( - "Naming", - "CA1707:Identifiers should not contain underscores", - Justification = "kept for backwards compatibility" - )] - public sealed class LzwConstants - { - /// - /// Magic number found at start of LZW header: 0x1f 0x9d - /// - public const int MAGIC = 0x1f9d; + public const int MAGIC = 0x1f9d; - /// - /// Maximum number of bits per code - /// - public const int MAX_BITS = 16; + /// + /// Maximum number of bits per code + /// + public const int MAX_BITS = 16; - /* 3rd header byte: - * bit 0..4 Number of compression bits - * bit 5 Extended header - * bit 6 Free - * bit 7 Block mode - */ + /* 3rd header byte: + * bit 0..4 Number of compression bits + * bit 5 Extended header + * bit 6 Free + * bit 7 Block mode + */ - /// - /// Mask for 'number of 
compression bits' - /// - public const int BIT_MASK = 0x1f; + /// + /// Mask for 'number of compression bits' + /// + public const int BIT_MASK = 0x1f; - /// - /// Indicates the presence of a fourth header byte - /// - public const int EXTENDED_MASK = 0x20; + /// + /// Indicates the presence of a fourth header byte + /// + public const int EXTENDED_MASK = 0x20; - //public const int FREE_MASK = 0x40; + //public const int FREE_MASK = 0x40; - /// - /// Reserved bits - /// - public const int RESERVED_MASK = 0x60; + /// + /// Reserved bits + /// + public const int RESERVED_MASK = 0x60; - /// - /// Block compression: if table is full and compression rate is dropping, - /// clear the dictionary. - /// - public const int BLOCK_MODE_MASK = 0x80; + /// + /// Block compression: if table is full and compression rate is dropping, + /// clear the dictionary. + /// + public const int BLOCK_MODE_MASK = 0x80; - /// - /// LZW file header size (in bytes) - /// - public const int HDR_SIZE = 3; + /// + /// LZW file header size (in bytes) + /// + public const int HDR_SIZE = 3; - /// - /// Initial number of bits per code - /// - public const int INIT_BITS = 9; + /// + /// Initial number of bits per code + /// + public const int INIT_BITS = 9; - private LzwConstants() { } - } -} + private LzwConstants() { } +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/Lzw/LzwStream.cs b/src/SharpCompress/Compressors/Lzw/LzwStream.cs index 71132dac0..c6a0d09b0 100644 --- a/src/SharpCompress/Compressors/Lzw/LzwStream.cs +++ b/src/SharpCompress/Compressors/Lzw/LzwStream.cs @@ -3,622 +3,621 @@ using SharpCompress.Common; using SharpCompress.IO; -namespace SharpCompress.Compressors.Lzw +namespace SharpCompress.Compressors.Lzw; + +/// +/// This filter stream is used to decompress a LZW format stream. +/// Specifically, a stream that uses the LZC compression method. +/// This file format is usually associated with the .Z file extension. 
+/// +/// See http://en.wikipedia.org/wiki/Compress +/// See http://wiki.wxwidgets.org/Development:_Z_File_Format +/// +/// The file header consists of 3 (or optionally 4) bytes. The first two bytes +/// contain the magic marker "0x1f 0x9d", followed by a byte of flags. +/// +/// Based on Java code by Ronald Tschalar, which in turn was based on the unlzw.c +/// code in the gzip package. +/// +/// This sample shows how to unzip a compressed file +/// +/// using System; +/// using System.IO; +/// +/// using ICSharpCode.SharpZipLib.Core; +/// using ICSharpCode.SharpZipLib.LZW; +/// +/// class MainClass +/// { +/// public static void Main(string[] args) +/// { +/// using (Stream inStream = new LzwInputStream(File.OpenRead(args[0]))) +/// using (FileStream outStream = File.Create(Path.GetFileNameWithoutExtension(args[0]))) { +/// byte[] buffer = new byte[4096]; +/// StreamUtils.Copy(inStream, outStream, buffer); +/// // OR +/// inStream.Read(buffer, 0, buffer.Length); +/// // now do something with the buffer +/// } +/// } +/// } +/// +/// +public class LzwStream : Stream, IStreamStack { - /// - /// This filter stream is used to decompress a LZW format stream. - /// Specifically, a stream that uses the LZC compression method. - /// This file format is usually associated with the .Z file extension. - /// - /// See http://en.wikipedia.org/wiki/Compress - /// See http://wiki.wxwidgets.org/Development:_Z_File_Format - /// - /// The file header consists of 3 (or optionally 4) bytes. The first two bytes - /// contain the magic marker "0x1f 0x9d", followed by a byte of flags. - /// - /// Based on Java code by Ronald Tschalar, which in turn was based on the unlzw.c - /// code in the gzip package. 
- /// - /// This sample shows how to unzip a compressed file - /// - /// using System; - /// using System.IO; - /// - /// using ICSharpCode.SharpZipLib.Core; - /// using ICSharpCode.SharpZipLib.LZW; - /// - /// class MainClass - /// { - /// public static void Main(string[] args) - /// { - /// using (Stream inStream = new LzwInputStream(File.OpenRead(args[0]))) - /// using (FileStream outStream = File.Create(Path.GetFileNameWithoutExtension(args[0]))) { - /// byte[] buffer = new byte[4096]; - /// StreamUtils.Copy(inStream, outStream, buffer); - /// // OR - /// inStream.Read(buffer, 0, buffer.Length); - /// // now do something with the buffer - /// } - /// } - /// } - /// - /// - public class LzwStream : Stream, IStreamStack - { #if DEBUG_STREAMS long IStreamStack.InstanceId { get; set; } #endif - int IStreamStack.DefaultBufferSize { get; set; } + int IStreamStack.DefaultBufferSize { get; set; } - Stream IStreamStack.BaseStream() => baseInputStream; + Stream IStreamStack.BaseStream() => baseInputStream; - int IStreamStack.BufferSize - { - get => 0; - set { } - } - int IStreamStack.BufferPosition - { - get => 0; - set { } - } + int IStreamStack.BufferSize + { + get => 0; + set { } + } + int IStreamStack.BufferPosition + { + get => 0; + set { } + } - void IStreamStack.SetPosition(long position) { } + void IStreamStack.SetPosition(long position) { } - public static bool IsLzwStream(Stream stream) + public static bool IsLzwStream(Stream stream) + { + try { - try - { - byte[] hdr = new byte[LzwConstants.HDR_SIZE]; + byte[] hdr = new byte[LzwConstants.HDR_SIZE]; - int result = stream.Read(hdr, 0, hdr.Length); + int result = stream.Read(hdr, 0, hdr.Length); - // Check the magic marker - if (result < 0) - throw new IncompleteArchiveException("Failed to read LZW header"); + // Check the magic marker + if (result < 0) + throw new IncompleteArchiveException("Failed to read LZW header"); - if (hdr[0] != (LzwConstants.MAGIC >> 8) || hdr[1] != (LzwConstants.MAGIC & 0xff)) - { - 
throw new IncompleteArchiveException( - String.Format( - "Wrong LZW header. Magic bytes don't match. 0x{0:x2} 0x{1:x2}", - hdr[0], - hdr[1] - ) - ); - } - } - catch (Exception) + if (hdr[0] != (LzwConstants.MAGIC >> 8) || hdr[1] != (LzwConstants.MAGIC & 0xff)) { - return false; + throw new IncompleteArchiveException( + String.Format( + "Wrong LZW header. Magic bytes don't match. 0x{0:x2} 0x{1:x2}", + hdr[0], + hdr[1] + ) + ); } - return true; } - - /// - /// Gets or sets a flag indicating ownership of underlying stream. - /// When the flag is true will close the underlying stream also. - /// - /// The default value is true. - public bool IsStreamOwner { get; set; } = false; - - /// - /// Creates a LzwInputStream - /// - /// - /// The stream to read compressed data from (baseInputStream LZW format) - /// - public LzwStream(Stream baseInputStream) + catch (Exception) { - this.baseInputStream = baseInputStream; + return false; + } + return true; + } + + /// + /// Gets or sets a flag indicating ownership of underlying stream. + /// When the flag is true will close the underlying stream also. + /// + /// The default value is true. + public bool IsStreamOwner { get; set; } = false; + + /// + /// Creates a LzwInputStream + /// + /// + /// The stream to read compressed data from (baseInputStream LZW format) + /// + public LzwStream(Stream baseInputStream) + { + this.baseInputStream = baseInputStream; #if DEBUG_STREAMS this.DebugConstruct(typeof(LzwStream)); #endif + } + + /// + /// See + /// + /// + public override int ReadByte() + { + int b = Read(one, 0, 1); + if (b == 1) + return (one[0] & 0xff); + return -1; + } + + /// + /// Reads decompressed data into the provided buffer byte array + /// + /// + /// The array to read and decompress data into + /// + /// + /// The offset indicating where the data should be placed + /// + /// + /// The number of bytes to decompress + /// + /// The number of bytes read. 
Zero signals the end of stream + public override int Read(byte[] buffer, int offset, int count) + { + if (!headerParsed) + ParseHeader(); + + if (eof) + return 0; + + int start = offset; + + /* Using local copies of various variables speeds things up by as + * much as 30% in Java! Performance not tested in C#. + */ + int[] lTabPrefix = tabPrefix; + byte[] lTabSuffix = tabSuffix; + byte[] lStack = stack; + int lNBits = nBits; + int lMaxCode = maxCode; + int lMaxMaxCode = maxMaxCode; + int lBitMask = bitMask; + int lOldCode = oldCode; + byte lFinChar = finChar; + int lStackP = stackP; + int lFreeEnt = freeEnt; + byte[] lData = data; + int lBitPos = bitPos; + + // empty stack if stuff still left + int sSize = lStack.Length - lStackP; + if (sSize > 0) + { + int num = (sSize >= count) ? count : sSize; + Array.Copy(lStack, lStackP, buffer, offset, num); + offset += num; + count -= num; + lStackP += num; } - /// - /// See - /// - /// - public override int ReadByte() + if (count == 0) { - int b = Read(one, 0, 1); - if (b == 1) - return (one[0] & 0xff); - return -1; + stackP = lStackP; + return offset - start; } - /// - /// Reads decompressed data into the provided buffer byte array - /// - /// - /// The array to read and decompress data into - /// - /// - /// The offset indicating where the data should be placed - /// - /// - /// The number of bytes to decompress - /// - /// The number of bytes read. Zero signals the end of stream - public override int Read(byte[] buffer, int offset, int count) + // loop, filling local buffer until enough data has been decompressed + MainLoop: + do { - if (!headerParsed) - ParseHeader(); - - if (eof) - return 0; - - int start = offset; - - /* Using local copies of various variables speeds things up by as - * much as 30% in Java! Performance not tested in C#. 
- */ - int[] lTabPrefix = tabPrefix; - byte[] lTabSuffix = tabSuffix; - byte[] lStack = stack; - int lNBits = nBits; - int lMaxCode = maxCode; - int lMaxMaxCode = maxMaxCode; - int lBitMask = bitMask; - int lOldCode = oldCode; - byte lFinChar = finChar; - int lStackP = stackP; - int lFreeEnt = freeEnt; - byte[] lData = data; - int lBitPos = bitPos; - - // empty stack if stuff still left - int sSize = lStack.Length - lStackP; - if (sSize > 0) + if (end < EXTRA) { - int num = (sSize >= count) ? count : sSize; - Array.Copy(lStack, lStackP, buffer, offset, num); - offset += num; - count -= num; - lStackP += num; + Fill(); } - if (count == 0) - { - stackP = lStackP; - return offset - start; - } + int bitIn = (got > 0) ? (end - end % lNBits) << 3 : (end << 3) - (lNBits - 1); - // loop, filling local buffer until enough data has been decompressed - MainLoop: - do + while (lBitPos < bitIn) { - if (end < EXTRA) + #region A + + // handle 1-byte reads correctly + if (count == 0) { - Fill(); + nBits = lNBits; + maxCode = lMaxCode; + maxMaxCode = lMaxMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; + + return offset - start; } - int bitIn = (got > 0) ? (end - end % lNBits) << 3 : (end << 3) - (lNBits - 1); - - while (lBitPos < bitIn) + // check for code-width expansion + if (lFreeEnt > lMaxCode) { - #region A + int nBytes = lNBits << 3; + lBitPos = (lBitPos - 1) + nBytes - (lBitPos - 1 + nBytes) % nBytes; - // handle 1-byte reads correctly - if (count == 0) - { - nBits = lNBits; - maxCode = lMaxCode; - maxMaxCode = lMaxMaxCode; - bitMask = lBitMask; - oldCode = lOldCode; - finChar = lFinChar; - stackP = lStackP; - freeEnt = lFreeEnt; - bitPos = lBitPos; - - return offset - start; - } + lNBits++; + lMaxCode = (lNBits == maxBits) ? 
lMaxMaxCode : (1 << lNBits) - 1; - // check for code-width expansion - if (lFreeEnt > lMaxCode) - { - int nBytes = lNBits << 3; - lBitPos = (lBitPos - 1) + nBytes - (lBitPos - 1 + nBytes) % nBytes; + lBitMask = (1 << lNBits) - 1; + lBitPos = ResetBuf(lBitPos); + goto MainLoop; + } - lNBits++; - lMaxCode = (lNBits == maxBits) ? lMaxMaxCode : (1 << lNBits) - 1; + #endregion A - lBitMask = (1 << lNBits) - 1; - lBitPos = ResetBuf(lBitPos); - goto MainLoop; - } + #region B - #endregion A + // read next code + int pos = lBitPos >> 3; + int code = + ( + ( + (lData[pos] & 0xFF) + | ((lData[pos + 1] & 0xFF) << 8) + | ((lData[pos + 2] & 0xFF) << 16) + ) >> (lBitPos & 0x7) + ) & lBitMask; - #region B + lBitPos += lNBits; - // read next code - int pos = lBitPos >> 3; - int code = - ( - ( - (lData[pos] & 0xFF) - | ((lData[pos + 1] & 0xFF) << 8) - | ((lData[pos + 2] & 0xFF) << 16) - ) >> (lBitPos & 0x7) - ) & lBitMask; - - lBitPos += lNBits; + // handle first iteration + if (lOldCode == -1) + { + if (code >= 256) + throw new IncompleteArchiveException( + "corrupt input: " + code + " > 255" + ); - // handle first iteration - if (lOldCode == -1) - { - if (code >= 256) - throw new IncompleteArchiveException( - "corrupt input: " + code + " > 255" - ); - - lFinChar = (byte)(lOldCode = code); - buffer[offset++] = lFinChar; - count--; - continue; - } + lFinChar = (byte)(lOldCode = code); + buffer[offset++] = lFinChar; + count--; + continue; + } - // handle CLEAR code - if (code == TBL_CLEAR && blockMode) - { - Array.Copy(zeros, 0, lTabPrefix, 0, zeros.Length); - lFreeEnt = TBL_FIRST - 1; + // handle CLEAR code + if (code == TBL_CLEAR && blockMode) + { + Array.Copy(zeros, 0, lTabPrefix, 0, zeros.Length); + lFreeEnt = TBL_FIRST - 1; - int nBytes = lNBits << 3; - lBitPos = (lBitPos - 1) + nBytes - (lBitPos - 1 + nBytes) % nBytes; - lNBits = LzwConstants.INIT_BITS; - lMaxCode = (1 << lNBits) - 1; - lBitMask = lMaxCode; + int nBytes = lNBits << 3; + lBitPos = (lBitPos - 1) + nBytes - 
(lBitPos - 1 + nBytes) % nBytes; + lNBits = LzwConstants.INIT_BITS; + lMaxCode = (1 << lNBits) - 1; + lBitMask = lMaxCode; - // Code tables reset + // Code tables reset - lBitPos = ResetBuf(lBitPos); - goto MainLoop; - } + lBitPos = ResetBuf(lBitPos); + goto MainLoop; + } - #endregion B + #endregion B - #region C + #region C - // setup - int inCode = code; - lStackP = lStack.Length; + // setup + int inCode = code; + lStackP = lStack.Length; - // Handle KwK case - if (code >= lFreeEnt) + // Handle KwK case + if (code >= lFreeEnt) + { + if (code > lFreeEnt) { - if (code > lFreeEnt) - { - throw new IncompleteArchiveException( - "corrupt input: code=" + code + ", freeEnt=" + lFreeEnt - ); - } - - lStack[--lStackP] = lFinChar; - code = lOldCode; + throw new IncompleteArchiveException( + "corrupt input: code=" + code + ", freeEnt=" + lFreeEnt + ); } - // Generate output characters in reverse order - while (code >= 256) - { - lStack[--lStackP] = lTabSuffix[code]; - code = lTabPrefix[code]; - } + lStack[--lStackP] = lFinChar; + code = lOldCode; + } - lFinChar = lTabSuffix[code]; - buffer[offset++] = lFinChar; - count--; + // Generate output characters in reverse order + while (code >= 256) + { + lStack[--lStackP] = lTabSuffix[code]; + code = lTabPrefix[code]; + } - // And put them out in forward order - sSize = lStack.Length - lStackP; - int num = (sSize >= count) ? count : sSize; - Array.Copy(lStack, lStackP, buffer, offset, num); - offset += num; - count -= num; - lStackP += num; + lFinChar = lTabSuffix[code]; + buffer[offset++] = lFinChar; + count--; - #endregion C + // And put them out in forward order + sSize = lStack.Length - lStackP; + int num = (sSize >= count) ? 
count : sSize; + Array.Copy(lStack, lStackP, buffer, offset, num); + offset += num; + count -= num; + lStackP += num; - #region D + #endregion C - // generate new entry in table - if (lFreeEnt < lMaxMaxCode) - { - lTabPrefix[lFreeEnt] = lOldCode; - lTabSuffix[lFreeEnt] = lFinChar; - lFreeEnt++; - } + #region D - // Remember previous code - lOldCode = inCode; + // generate new entry in table + if (lFreeEnt < lMaxMaxCode) + { + lTabPrefix[lFreeEnt] = lOldCode; + lTabSuffix[lFreeEnt] = lFinChar; + lFreeEnt++; + } - // if output buffer full, then return - if (count == 0) - { - nBits = lNBits; - maxCode = lMaxCode; - bitMask = lBitMask; - oldCode = lOldCode; - finChar = lFinChar; - stackP = lStackP; - freeEnt = lFreeEnt; - bitPos = lBitPos; - - return offset - start; - } + // Remember previous code + lOldCode = inCode; - #endregion D - } // while + // if output buffer full, then return + if (count == 0) + { + nBits = lNBits; + maxCode = lMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; + + return offset - start; + } - lBitPos = ResetBuf(lBitPos); - } while (got > 0); // do..while + #endregion D + } // while - nBits = lNBits; - maxCode = lMaxCode; - bitMask = lBitMask; - oldCode = lOldCode; - finChar = lFinChar; - stackP = lStackP; - freeEnt = lFreeEnt; - bitPos = lBitPos; + lBitPos = ResetBuf(lBitPos); + } while (got > 0); // do..while - eof = true; - return offset - start; - } + nBits = lNBits; + maxCode = lMaxCode; + bitMask = lBitMask; + oldCode = lOldCode; + finChar = lFinChar; + stackP = lStackP; + freeEnt = lFreeEnt; + bitPos = lBitPos; - /// - /// Moves the unread data in the buffer to the beginning and resets - /// the pointers. 
- /// - /// - /// - private int ResetBuf(int bitPosition) - { - int pos = bitPosition >> 3; - Array.Copy(data, pos, data, 0, end - pos); - end -= pos; - return 0; - } + eof = true; + return offset - start; + } - private void Fill() - { - got = baseInputStream.Read(data, end, data.Length - 1 - end); - if (got > 0) - { - end += got; - } - } + /// + /// Moves the unread data in the buffer to the beginning and resets + /// the pointers. + /// + /// + /// + private int ResetBuf(int bitPosition) + { + int pos = bitPosition >> 3; + Array.Copy(data, pos, data, 0, end - pos); + end -= pos; + return 0; + } - private void ParseHeader() + private void Fill() + { + got = baseInputStream.Read(data, end, data.Length - 1 - end); + if (got > 0) { - headerParsed = true; - - byte[] hdr = new byte[LzwConstants.HDR_SIZE]; - - int result = baseInputStream.Read(hdr, 0, hdr.Length); - - // Check the magic marker - if (result < 0) - throw new IncompleteArchiveException("Failed to read LZW header"); + end += got; + } + } - if (hdr[0] != (LzwConstants.MAGIC >> 8) || hdr[1] != (LzwConstants.MAGIC & 0xff)) - { - throw new IncompleteArchiveException( - String.Format( - "Wrong LZW header. Magic bytes don't match. 0x{0:x2} 0x{1:x2}", - hdr[0], - hdr[1] - ) - ); - } + private void ParseHeader() + { + headerParsed = true; - // Check the 3rd header byte - blockMode = (hdr[2] & LzwConstants.BLOCK_MODE_MASK) > 0; - maxBits = hdr[2] & LzwConstants.BIT_MASK; + byte[] hdr = new byte[LzwConstants.HDR_SIZE]; - if (maxBits > LzwConstants.MAX_BITS) - { - throw new ArchiveException( - "Stream compressed with " - + maxBits - + " bits, but decompression can only handle " - + LzwConstants.MAX_BITS - + " bits." 
- ); - } + int result = baseInputStream.Read(hdr, 0, hdr.Length); - if ((hdr[2] & LzwConstants.RESERVED_MASK) > 0) - { - throw new ArchiveException("Unsupported bits set in the header."); - } + // Check the magic marker + if (result < 0) + throw new IncompleteArchiveException("Failed to read LZW header"); - // Initialize variables - maxMaxCode = 1 << maxBits; - nBits = LzwConstants.INIT_BITS; - maxCode = (1 << nBits) - 1; - bitMask = maxCode; - oldCode = -1; - finChar = 0; - freeEnt = blockMode ? TBL_FIRST : 256; - - tabPrefix = new int[1 << maxBits]; - tabSuffix = new byte[1 << maxBits]; - stack = new byte[1 << maxBits]; - stackP = stack.Length; - - for (int idx = 255; idx >= 0; idx--) - tabSuffix[idx] = (byte)idx; + if (hdr[0] != (LzwConstants.MAGIC >> 8) || hdr[1] != (LzwConstants.MAGIC & 0xff)) + { + throw new IncompleteArchiveException( + String.Format( + "Wrong LZW header. Magic bytes don't match. 0x{0:x2} 0x{1:x2}", + hdr[0], + hdr[1] + ) + ); } - #region Stream Overrides + // Check the 3rd header byte + blockMode = (hdr[2] & LzwConstants.BLOCK_MODE_MASK) > 0; + maxBits = hdr[2] & LzwConstants.BIT_MASK; - /// - /// Gets a value indicating whether the current stream supports reading - /// - public override bool CanRead + if (maxBits > LzwConstants.MAX_BITS) { - get { return baseInputStream.CanRead; } + throw new ArchiveException( + "Stream compressed with " + + maxBits + + " bits, but decompression can only handle " + + LzwConstants.MAX_BITS + + " bits." + ); } - /// - /// Gets a value of false indicating seeking is not supported for this stream. - /// - public override bool CanSeek + if ((hdr[2] & LzwConstants.RESERVED_MASK) > 0) { - get { return false; } + throw new ArchiveException("Unsupported bits set in the header."); } - /// - /// Gets a value of false indicating that this stream is not writeable. 
- /// - public override bool CanWrite - { - get { return false; } - } + // Initialize variables + maxMaxCode = 1 << maxBits; + nBits = LzwConstants.INIT_BITS; + maxCode = (1 << nBits) - 1; + bitMask = maxCode; + oldCode = -1; + finChar = 0; + freeEnt = blockMode ? TBL_FIRST : 256; + + tabPrefix = new int[1 << maxBits]; + tabSuffix = new byte[1 << maxBits]; + stack = new byte[1 << maxBits]; + stackP = stack.Length; + + for (int idx = 255; idx >= 0; idx--) + tabSuffix[idx] = (byte)idx; + } - /// - /// A value representing the length of the stream in bytes. - /// - public override long Length - { - get { return got; } - } + #region Stream Overrides - /// - /// The current position within the stream. - /// Throws a NotSupportedException when attempting to set the position - /// - /// Attempting to set the position - public override long Position - { - get { return baseInputStream.Position; } - set { throw new NotSupportedException("InflaterInputStream Position not supported"); } - } + /// + /// Gets a value indicating whether the current stream supports reading + /// + public override bool CanRead + { + get { return baseInputStream.CanRead; } + } - /// - /// Flushes the baseInputStream - /// - public override void Flush() - { - baseInputStream.Flush(); - } + /// + /// Gets a value of false indicating seeking is not supported for this stream. + /// + public override bool CanSeek + { + get { return false; } + } - /// - /// Sets the position within the current stream - /// Always throws a NotSupportedException - /// - /// The relative offset to seek to. - /// The defining where to seek from. - /// The new position in the stream. - /// Any access - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotSupportedException("Seek not supported"); - } + /// + /// Gets a value of false indicating that this stream is not writeable. 
+ /// + public override bool CanWrite + { + get { return false; } + } - /// - /// Set the length of the current stream - /// Always throws a NotSupportedException - /// - /// The new length value for the stream. - /// Any access - public override void SetLength(long value) - { - throw new NotSupportedException("InflaterInputStream SetLength not supported"); - } + /// + /// A value representing the length of the stream in bytes. + /// + public override long Length + { + get { return got; } + } - /// - /// Writes a sequence of bytes to stream and advances the current position - /// This method always throws a NotSupportedException - /// - /// The buffer containing data to write. - /// The offset of the first byte to write. - /// The number of bytes to write. - /// Any access - public override void Write(byte[] buffer, int offset, int count) - { - throw new NotSupportedException("InflaterInputStream Write not supported"); - } + /// + /// The current position within the stream. + /// Throws a NotSupportedException when attempting to set the position + /// + /// Attempting to set the position + public override long Position + { + get { return baseInputStream.Position; } + set { throw new NotSupportedException("InflaterInputStream Position not supported"); } + } - /// - /// Writes one byte to the current stream and advances the current position - /// Always throws a NotSupportedException - /// - /// The byte to write. - /// Any access - public override void WriteByte(byte value) - { - throw new NotSupportedException("InflaterInputStream WriteByte not supported"); - } + /// + /// Flushes the baseInputStream + /// + public override void Flush() + { + baseInputStream.Flush(); + } + + /// + /// Sets the position within the current stream + /// Always throws a NotSupportedException + /// + /// The relative offset to seek to. + /// The defining where to seek from. + /// The new position in the stream. 
+ /// Any access + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException("Seek not supported"); + } - /// - /// Closes the input stream. When - /// is true the underlying stream is also closed. - /// - protected override void Dispose(bool disposing) + /// + /// Set the length of the current stream + /// Always throws a NotSupportedException + /// + /// The new length value for the stream. + /// Any access + public override void SetLength(long value) + { + throw new NotSupportedException("InflaterInputStream SetLength not supported"); + } + + /// + /// Writes a sequence of bytes to stream and advances the current position + /// This method always throws a NotSupportedException + /// + /// The buffer containing data to write. + /// The offset of the first byte to write. + /// The number of bytes to write. + /// Any access + public override void Write(byte[] buffer, int offset, int count) + { + throw new NotSupportedException("InflaterInputStream Write not supported"); + } + + /// + /// Writes one byte to the current stream and advances the current position + /// Always throws a NotSupportedException + /// + /// The byte to write. + /// Any access + public override void WriteByte(byte value) + { + throw new NotSupportedException("InflaterInputStream WriteByte not supported"); + } + + /// + /// Closes the input stream. When + /// is true the underlying stream is also closed. 
+ /// + protected override void Dispose(bool disposing) + { + if (!isClosed) { - if (!isClosed) - { - isClosed = true; + isClosed = true; #if DEBUG_STREAMS this.DebugDispose(typeof(LzwStream)); #endif - if (IsStreamOwner) - { - baseInputStream.Dispose(); - } + if (IsStreamOwner) + { + baseInputStream.Dispose(); } } + } - #endregion Stream Overrides + #endregion Stream Overrides - #region Instance Fields + #region Instance Fields - private Stream baseInputStream; + private Stream baseInputStream; - /// - /// Flag indicating wether this instance has been closed or not. - /// - private bool isClosed; + /// + /// Flag indicating wether this instance has been closed or not. + /// + private bool isClosed; - private readonly byte[] one = new byte[1]; - private bool headerParsed; + private readonly byte[] one = new byte[1]; + private bool headerParsed; - // string table stuff - private const int TBL_CLEAR = 0x100; + // string table stuff + private const int TBL_CLEAR = 0x100; - private const int TBL_FIRST = TBL_CLEAR + 1; + private const int TBL_FIRST = TBL_CLEAR + 1; - private int[] tabPrefix = new int[0]; // - private byte[] tabSuffix = new byte[0]; // - private readonly int[] zeros = new int[256]; - private byte[] stack = new byte[0]; // + private int[] tabPrefix = new int[0]; // + private byte[] tabSuffix = new byte[0]; // + private readonly int[] zeros = new int[256]; + private byte[] stack = new byte[0]; // - // various state - private bool blockMode; + // various state + private bool blockMode; - private int nBits; - private int maxBits; - private int maxMaxCode; - private int maxCode; - private int bitMask; - private int oldCode; - private byte finChar; - private int stackP; - private int freeEnt; + private int nBits; + private int maxBits; + private int maxMaxCode; + private int maxCode; + private int bitMask; + private int oldCode; + private byte finChar; + private int stackP; + private int freeEnt; - // input buffer - private readonly byte[] data = new byte[1024 
* 8]; + // input buffer + private readonly byte[] data = new byte[1024 * 8]; - private int bitPos; - private int end; - private int got; - private bool eof; - private const int EXTRA = 64; + private int bitPos; + private int end; + private int got; + private bool eof; + private const int EXTRA = 64; - #endregion Instance Fields - } + #endregion Instance Fields } diff --git a/src/SharpCompress/Compressors/RLE90/RLE.cs b/src/SharpCompress/Compressors/RLE90/RLE.cs index 8bc8ee1ba..68b52ea40 100644 --- a/src/SharpCompress/Compressors/RLE90/RLE.cs +++ b/src/SharpCompress/Compressors/RLE90/RLE.cs @@ -1,52 +1,51 @@ using System.Collections.Generic; using System.Linq; -namespace SharpCompress.Compressors.RLE90 +namespace SharpCompress.Compressors.RLE90; + +public static class RLE { - public static class RLE + private const byte DLE = 0x90; + + /// + /// Unpacks an RLE compressed buffer. + /// Format: DLE , where count == 0 -> DLE + /// + /// The compressed buffer to unpack. + /// A list of unpacked bytes. + public static List UnpackRLE(byte[] compressedBuffer) { - private const byte DLE = 0x90; + var result = new List(compressedBuffer.Length * 2); // Optimized initial capacity + var countMode = false; + byte last = 0; - /// - /// Unpacks an RLE compressed buffer. - /// Format: DLE , where count == 0 -> DLE - /// - /// The compressed buffer to unpack. - /// A list of unpacked bytes. 
- public static List UnpackRLE(byte[] compressedBuffer) + foreach (var c in compressedBuffer) { - var result = new List(compressedBuffer.Length * 2); // Optimized initial capacity - var countMode = false; - byte last = 0; - - foreach (var c in compressedBuffer) + if (!countMode) + { + if (c == DLE) + { + countMode = true; + } + else + { + result.Add(c); + last = c; + } + } + else { - if (!countMode) + countMode = false; + if (c == 0) { - if (c == DLE) - { - countMode = true; - } - else - { - result.Add(c); - last = c; - } + result.Add(DLE); } else { - countMode = false; - if (c == 0) - { - result.Add(DLE); - } - else - { - result.AddRange(Enumerable.Repeat(last, c - 1)); - } + result.AddRange(Enumerable.Repeat(last, c - 1)); } } - return result; } + return result; } } diff --git a/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs b/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs index 09034040a..d70685f97 100644 --- a/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs +++ b/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs @@ -6,91 +6,90 @@ using System.Threading.Tasks; using SharpCompress.IO; -namespace SharpCompress.Compressors.RLE90 +namespace SharpCompress.Compressors.RLE90; + +public class RunLength90Stream : Stream, IStreamStack { - public class RunLength90Stream : Stream, IStreamStack - { #if DEBUG_STREAMS long IStreamStack.InstanceId { get; set; } #endif - int IStreamStack.DefaultBufferSize { get; set; } + int IStreamStack.DefaultBufferSize { get; set; } - Stream IStreamStack.BaseStream() => _stream; + Stream IStreamStack.BaseStream() => _stream; - int IStreamStack.BufferSize - { - get => 0; - set { } - } - int IStreamStack.BufferPosition - { - get => 0; - set { } - } + int IStreamStack.BufferSize + { + get => 0; + set { } + } + int IStreamStack.BufferPosition + { + get => 0; + set { } + } - void IStreamStack.SetPosition(long position) { } + void IStreamStack.SetPosition(long position) { } - private readonly Stream _stream; - 
private const byte DLE = 0x90; - private int _compressedSize; - private bool _processed = false; + private readonly Stream _stream; + private const byte DLE = 0x90; + private int _compressedSize; + private bool _processed = false; - public RunLength90Stream(Stream stream, int compressedSize) - { - _stream = stream; - _compressedSize = compressedSize; + public RunLength90Stream(Stream stream, int compressedSize) + { + _stream = stream; + _compressedSize = compressedSize; #if DEBUG_STREAMS this.DebugConstruct(typeof(RunLength90Stream)); #endif - } + } - protected override void Dispose(bool disposing) - { + protected override void Dispose(bool disposing) + { #if DEBUG_STREAMS this.DebugDispose(typeof(RunLength90Stream)); #endif - base.Dispose(disposing); - } + base.Dispose(disposing); + } - public override bool CanRead => true; + public override bool CanRead => true; - public override bool CanSeek => false; + public override bool CanSeek => false; - public override bool CanWrite => false; + public override bool CanWrite => false; - public override long Length => throw new NotImplementedException(); + public override long Length => throw new NotImplementedException(); - public override long Position - { - get => _stream.Position; - set => throw new NotImplementedException(); - } + public override long Position + { + get => _stream.Position; + set => throw new NotImplementedException(); + } - public override void Flush() => throw new NotImplementedException(); + public override void Flush() => throw new NotImplementedException(); - public override int Read(byte[] buffer, int offset, int count) + public override int Read(byte[] buffer, int offset, int count) + { + if (_processed) { - if (_processed) - { - return 0; - } - _processed = true; + return 0; + } + _processed = true; - using var binaryReader = new BinaryReader(_stream); - byte[] compressedBuffer = binaryReader.ReadBytes(_compressedSize); + using var binaryReader = new BinaryReader(_stream); + byte[] 
compressedBuffer = binaryReader.ReadBytes(_compressedSize); - var unpacked = RLE.UnpackRLE(compressedBuffer); - unpacked.CopyTo(buffer); + var unpacked = RLE.UnpackRLE(compressedBuffer); + unpacked.CopyTo(buffer); - return unpacked.Count; - } + return unpacked.Count; + } - public override long Seek(long offset, SeekOrigin origin) => - throw new NotImplementedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotImplementedException(); - public override void SetLength(long value) => throw new NotImplementedException(); + public override void SetLength(long value) => throw new NotImplementedException(); - public override void Write(byte[] buffer, int offset, int count) => - throw new NotImplementedException(); - } + public override void Write(byte[] buffer, int offset, int count) => + throw new NotImplementedException(); } diff --git a/src/SharpCompress/Compressors/Shrink/BitStream.cs b/src/SharpCompress/Compressors/Shrink/BitStream.cs index 8bb69ead7..4bfa0c322 100644 --- a/src/SharpCompress/Compressors/Shrink/BitStream.cs +++ b/src/SharpCompress/Compressors/Shrink/BitStream.cs @@ -1,79 +1,78 @@ -namespace SharpCompress.Compressors.Shrink +namespace SharpCompress.Compressors.Shrink; + +internal class BitStream { - internal class BitStream + private byte[] _src; + private int _srcLen; + private int _byteIdx; + private int _bitIdx; + private int _bitsLeft; + private ulong _bitBuffer; + private static uint[] _maskBits = new uint[17] { - private byte[] _src; - private int _srcLen; - private int _byteIdx; - private int _bitIdx; - private int _bitsLeft; - private ulong _bitBuffer; - private static uint[] _maskBits = new uint[17] - { - 0U, - 1U, - 3U, - 7U, - 15U, - 31U, - 63U, - (uint)sbyte.MaxValue, - (uint)byte.MaxValue, - 511U, - 1023U, - 2047U, - 4095U, - 8191U, - 16383U, - (uint)short.MaxValue, - (uint)ushort.MaxValue, - }; + 0U, + 1U, + 3U, + 7U, + 15U, + 31U, + 63U, + (uint)sbyte.MaxValue, + (uint)byte.MaxValue, + 511U, + 1023U, 
+ 2047U, + 4095U, + 8191U, + 16383U, + (uint)short.MaxValue, + (uint)ushort.MaxValue, + }; - public BitStream(byte[] src, int srcLen) - { - _src = src; - _srcLen = srcLen; - _byteIdx = 0; - _bitIdx = 0; - } + public BitStream(byte[] src, int srcLen) + { + _src = src; + _srcLen = srcLen; + _byteIdx = 0; + _bitIdx = 0; + } - public int BytesRead => (_byteIdx << 3) + _bitIdx; + public int BytesRead => (_byteIdx << 3) + _bitIdx; - private int NextByte() + private int NextByte() + { + if (_byteIdx >= _srcLen) { - if (_byteIdx >= _srcLen) - { - return 0; - } - - return _src[_byteIdx++]; + return 0; } - public int NextBits(int nbits) + return _src[_byteIdx++]; + } + + public int NextBits(int nbits) + { + var result = 0; + if (nbits > _bitsLeft) { - var result = 0; - if (nbits > _bitsLeft) + int num; + while (_bitsLeft <= 24 && (num = NextByte()) != 1234) { - int num; - while (_bitsLeft <= 24 && (num = NextByte()) != 1234) - { - _bitBuffer |= (ulong)num << _bitsLeft; - _bitsLeft += 8; - } + _bitBuffer |= (ulong)num << _bitsLeft; + _bitsLeft += 8; } - result = (int)((long)_bitBuffer & (long)_maskBits[nbits]); - _bitBuffer >>= nbits; - _bitsLeft -= nbits; - return result; } + result = (int)((long)_bitBuffer & (long)_maskBits[nbits]); + _bitBuffer >>= nbits; + _bitsLeft -= nbits; + return result; + } - public bool Advance(int count) + public bool Advance(int count) + { + if (_byteIdx > _srcLen) { - if (_byteIdx > _srcLen) - { - return false; - } - return true; + return false; } + return true; } } diff --git a/src/SharpCompress/Compressors/Shrink/HwUnshrink.cs b/src/SharpCompress/Compressors/Shrink/HwUnshrink.cs index 4506ccb16..2367e2ac7 100644 --- a/src/SharpCompress/Compressors/Shrink/HwUnshrink.cs +++ b/src/SharpCompress/Compressors/Shrink/HwUnshrink.cs @@ -1,275 +1,297 @@ using System; -namespace SharpCompress.Compressors.Shrink +namespace SharpCompress.Compressors.Shrink; + +public class HwUnshrink { - public class HwUnshrink - { - private const int MIN_CODE_SIZE = 9; - 
private const int MAX_CODE_SIZE = 13; + private const int MIN_CODE_SIZE = 9; + private const int MAX_CODE_SIZE = 13; + + private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1); + private const ushort INVALID_CODE = ushort.MaxValue; + private const ushort CONTROL_CODE = 256; + private const ushort INC_CODE_SIZE = 1; + private const ushort PARTIAL_CLEAR = 2; - private const ushort MAX_CODE = (ushort)((1U << MAX_CODE_SIZE) - 1); - private const ushort INVALID_CODE = ushort.MaxValue; - private const ushort CONTROL_CODE = 256; - private const ushort INC_CODE_SIZE = 1; - private const ushort PARTIAL_CLEAR = 2; + private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5. + private const int HASHTAB_SIZE = 1 << HASH_BITS; + private const ushort UNKNOWN_LEN = ushort.MaxValue; - private const int HASH_BITS = MAX_CODE_SIZE + 1; // For a load factor of 0.5. - private const int HASHTAB_SIZE = 1 << HASH_BITS; - private const ushort UNKNOWN_LEN = ushort.MaxValue; + private struct CodeTabEntry + { + public int prefixCode; // INVALID_CODE means the entry is invalid. + public byte extByte; + public ushort len; + public int lastDstPos; + } - private struct CodeTabEntry + private static void CodeTabInit(CodeTabEntry[] codeTab) + { + for (var i = 0; i <= byte.MaxValue; i++) { - public int prefixCode; // INVALID_CODE means the entry is invalid. 
- public byte extByte; - public ushort len; - public int lastDstPos; + codeTab[i].prefixCode = (ushort)i; + codeTab[i].extByte = (byte)i; + codeTab[i].len = 1; } - private static void CodeTabInit(CodeTabEntry[] codeTab) + for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++) { - for (var i = 0; i <= byte.MaxValue; i++) - { - codeTab[i].prefixCode = (ushort)i; - codeTab[i].extByte = (byte)i; - codeTab[i].len = 1; - } - - for (var i = byte.MaxValue + 1; i <= MAX_CODE; i++) - { - codeTab[i].prefixCode = INVALID_CODE; - } + codeTab[i].prefixCode = INVALID_CODE; } + } - private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue) - { - var isPrefix = new bool[MAX_CODE + 1]; - int codeQueueSize; + private static void UnshrinkPartialClear(CodeTabEntry[] codeTab, ref CodeQueue queue) + { + var isPrefix = new bool[MAX_CODE + 1]; + int codeQueueSize; - // Scan for codes that have been used as a prefix. - for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++) + // Scan for codes that have been used as a prefix. + for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++) + { + if (codeTab[i].prefixCode != INVALID_CODE) { - if (codeTab[i].prefixCode != INVALID_CODE) - { - isPrefix[codeTab[i].prefixCode] = true; - } + isPrefix[codeTab[i].prefixCode] = true; } - - // Clear "non-prefix" codes in the table; populate the code queue. - codeQueueSize = 0; - for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++) - { - if (!isPrefix[i]) - { - codeTab[i].prefixCode = INVALID_CODE; - queue.codes[codeQueueSize++] = (ushort)i; - } - } - - queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker. - queue.nextIdx = 0; } - private static bool ReadCode( - BitStream stream, - ref int codeSize, - CodeTabEntry[] codeTab, - ref CodeQueue queue, - out int nextCode - ) + // Clear "non-prefix" codes in the table; populate the code queue. 
+ codeQueueSize = 0; + for (var i = CONTROL_CODE + 1; i <= MAX_CODE; i++) { - int code, - controlCode; - - code = (int)stream.NextBits(codeSize); - if (!stream.Advance(codeSize)) + if (!isPrefix[i]) { - nextCode = INVALID_CODE; - return false; + codeTab[i].prefixCode = INVALID_CODE; + queue.codes[codeQueueSize++] = (ushort)i; } + } - // Handle regular codes (the common case). - if (code != CONTROL_CODE) - { - nextCode = code; - return true; - } + queue.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker. + queue.nextIdx = 0; + } - // Handle control codes. - controlCode = (ushort)stream.NextBits(codeSize); - if (!stream.Advance(codeSize)) - { - nextCode = INVALID_CODE; - return true; - } + private static bool ReadCode( + BitStream stream, + ref int codeSize, + CodeTabEntry[] codeTab, + ref CodeQueue queue, + out int nextCode + ) + { + int code, + controlCode; - if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE) - { - codeSize++; - return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode); - } + code = (int)stream.NextBits(codeSize); + if (!stream.Advance(codeSize)) + { + nextCode = INVALID_CODE; + return false; + } - if (controlCode == PARTIAL_CLEAR) - { - UnshrinkPartialClear(codeTab, ref queue); - return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode); - } + // Handle regular codes (the common case). + if (code != CONTROL_CODE) + { + nextCode = code; + return true; + } + // Handle control codes. + controlCode = (ushort)stream.NextBits(codeSize); + if (!stream.Advance(codeSize)) + { nextCode = INVALID_CODE; return true; } - private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len) + if (controlCode == INC_CODE_SIZE && codeSize < MAX_CODE_SIZE) { - if (dstPos + len > dst.Length) - { - // Not enough room in dst for the sloppy copy below. 
- Array.Copy(dst, prevPos, dst, dstPos, len); - return; - } + codeSize++; + return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode); + } - if (prevPos + len > dstPos) - { - // Benign one-byte overlap possible in the KwKwK case. - //assert(prevPos + len == dstPos + 1); - //assert(dst[prevPos] == dst[prevPos + len - 1]); - } + if (controlCode == PARTIAL_CLEAR) + { + UnshrinkPartialClear(codeTab, ref queue); + return ReadCode(stream, ref codeSize, codeTab, ref queue, out nextCode); + } - Buffer.BlockCopy(dst, prevPos, dst, dstPos, len); + nextCode = INVALID_CODE; + return true; + } + + private static void CopyFromPrevPos(byte[] dst, int prevPos, int dstPos, int len) + { + if (dstPos + len > dst.Length) + { + // Not enough room in dst for the sloppy copy below. + Array.Copy(dst, prevPos, dst, dstPos, len); + return; } - private static UnshrnkStatus OutputCode( - int code, - byte[] dst, - int dstPos, - int dstCap, - int prevCode, - CodeTabEntry[] codeTab, - ref CodeQueue queue, - out byte firstByte, - out int len - ) + if (prevPos + len > dstPos) + { + // Benign one-byte overlap possible in the KwKwK case. + //assert(prevPos + len == dstPos + 1); + //assert(dst[prevPos] == dst[prevPos + len - 1]); + } + + Buffer.BlockCopy(dst, prevPos, dst, dstPos, len); + } + + private static UnshrnkStatus OutputCode( + int code, + byte[] dst, + int dstPos, + int dstCap, + int prevCode, + CodeTabEntry[] codeTab, + ref CodeQueue queue, + out byte firstByte, + out int len + ) + { + int prefixCode; + + //assert(code <= MAX_CODE && code != CONTROL_CODE); + //assert(dstPos < dstCap); + firstByte = 0; + if (code <= byte.MaxValue) { - int prefixCode; + // Output literal byte. + firstByte = (byte)code; + len = 1; + dst[dstPos] = (byte)code; + return UnshrnkStatus.Ok; + } - //assert(code <= MAX_CODE && code != CONTROL_CODE); - //assert(dstPos < dstCap); + if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code) + { + // Reject invalid codes. 
Self-referential codes may exist in the table but cannot be used. firstByte = 0; - if (code <= byte.MaxValue) - { - // Output literal byte. - firstByte = (byte)code; - len = 1; - dst[dstPos] = (byte)code; - return UnshrnkStatus.Ok; - } + len = 0; + return UnshrnkStatus.Error; + } - if (codeTab[code].prefixCode == INVALID_CODE || codeTab[code].prefixCode == code) + if (codeTab[code].len != UNKNOWN_LEN) + { + // Output string with known length (the common case). + if (dstCap - dstPos < codeTab[code].len) { - // Reject invalid codes. Self-referential codes may exist in the table but cannot be used. firstByte = 0; len = 0; - return UnshrnkStatus.Error; + return UnshrnkStatus.Full; } - if (codeTab[code].len != UNKNOWN_LEN) - { - // Output string with known length (the common case). - if (dstCap - dstPos < codeTab[code].len) - { - firstByte = 0; - len = 0; - return UnshrnkStatus.Full; - } + CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len); + firstByte = dst[dstPos]; + len = codeTab[code].len; + return UnshrnkStatus.Ok; + } - CopyFromPrevPos(dst, codeTab[code].lastDstPos, dstPos, codeTab[code].len); - firstByte = dst[dstPos]; - len = codeTab[code].len; - return UnshrnkStatus.Ok; - } + // Output a string of unknown length. + //assert(codeTab[code].len == UNKNOWN_LEN); + prefixCode = codeTab[code].prefixCode; + // assert(prefixCode > CONTROL_CODE); - // Output a string of unknown length. - //assert(codeTab[code].len == UNKNOWN_LEN); - prefixCode = codeTab[code].prefixCode; - // assert(prefixCode > CONTROL_CODE); + if (prefixCode == queue.codes[queue.nextIdx]) + { + // The prefix code hasn't been added yet, but we were just about to: the KwKwK case. 
+ //assert(codeTab[prevCode].prefixCode != INVALID_CODE); + codeTab[prefixCode].prefixCode = prevCode; + codeTab[prefixCode].extByte = firstByte; + codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1); + codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos; + dst[dstPos] = firstByte; + } + else if (codeTab[prefixCode].prefixCode == INVALID_CODE) + { + // The prefix code is still invalid. + firstByte = 0; + len = 0; + return UnshrnkStatus.Error; + } - if (prefixCode == queue.codes[queue.nextIdx]) - { - // The prefix code hasn't been added yet, but we were just about to: the KwKwK case. - //assert(codeTab[prevCode].prefixCode != INVALID_CODE); - codeTab[prefixCode].prefixCode = prevCode; - codeTab[prefixCode].extByte = firstByte; - codeTab[prefixCode].len = (ushort)(codeTab[prevCode].len + 1); - codeTab[prefixCode].lastDstPos = codeTab[prevCode].lastDstPos; - dst[dstPos] = firstByte; - } - else if (codeTab[prefixCode].prefixCode == INVALID_CODE) - { - // The prefix code is still invalid. - firstByte = 0; - len = 0; - return UnshrnkStatus.Error; - } + // Output the prefix string, then the extension byte. + len = codeTab[prefixCode].len + 1; + if (dstCap - dstPos < len) + { + firstByte = 0; + len = 0; + return UnshrnkStatus.Full; + } - // Output the prefix string, then the extension byte. - len = codeTab[prefixCode].len + 1; - if (dstCap - dstPos < len) - { - firstByte = 0; - len = 0; - return UnshrnkStatus.Full; - } + CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len); + dst[dstPos + len - 1] = codeTab[code].extByte; + firstByte = dst[dstPos]; - CopyFromPrevPos(dst, codeTab[prefixCode].lastDstPos, dstPos, codeTab[prefixCode].len); - dst[dstPos + len - 1] = codeTab[code].extByte; - firstByte = dst[dstPos]; + // Update the code table now that the string has a length and pos. 
+ //assert(prevCode != code); + codeTab[code].len = (ushort)len; + codeTab[code].lastDstPos = dstPos; - // Update the code table now that the string has a length and pos. - //assert(prevCode != code); - codeTab[code].len = (ushort)len; - codeTab[code].lastDstPos = dstPos; + return UnshrnkStatus.Ok; + } + public static UnshrnkStatus Unshrink( + byte[] src, + int srcLen, + out int srcUsed, + byte[] dst, + int dstCap, + out int dstUsed + ) + { + var codeTab = new CodeTabEntry[HASHTAB_SIZE]; + var queue = new CodeQueue(); + var stream = new BitStream(src, srcLen); + int codeSize, + dstPos, + len; + int currCode, + prevCode, + newCode; + byte firstByte; + + CodeTabInit(codeTab); + CodeQueueInit(ref queue); + codeSize = MIN_CODE_SIZE; + dstPos = 0; + + // Handle the first code separately since there is no previous code. + if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode)) + { + srcUsed = stream.BytesRead; + dstUsed = 0; return UnshrnkStatus.Ok; } - public static UnshrnkStatus Unshrink( - byte[] src, - int srcLen, - out int srcUsed, - byte[] dst, - int dstCap, - out int dstUsed - ) + //assert(currCode != CONTROL_CODE); + if (currCode > byte.MaxValue) { - var codeTab = new CodeTabEntry[HASHTAB_SIZE]; - var queue = new CodeQueue(); - var stream = new BitStream(src, srcLen); - int codeSize, - dstPos, - len; - int currCode, - prevCode, - newCode; - byte firstByte; + srcUsed = stream.BytesRead; + dstUsed = 0; + return UnshrnkStatus.Error; // The first code must be a literal. + } - CodeTabInit(codeTab); - CodeQueueInit(ref queue); - codeSize = MIN_CODE_SIZE; - dstPos = 0; + if (dstPos == dstCap) + { + srcUsed = stream.BytesRead; + dstUsed = 0; + return UnshrnkStatus.Full; + } - // Handle the first code separately since there is no previous code. 
- if (!ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode)) - { - srcUsed = stream.BytesRead; - dstUsed = 0; - return UnshrnkStatus.Ok; - } + firstByte = (byte)currCode; + dst[dstPos] = (byte)currCode; + codeTab[currCode].lastDstPos = dstPos; + dstPos++; - //assert(currCode != CONTROL_CODE); - if (currCode > byte.MaxValue) + prevCode = currCode; + while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode)) + { + if (currCode == INVALID_CODE) { srcUsed = stream.BytesRead; dstUsed = 0; - return UnshrnkStatus.Error; // The first code must be a literal. + return UnshrnkStatus.Error; } if (dstPos == dstCap) @@ -279,153 +301,130 @@ out int dstUsed return UnshrnkStatus.Full; } - firstByte = (byte)currCode; - dst[dstPos] = (byte)currCode; - codeTab[currCode].lastDstPos = dstPos; - dstPos++; - - prevCode = currCode; - while (ReadCode(stream, ref codeSize, codeTab, ref queue, out currCode)) + // Handle KwKwK: next code used before being added. + if (currCode == queue.codes[queue.nextIdx]) { - if (currCode == INVALID_CODE) + if (codeTab[prevCode].prefixCode == INVALID_CODE) { + // The previous code is no longer valid. srcUsed = stream.BytesRead; dstUsed = 0; return UnshrnkStatus.Error; } - if (dstPos == dstCap) - { - srcUsed = stream.BytesRead; - dstUsed = 0; - return UnshrnkStatus.Full; - } + // Extend the previous code with its first byte. + //assert(currCode != prevCode); + codeTab[currCode].prefixCode = prevCode; + codeTab[currCode].extByte = firstByte; + codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1); + codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos; + //assert(dstPos < dstCap); + dst[dstPos] = firstByte; + } - // Handle KwKwK: next code used before being added. - if (currCode == queue.codes[queue.nextIdx]) - { - if (codeTab[prevCode].prefixCode == INVALID_CODE) - { - // The previous code is no longer valid. 
- srcUsed = stream.BytesRead; - dstUsed = 0; - return UnshrnkStatus.Error; - } - - // Extend the previous code with its first byte. - //assert(currCode != prevCode); - codeTab[currCode].prefixCode = prevCode; - codeTab[currCode].extByte = firstByte; - codeTab[currCode].len = (ushort)(codeTab[prevCode].len + 1); - codeTab[currCode].lastDstPos = codeTab[prevCode].lastDstPos; - //assert(dstPos < dstCap); - dst[dstPos] = firstByte; - } + // Output the string represented by the current code. + var status = OutputCode( + currCode, + dst, + dstPos, + dstCap, + prevCode, + codeTab, + ref queue, + out firstByte, + out len + ); + if (status != UnshrnkStatus.Ok) + { + srcUsed = stream.BytesRead; + dstUsed = 0; + return status; + } - // Output the string represented by the current code. - var status = OutputCode( - currCode, - dst, - dstPos, - dstCap, - prevCode, - codeTab, - ref queue, - out firstByte, - out len - ); - if (status != UnshrnkStatus.Ok) - { - srcUsed = stream.BytesRead; - dstUsed = 0; - return status; - } + // Verify that the output matches walking the prefixes. + var c = currCode; + for (var i = 0; i < len; i++) + { + // assert(codeTab[c].len == len - i); + //assert(codeTab[c].extByte == dst[dstPos + len - i - 1]); + c = codeTab[c].prefixCode; + } - // Verify that the output matches walking the prefixes. - var c = currCode; - for (var i = 0; i < len; i++) - { - // assert(codeTab[c].len == len - i); - //assert(codeTab[c].extByte == dst[dstPos + len - i - 1]); - c = codeTab[c].prefixCode; - } + // Add a new code to the string table if there's room. + // The string is the previous code's string extended with the first byte of the current code's string. 
+ newCode = CodeQueueRemoveNext(ref queue); + if (newCode != INVALID_CODE) + { + //assert(codeTab[prevCode].lastDstPos < dstPos); + codeTab[newCode].prefixCode = prevCode; + codeTab[newCode].extByte = firstByte; + codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1); + codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos; - // Add a new code to the string table if there's room. - // The string is the previous code's string extended with the first byte of the current code's string. - newCode = CodeQueueRemoveNext(ref queue); - if (newCode != INVALID_CODE) + if (codeTab[prevCode].prefixCode == INVALID_CODE) { - //assert(codeTab[prevCode].lastDstPos < dstPos); - codeTab[newCode].prefixCode = prevCode; - codeTab[newCode].extByte = firstByte; - codeTab[newCode].len = (ushort)(codeTab[prevCode].len + 1); - codeTab[newCode].lastDstPos = codeTab[prevCode].lastDstPos; - - if (codeTab[prevCode].prefixCode == INVALID_CODE) - { - // prevCode was invalidated in a partial clearing. Until that code is re-used, the - // string represented by newCode is indeterminate. - codeTab[newCode].len = UNKNOWN_LEN; - } - // If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode, - // in which case it will never be used or cleared. + // prevCode was invalidated in a partial clearing. Until that code is re-used, the + // string represented by newCode is indeterminate. + codeTab[newCode].len = UNKNOWN_LEN; } - - codeTab[currCode].lastDstPos = dstPos; - dstPos += len; - - prevCode = currCode; + // If prevCode was invalidated in a partial clearing, it's possible that newCode == prevCode, + // in which case it will never be used or cleared. 
} - srcUsed = stream.BytesRead; - dstUsed = dstPos; + codeTab[currCode].lastDstPos = dstPos; + dstPos += len; - return UnshrnkStatus.Ok; + prevCode = currCode; } - public enum UnshrnkStatus - { - Ok, - Full, - Error, - } + srcUsed = stream.BytesRead; + dstUsed = dstPos; - private struct CodeQueue - { - public int nextIdx; - public ushort[] codes; - } + return UnshrnkStatus.Ok; + } - private static void CodeQueueInit(ref CodeQueue q) - { - int codeQueueSize; - ushort code; + public enum UnshrnkStatus + { + Ok, + Full, + Error, + } - codeQueueSize = 0; - q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2]; + private struct CodeQueue + { + public int nextIdx; + public ushort[] codes; + } - for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++) - { - q.codes[codeQueueSize++] = code; - } + private static void CodeQueueInit(ref CodeQueue q) + { + int codeQueueSize; + ushort code; - //assert(codeQueueSize < q.codes.Length); - q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker. - q.nextIdx = 0; + codeQueueSize = 0; + q.codes = new ushort[MAX_CODE - CONTROL_CODE + 2]; + + for (code = CONTROL_CODE + 1; code <= MAX_CODE; code++) + { + q.codes[codeQueueSize++] = code; } - private static ushort CodeQueueNext(ref CodeQueue q) => - //assert(q.nextIdx < q.codes.Length); - q.codes[q.nextIdx]; + //assert(codeQueueSize < q.codes.Length); + q.codes[codeQueueSize] = INVALID_CODE; // End-of-queue marker. 
+ q.nextIdx = 0; + } + + private static ushort CodeQueueNext(ref CodeQueue q) => + //assert(q.nextIdx < q.codes.Length); + q.codes[q.nextIdx]; - private static ushort CodeQueueRemoveNext(ref CodeQueue q) + private static ushort CodeQueueRemoveNext(ref CodeQueue q) + { + var code = CodeQueueNext(ref q); + if (code != INVALID_CODE) { - var code = CodeQueueNext(ref q); - if (code != INVALID_CODE) - { - q.nextIdx++; - } - return code; + q.nextIdx++; } + return code; } } diff --git a/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs b/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs index bd9760df9..95e8b0dc4 100644 --- a/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs +++ b/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs @@ -7,139 +7,138 @@ using SharpCompress.Compressors.RLE90; using SharpCompress.IO; -namespace SharpCompress.Compressors.Squeezed +namespace SharpCompress.Compressors.Squeezed; + +public class SqueezeStream : Stream, IStreamStack { - public class SqueezeStream : Stream, IStreamStack - { #if DEBUG_STREAMS long IStreamStack.InstanceId { get; set; } #endif - int IStreamStack.DefaultBufferSize { get; set; } + int IStreamStack.DefaultBufferSize { get; set; } - Stream IStreamStack.BaseStream() => _stream; + Stream IStreamStack.BaseStream() => _stream; - int IStreamStack.BufferSize - { - get => 0; - set { } - } - int IStreamStack.BufferPosition - { - get => 0; - set { } - } + int IStreamStack.BufferSize + { + get => 0; + set { } + } + int IStreamStack.BufferPosition + { + get => 0; + set { } + } - void IStreamStack.SetPosition(long position) { } + void IStreamStack.SetPosition(long position) { } - private readonly Stream _stream; - private readonly int _compressedSize; - private const int NUMVALS = 257; - private const int SPEOF = 256; - private bool _processed = false; + private readonly Stream _stream; + private readonly int _compressedSize; + private const int NUMVALS = 257; + private const int SPEOF = 256; + private bool 
_processed = false; - public SqueezeStream(Stream stream, int compressedSize) - { - _stream = stream; - _compressedSize = compressedSize; + public SqueezeStream(Stream stream, int compressedSize) + { + _stream = stream; + _compressedSize = compressedSize; #if DEBUG_STREAMS this.DebugConstruct(typeof(SqueezeStream)); #endif - } + } - protected override void Dispose(bool disposing) - { + protected override void Dispose(bool disposing) + { #if DEBUG_STREAMS this.DebugDispose(typeof(SqueezeStream)); #endif - base.Dispose(disposing); - } + base.Dispose(disposing); + } - public override bool CanRead => true; + public override bool CanRead => true; - public override bool CanSeek => false; + public override bool CanSeek => false; - public override bool CanWrite => false; + public override bool CanWrite => false; - public override long Length => throw new NotImplementedException(); + public override long Length => throw new NotImplementedException(); - public override long Position - { - get => _stream.Position; - set => throw new NotImplementedException(); - } + public override long Position + { + get => _stream.Position; + set => throw new NotImplementedException(); + } - public override void Flush() => throw new NotImplementedException(); + public override void Flush() => throw new NotImplementedException(); - public override int Read(byte[] buffer, int offset, int count) + public override int Read(byte[] buffer, int offset, int count) + { + if (_processed) { - if (_processed) - { - return 0; - } - _processed = true; - using var binaryReader = new BinaryReader(_stream); + return 0; + } + _processed = true; + using var binaryReader = new BinaryReader(_stream); - // Read numnodes (equivalent to convert_u16!(numnodes, buf)) - var numnodes = binaryReader.ReadUInt16(); + // Read numnodes (equivalent to convert_u16!(numnodes, buf)) + var numnodes = binaryReader.ReadUInt16(); - // Validation: numnodes should be within bounds - if (numnodes >= NUMVALS) - { - throw new 
InvalidDataException( - $"Invalid number of nodes {numnodes} (max {NUMVALS - 1})" - ); - } + // Validation: numnodes should be within bounds + if (numnodes >= NUMVALS) + { + throw new InvalidDataException( + $"Invalid number of nodes {numnodes} (max {NUMVALS - 1})" + ); + } - // Handle the case where no nodes exist - if (numnodes == 0) - { - return 0; - } + // Handle the case where no nodes exist + if (numnodes == 0) + { + return 0; + } - // Build dnode (tree of nodes) - var dnode = new int[numnodes, 2]; - for (int j = 0; j < numnodes; j++) - { - dnode[j, 0] = binaryReader.ReadInt16(); - dnode[j, 1] = binaryReader.ReadInt16(); - } + // Build dnode (tree of nodes) + var dnode = new int[numnodes, 2]; + for (int j = 0; j < numnodes; j++) + { + dnode[j, 0] = binaryReader.ReadInt16(); + dnode[j, 1] = binaryReader.ReadInt16(); + } - // Initialize BitReader for reading bits - var bitReader = new BitReader(_stream); - var decoded = new List(); + // Initialize BitReader for reading bits + var bitReader = new BitReader(_stream); + var decoded = new List(); - int i = 0; - // Decode the buffer using the dnode tree - while (true) + int i = 0; + // Decode the buffer using the dnode tree + while (true) + { + i = dnode[i, bitReader.ReadBit() ? 1 : 0]; + if (i < 0) { - i = dnode[i, bitReader.ReadBit() ? 
1 : 0]; - if (i < 0) + i = (short)-(i + 1); + if (i == SPEOF) { - i = (short)-(i + 1); - if (i == SPEOF) - { - break; - } - else - { - decoded.Add((byte)i); - i = 0; - } + break; + } + else + { + decoded.Add((byte)i); + i = 0; } } - - // Unpack the decoded buffer using the RLE class - var unpacked = RLE.UnpackRLE(decoded.ToArray()); - unpacked.CopyTo(buffer, 0); - return unpacked.Count(); } - public override long Seek(long offset, SeekOrigin origin) => - throw new NotImplementedException(); + // Unpack the decoded buffer using the RLE class + var unpacked = RLE.UnpackRLE(decoded.ToArray()); + unpacked.CopyTo(buffer, 0); + return unpacked.Count(); + } - public override void SetLength(long value) => throw new NotImplementedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotImplementedException(); - public override void Write(byte[] buffer, int offset, int count) => - throw new NotImplementedException(); - } + public override void SetLength(long value) => throw new NotImplementedException(); + + public override void Write(byte[] buffer, int offset, int count) => + throw new NotImplementedException(); } diff --git a/src/SharpCompress/Compressors/ZStandard/BitOperations.cs b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs index d35133b20..fc8e3108d 100644 --- a/src/SharpCompress/Compressors/ZStandard/BitOperations.cs +++ b/src/SharpCompress/Compressors/ZStandard/BitOperations.cs @@ -4,7 +4,7 @@ #if !NETCOREAPP3_0_OR_GREATER using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; // Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson: // http://graphics.stanford.edu/~seander/bithacks.html diff --git a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs index c8546afc0..5dc293e17 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs @@ -3,187 +3,187 @@ using System.IO; using System.Threading; using System.Threading.Tasks; -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public class CompressionStream : Stream { - public class CompressionStream : Stream + private readonly Stream innerStream; + private readonly byte[] outputBuffer; + private readonly bool preserveCompressor; + private readonly bool leaveOpen; + private Compressor? compressor; + private ZSTD_outBuffer_s output; + + public CompressionStream( + Stream stream, + int level = Compressor.DefaultCompressionLevel, + int bufferSize = 0, + bool leaveOpen = true + ) + : this(stream, new Compressor(level), bufferSize, false, leaveOpen) { } + + public CompressionStream( + Stream stream, + Compressor compressor, + int bufferSize = 0, + bool preserveCompressor = true, + bool leaveOpen = true + ) { - private readonly Stream innerStream; - private readonly byte[] outputBuffer; - private readonly bool preserveCompressor; - private readonly bool leaveOpen; - private Compressor? 
compressor; - private ZSTD_outBuffer_s output; - - public CompressionStream( - Stream stream, - int level = Compressor.DefaultCompressionLevel, - int bufferSize = 0, - bool leaveOpen = true - ) - : this(stream, new Compressor(level), bufferSize, false, leaveOpen) { } - - public CompressionStream( - Stream stream, - Compressor compressor, - int bufferSize = 0, - bool preserveCompressor = true, - bool leaveOpen = true - ) + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + + if (!stream.CanWrite) + throw new ArgumentException("Stream is not writable", nameof(stream)); + + if (bufferSize < 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + + innerStream = stream; + this.compressor = compressor; + this.preserveCompressor = preserveCompressor; + this.leaveOpen = leaveOpen; + + var outputBufferSize = + bufferSize > 0 + ? bufferSize + : (int)Unsafe.Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess(); + outputBuffer = ArrayPool.Shared.Rent(outputBufferSize); + output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize }; + } + + public void SetParameter(ZSTD_cParameter parameter, int value) + { + EnsureNotDisposed(); + compressor.NotNull().SetParameter(parameter, value); + } + + public int GetParameter(ZSTD_cParameter parameter) + { + EnsureNotDisposed(); + return compressor.NotNull().GetParameter(parameter); + } + + public void LoadDictionary(byte[] dict) + { + EnsureNotDisposed(); + compressor.NotNull().LoadDictionary(dict); + } + + ~CompressionStream() => Dispose(false); + +#if !NETSTANDARD2_0 && !NETFRAMEWORK + public override async ValueTask DisposeAsync() +#else + public async Task DisposeAsync() +#endif + { + if (compressor == null) + return; + + try { - if (stream == null) - throw new ArgumentNullException(nameof(stream)); - - if (!stream.CanWrite) - throw new ArgumentException("Stream is not writable", nameof(stream)); - - if (bufferSize < 0) - throw new ArgumentOutOfRangeException(nameof(bufferSize)); - - 
innerStream = stream; - this.compressor = compressor; - this.preserveCompressor = preserveCompressor; - this.leaveOpen = leaveOpen; - - var outputBufferSize = - bufferSize > 0 - ? bufferSize - : (int)Methods.ZSTD_CStreamOutSize().EnsureZstdSuccess(); - outputBuffer = ArrayPool.Shared.Rent(outputBufferSize); - output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)outputBufferSize }; + await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false); } - - public void SetParameter(ZSTD_cParameter parameter, int value) + finally { - EnsureNotDisposed(); - compressor.NotNull().SetParameter(parameter, value); + ReleaseUnmanagedResources(); + GC.SuppressFinalize(this); } + } + + protected override void Dispose(bool disposing) + { + if (compressor == null) + return; - public int GetParameter(ZSTD_cParameter parameter) + try { - EnsureNotDisposed(); - return compressor.NotNull().GetParameter(parameter); + if (disposing) + FlushInternal(ZSTD_EndDirective.ZSTD_e_end); } - - public void LoadDictionary(byte[] dict) + finally { - EnsureNotDisposed(); - compressor.NotNull().LoadDictionary(dict); + ReleaseUnmanagedResources(); } + } - ~CompressionStream() => Dispose(false); - -#if !NETSTANDARD2_0 && !NETFRAMEWORK - public override async ValueTask DisposeAsync() -#else - public async Task DisposeAsync() -#endif + private void ReleaseUnmanagedResources() + { + if (!preserveCompressor) { - if (compressor == null) - return; - - try - { - await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_end).ConfigureAwait(false); - } - finally - { - ReleaseUnmanagedResources(); - GC.SuppressFinalize(this); - } + compressor.NotNull().Dispose(); } + compressor = null; - protected override void Dispose(bool disposing) + if (outputBuffer != null) { - if (compressor == null) - return; - - try - { - if (disposing) - FlushInternal(ZSTD_EndDirective.ZSTD_e_end); - } - finally - { - ReleaseUnmanagedResources(); - } + ArrayPool.Shared.Return(outputBuffer); } - private void 
ReleaseUnmanagedResources() + if (!leaveOpen) { - if (!preserveCompressor) - { - compressor.NotNull().Dispose(); - } - compressor = null; - - if (outputBuffer != null) - { - ArrayPool.Shared.Return(outputBuffer); - } - - if (!leaveOpen) - { - innerStream.Dispose(); - } + innerStream.Dispose(); } + } - public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); + public override void Flush() => FlushInternal(ZSTD_EndDirective.ZSTD_e_flush); - public override async Task FlushAsync(CancellationToken cancellationToken) => - await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken) - .ConfigureAwait(false); + public override async Task FlushAsync(CancellationToken cancellationToken) => + await FlushInternalAsync(ZSTD_EndDirective.ZSTD_e_flush, cancellationToken) + .ConfigureAwait(false); - private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive); + private void FlushInternal(ZSTD_EndDirective directive) => WriteInternal(null, directive); - private async Task FlushInternalAsync( - ZSTD_EndDirective directive, - CancellationToken cancellationToken = default - ) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false); + private async Task FlushInternalAsync( + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) => await WriteInternalAsync(null, directive, cancellationToken).ConfigureAwait(false); - public override void Write(byte[] buffer, int offset, int count) => - Write(new ReadOnlySpan(buffer, offset, count)); + public override void Write(byte[] buffer, int offset, int count) => + Write(new ReadOnlySpan(buffer, offset, count)); #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override void Write(ReadOnlySpan buffer) => - WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); + public override void Write(ReadOnlySpan buffer) => + WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #else public void Write(ReadOnlySpan buffer) => 
WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #endif - private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directive) + private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directive) + { + EnsureNotDisposed(); + + var input = new ZSTD_inBuffer_s { - EnsureNotDisposed(); - - var input = new ZSTD_inBuffer_s - { - pos = 0, - size = buffer != null ? (nuint)buffer.Length : 0, - }; - nuint remaining; - do - { - output.pos = 0; - remaining = CompressStream(ref input, buffer, directive); - - var written = (int)output.pos; - if (written > 0) - innerStream.Write(outputBuffer, 0, written); - } while ( - directive == ZSTD_EndDirective.ZSTD_e_continue - ? input.pos < input.size - : remaining > 0 - ); - } + pos = 0, + size = buffer != null ? (nuint)buffer.Length : 0, + }; + nuint remaining; + do + { + output.pos = 0; + remaining = CompressStream(ref input, buffer, directive); + + var written = (int)output.pos; + if (written > 0) + innerStream.Write(outputBuffer, 0, written); + } while ( + directive == ZSTD_EndDirective.ZSTD_e_continue + ? input.pos < input.size + : remaining > 0 + ); + } #if !NETSTANDARD2_0 && !NETFRAMEWORK - private async ValueTask WriteInternalAsync( - ReadOnlyMemory? buffer, - ZSTD_EndDirective directive, - CancellationToken cancellationToken = default - ) + private async ValueTask WriteInternalAsync( + ReadOnlyMemory? buffer, + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) #else private async Task WriteInternalAsync( ReadOnlyMemory? buffer, @@ -192,52 +192,52 @@ private async Task WriteInternalAsync( ) #endif + { + EnsureNotDisposed(); + + var input = new ZSTD_inBuffer_s { - EnsureNotDisposed(); - - var input = new ZSTD_inBuffer_s - { - pos = 0, - size = buffer.HasValue ? (nuint)buffer.Value.Length : 0, - }; - nuint remaining; - do - { - output.pos = 0; - remaining = CompressStream( - ref input, - buffer.HasValue ? 
buffer.Value.Span : null, - directive - ); - - var written = (int)output.pos; - if (written > 0) - await innerStream - .WriteAsync(outputBuffer, 0, written, cancellationToken) - .ConfigureAwait(false); - } while ( - directive == ZSTD_EndDirective.ZSTD_e_continue - ? input.pos < input.size - : remaining > 0 + pos = 0, + size = buffer.HasValue ? (nuint)buffer.Value.Length : 0, + }; + nuint remaining; + do + { + output.pos = 0; + remaining = CompressStream( + ref input, + buffer.HasValue ? buffer.Value.Span : null, + directive ); - } -#if !NETSTANDARD2_0 && !NETFRAMEWORK + var written = (int)output.pos; + if (written > 0) + await innerStream + .WriteAsync(outputBuffer, 0, written, cancellationToken) + .ConfigureAwait(false); + } while ( + directive == ZSTD_EndDirective.ZSTD_e_continue + ? input.pos < input.size + : remaining > 0 + ); + } - public override Task WriteAsync( - byte[] buffer, - int offset, - int count, - CancellationToken cancellationToken - ) => - WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); +#if !NETSTANDARD2_0 && !NETFRAMEWORK - public override async ValueTask WriteAsync( - ReadOnlyMemory buffer, - CancellationToken cancellationToken = default - ) => - await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) - .ConfigureAwait(false); + public override Task WriteAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => + WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); + + public override async ValueTask WriteAsync( + ReadOnlyMemory buffer, + CancellationToken cancellationToken = default + ) => + await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) + .ConfigureAwait(false); #else public override Task WriteAsync( @@ -255,54 +255,53 @@ await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellation .ConfigureAwait(false); #endif - internal unsafe nuint CompressStream( 
- ref ZSTD_inBuffer_s input, - ReadOnlySpan inputBuffer, - ZSTD_EndDirective directive - ) + internal unsafe nuint CompressStream( + ref ZSTD_inBuffer_s input, + ReadOnlySpan inputBuffer, + ZSTD_EndDirective directive + ) + { + fixed (byte* inputBufferPtr = inputBuffer) + fixed (byte* outputBufferPtr = outputBuffer) { - fixed (byte* inputBufferPtr = inputBuffer) - fixed (byte* outputBufferPtr = outputBuffer) - { - input.src = inputBufferPtr; - output.dst = outputBufferPtr; - return compressor - .NotNull() - .CompressStream(ref input, ref output, directive) - .EnsureZstdSuccess(); - } + input.src = inputBufferPtr; + output.dst = outputBufferPtr; + return compressor + .NotNull() + .CompressStream(ref input, ref output, directive) + .EnsureZstdSuccess(); } + } - public override bool CanRead => false; - public override bool CanSeek => false; - public override bool CanWrite => true; + public override bool CanRead => false; + public override bool CanSeek => false; + public override bool CanWrite => true; - public override long Length => throw new NotSupportedException(); + public override long Length => throw new NotSupportedException(); - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } - public override long Seek(long offset, SeekOrigin origin) => - throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotSupportedException(); - public override void SetLength(long value) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); - public override int Read(byte[] buffer, int offset, int count) => - throw new NotSupportedException(); + public override int Read(byte[] buffer, int offset, int count) => + throw new NotSupportedException(); - private void 
EnsureNotDisposed() - { - if (compressor == null) - throw new ObjectDisposedException(nameof(CompressionStream)); - } + private void EnsureNotDisposed() + { + if (compressor == null) + throw new ObjectDisposedException(nameof(CompressionStream)); + } - public void SetPledgedSrcSize(ulong pledgedSrcSize) - { - EnsureNotDisposed(); - compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize); - } + public void SetPledgedSrcSize(ulong pledgedSrcSize) + { + EnsureNotDisposed(); + compressor.NotNull().SetPledgedSrcSize(pledgedSrcSize); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Compressor.cs b/src/SharpCompress/Compressors/ZStandard/Compressor.cs index 470cd5636..8e99d24cc 100644 --- a/src/SharpCompress/Compressors/ZStandard/Compressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Compressor.cs @@ -1,205 +1,204 @@ using System; -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public unsafe class Compressor : IDisposable { - public unsafe class Compressor : IDisposable - { - /// - /// Minimum negative compression level allowed - /// - public static int MinCompressionLevel => Methods.ZSTD_minCLevel(); + /// + /// Minimum negative compression level allowed + /// + public static int MinCompressionLevel => Unsafe.Methods.ZSTD_minCLevel(); - /// - /// Maximum compression level available - /// - public static int MaxCompressionLevel => Methods.ZSTD_maxCLevel(); + /// + /// Maximum compression level available + /// + public static int MaxCompressionLevel => Unsafe.Methods.ZSTD_maxCLevel(); - /// - /// Default compression level - /// - /// - public const int DefaultCompressionLevel = 3; + /// + /// Default compression level + /// + /// + public const int DefaultCompressionLevel = 3; - private int level = DefaultCompressionLevel; + private int level = DefaultCompressionLevel; - private readonly SafeCctxHandle handle; + private readonly SafeCctxHandle handle; - public 
int Level + public int Level + { + get => level; + set { - get => level; - set + if (level != value) { - if (level != value) - { - level = value; - SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value); - } + level = value; + SetParameter(ZSTD_cParameter.ZSTD_c_compressionLevel, value); } } + } - public void SetParameter(ZSTD_cParameter parameter, int value) - { - using var cctx = handle.Acquire(); - Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess(); - } + public void SetParameter(ZSTD_cParameter parameter, int value) + { + using var cctx = handle.Acquire(); + Unsafe.Methods.ZSTD_CCtx_setParameter(cctx, parameter, value).EnsureZstdSuccess(); + } - public int GetParameter(ZSTD_cParameter parameter) - { - using var cctx = handle.Acquire(); - int value; - Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess(); - return value; - } + public int GetParameter(ZSTD_cParameter parameter) + { + using var cctx = handle.Acquire(); + int value; + Unsafe.Methods.ZSTD_CCtx_getParameter(cctx, parameter, &value).EnsureZstdSuccess(); + return value; + } - public void LoadDictionary(byte[] dict) - { - var dictReadOnlySpan = new ReadOnlySpan(dict); - LoadDictionary(dictReadOnlySpan); - } + public void LoadDictionary(byte[] dict) + { + var dictReadOnlySpan = new ReadOnlySpan(dict); + LoadDictionary(dictReadOnlySpan); + } - public void LoadDictionary(ReadOnlySpan dict) - { - using var cctx = handle.Acquire(); - fixed (byte* dictPtr = dict) - Methods - .ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length) - .EnsureZstdSuccess(); - } + public void LoadDictionary(ReadOnlySpan dict) + { + using var cctx = handle.Acquire(); + fixed (byte* dictPtr = dict) + Unsafe.Methods + .ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); + } - public Compressor(int level = DefaultCompressionLevel) - { - handle = SafeCctxHandle.Create(); - Level = level; - } + public Compressor(int level = DefaultCompressionLevel) 
+ { + handle = SafeCctxHandle.Create(); + Level = level; + } - public static int GetCompressBound(int length) => - (int)Methods.ZSTD_compressBound((nuint)length); + public static int GetCompressBound(int length) => + (int)Unsafe.Methods.ZSTD_compressBound((nuint)length); - public static ulong GetCompressBoundLong(ulong length) => - Methods.ZSTD_compressBound((nuint)length); + public static ulong GetCompressBoundLong(ulong length) => + Unsafe.Methods.ZSTD_compressBound((nuint)length); - public Span Wrap(ReadOnlySpan src) - { - var dest = new byte[GetCompressBound(src.Length)]; - var length = Wrap(src, dest); - return new Span(dest, 0, length); - } + public Span Wrap(ReadOnlySpan src) + { + var dest = new byte[GetCompressBound(src.Length)]; + var length = Wrap(src, dest); + return new Span(dest, 0, length); + } - public int Wrap(byte[] src, byte[] dest, int offset) => - Wrap(src, new Span(dest, offset, dest.Length - offset)); + public int Wrap(byte[] src, byte[] dest, int offset) => + Wrap(src, new Span(dest, offset, dest.Length - offset)); - public int Wrap(ReadOnlySpan src, Span dest) + public int Wrap(ReadOnlySpan src, Span dest) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) { - fixed (byte* srcPtr = src) - fixed (byte* destPtr = dest) - { - using var cctx = handle.Acquire(); - return (int) - Methods - .ZSTD_compress2( - cctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ) - .EnsureZstdSuccess(); - } + using var cctx = handle.Acquire(); + return (int) + Unsafe.Methods + .ZSTD_compress2( + cctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } + } - public int Wrap(ArraySegment src, ArraySegment dest) => - Wrap((ReadOnlySpan)src, dest); - - public int Wrap( - byte[] src, - int srcOffset, - int srcLength, - byte[] dst, - int dstOffset, - int dstLength - ) => - Wrap( - new ReadOnlySpan(src, srcOffset, srcLength), - new Span(dst, dstOffset, dstLength) - ); - - public bool TryWrap(byte[] 
src, byte[] dest, int offset, out int written) => - TryWrap(src, new Span(dest, offset, dest.Length - offset), out written); - - public bool TryWrap(ReadOnlySpan src, Span dest, out int written) + public int Wrap(ArraySegment src, ArraySegment dest) => + Wrap((ReadOnlySpan)src, dest); + + public int Wrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength + ) => + Wrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength) + ); + + public bool TryWrap(byte[] src, byte[] dest, int offset, out int written) => + TryWrap(src, new Span(dest, offset, dest.Length - offset), out written); + + public bool TryWrap(ReadOnlySpan src, Span dest, out int written) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) { - fixed (byte* srcPtr = src) - fixed (byte* destPtr = dest) + nuint returnValue; + using (var cctx = handle.Acquire()) { - nuint returnValue; - using (var cctx = handle.Acquire()) - { - returnValue = Methods.ZSTD_compress2( - cctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ); - } - - if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) - { - written = default; - return false; - } - - returnValue.EnsureZstdSuccess(); - written = (int)returnValue; - return true; + returnValue = Unsafe.Methods.ZSTD_compress2( + cctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ); } - } - public bool TryWrap(ArraySegment src, ArraySegment dest, out int written) => - TryWrap((ReadOnlySpan)src, dest, out written); - - public bool TryWrap( - byte[] src, - int srcOffset, - int srcLength, - byte[] dst, - int dstOffset, - int dstLength, - out int written - ) => - TryWrap( - new ReadOnlySpan(src, srcOffset, srcLength), - new Span(dst, dstOffset, dstLength), - out written - ); - - public void Dispose() - { - handle.Dispose(); - GC.SuppressFinalize(this); - } - - internal nuint CompressStream( - ref ZSTD_inBuffer_s input, - ref 
ZSTD_outBuffer_s output, - ZSTD_EndDirective directive - ) - { - fixed (ZSTD_inBuffer_s* inputPtr = &input) - fixed (ZSTD_outBuffer_s* outputPtr = &output) + if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) { - using var cctx = handle.Acquire(); - return Methods - .ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive) - .EnsureZstdSuccess(); + written = default; + return false; } + + returnValue.EnsureZstdSuccess(); + written = (int)returnValue; + return true; } + } + + public bool TryWrap(ArraySegment src, ArraySegment dest, out int written) => + TryWrap((ReadOnlySpan)src, dest, out written); + + public bool TryWrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength, + out int written + ) => + TryWrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength), + out written + ); + + public void Dispose() + { + handle.Dispose(); + GC.SuppressFinalize(this); + } - public void SetPledgedSrcSize(ulong pledgedSrcSize) + internal nuint CompressStream( + ref ZSTD_inBuffer_s input, + ref ZSTD_outBuffer_s output, + ZSTD_EndDirective directive + ) + { + fixed (ZSTD_inBuffer_s* inputPtr = &input) + fixed (ZSTD_outBuffer_s* outputPtr = &output) { using var cctx = handle.Acquire(); - Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess(); + return Unsafe.Methods + .ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive) + .EnsureZstdSuccess(); } } + + public void SetPledgedSrcSize(ulong pledgedSrcSize) + { + using var cctx = handle.Acquire(); + Unsafe.Methods.ZSTD_CCtx_setPledgedSrcSize(cctx, pledgedSrcSize).EnsureZstdSuccess(); + } } diff --git a/src/SharpCompress/Compressors/ZStandard/Constants.cs b/src/SharpCompress/Compressors/ZStandard/Constants.cs index 844946f7e..12c57270e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Constants.cs +++ b/src/SharpCompress/Compressors/ZStandard/Constants.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp 
+namespace SharpCompress.Compressors.ZStandard; + +internal class Constants { - internal class Constants - { - //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks - //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 - public const ulong MaxByteArrayLength = 0x7FFFFFC7; - } -} + //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks + //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 + public const ulong MaxByteArrayLength = 0x7FFFFFC7; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs index 69c449359..583acd017 100644 --- a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs @@ -3,175 +3,175 @@ using System.IO; using System.Threading; using System.Threading.Tasks; -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public class DecompressionStream : Stream { - public class DecompressionStream : Stream + private readonly Stream innerStream; + private readonly byte[] inputBuffer; + private readonly int inputBufferSize; + private readonly bool preserveDecompressor; + private readonly bool leaveOpen; + private readonly bool checkEndOfStream; + private Decompressor? 
decompressor; + private ZSTD_inBuffer_s input; + private nuint lastDecompressResult = 0; + private bool contextDrained = true; + + public DecompressionStream( + Stream stream, + int bufferSize = 0, + bool checkEndOfStream = true, + bool leaveOpen = true + ) + : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { } + + public DecompressionStream( + Stream stream, + Decompressor decompressor, + int bufferSize = 0, + bool checkEndOfStream = true, + bool preserveDecompressor = true, + bool leaveOpen = true + ) { - private readonly Stream innerStream; - private readonly byte[] inputBuffer; - private readonly int inputBufferSize; - private readonly bool preserveDecompressor; - private readonly bool leaveOpen; - private readonly bool checkEndOfStream; - private Decompressor? decompressor; - private ZSTD_inBuffer_s input; - private nuint lastDecompressResult = 0; - private bool contextDrained = true; - - public DecompressionStream( - Stream stream, - int bufferSize = 0, - bool checkEndOfStream = true, - bool leaveOpen = true - ) - : this(stream, new Decompressor(), bufferSize, checkEndOfStream, false, leaveOpen) { } - - public DecompressionStream( - Stream stream, - Decompressor decompressor, - int bufferSize = 0, - bool checkEndOfStream = true, - bool preserveDecompressor = true, - bool leaveOpen = true - ) + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + + if (!stream.CanRead) + throw new ArgumentException("Stream is not readable", nameof(stream)); + + if (bufferSize < 0) + throw new ArgumentOutOfRangeException(nameof(bufferSize)); + + innerStream = stream; + this.decompressor = decompressor; + this.preserveDecompressor = preserveDecompressor; + this.leaveOpen = leaveOpen; + this.checkEndOfStream = checkEndOfStream; + + inputBufferSize = + bufferSize > 0 ? 
bufferSize : (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); + inputBuffer = ArrayPool.Shared.Rent(inputBufferSize); + input = new ZSTD_inBuffer_s { - if (stream == null) - throw new ArgumentNullException(nameof(stream)); + pos = (nuint)inputBufferSize, + size = (nuint)inputBufferSize, + }; + } - if (!stream.CanRead) - throw new ArgumentException("Stream is not readable", nameof(stream)); + public void SetParameter(ZSTD_dParameter parameter, int value) + { + EnsureNotDisposed(); + decompressor.NotNull().SetParameter(parameter, value); + } - if (bufferSize < 0) - throw new ArgumentOutOfRangeException(nameof(bufferSize)); + public int GetParameter(ZSTD_dParameter parameter) + { + EnsureNotDisposed(); + return decompressor.NotNull().GetParameter(parameter); + } - innerStream = stream; - this.decompressor = decompressor; - this.preserveDecompressor = preserveDecompressor; - this.leaveOpen = leaveOpen; - this.checkEndOfStream = checkEndOfStream; + public void LoadDictionary(byte[] dict) + { + EnsureNotDisposed(); + decompressor.NotNull().LoadDictionary(dict); + } - inputBufferSize = - bufferSize > 0 ? 
bufferSize : (int)Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); - inputBuffer = ArrayPool.Shared.Rent(inputBufferSize); - input = new ZSTD_inBuffer_s - { - pos = (nuint)inputBufferSize, - size = (nuint)inputBufferSize, - }; - } + ~DecompressionStream() => Dispose(false); - public void SetParameter(ZSTD_dParameter parameter, int value) - { - EnsureNotDisposed(); - decompressor.NotNull().SetParameter(parameter, value); - } + protected override void Dispose(bool disposing) + { + if (decompressor == null) + return; - public int GetParameter(ZSTD_dParameter parameter) + if (!preserveDecompressor) { - EnsureNotDisposed(); - return decompressor.NotNull().GetParameter(parameter); + decompressor.Dispose(); } + decompressor = null; - public void LoadDictionary(byte[] dict) + if (inputBuffer != null) { - EnsureNotDisposed(); - decompressor.NotNull().LoadDictionary(dict); + ArrayPool.Shared.Return(inputBuffer); } - ~DecompressionStream() => Dispose(false); - - protected override void Dispose(bool disposing) + if (!leaveOpen) { - if (decompressor == null) - return; - - if (!preserveDecompressor) - { - decompressor.Dispose(); - } - decompressor = null; - - if (inputBuffer != null) - { - ArrayPool.Shared.Return(inputBuffer); - } - - if (!leaveOpen) - { - innerStream.Dispose(); - } + innerStream.Dispose(); } + } - public override int Read(byte[] buffer, int offset, int count) => - Read(new Span(buffer, offset, count)); + public override int Read(byte[] buffer, int offset, int count) => + Read(new Span(buffer, offset, count)); #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override int Read(Span buffer) + public override int Read(Span buffer) #else public int Read(Span buffer) #endif - { - EnsureNotDisposed(); + { + EnsureNotDisposed(); - // Guard against infinite loop (output.pos would never become non-zero) - if (buffer.Length == 0) - { - return 0; - } + // Guard against infinite loop (output.pos would never become non-zero) + if (buffer.Length == 0) + { + return 0; + } - var 
output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; - while (true) + var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; + while (true) + { + // If there is still input available, or there might be data buffered in the decompressor context, flush that out + while (input.pos < input.size || !contextDrained) { - // If there is still input available, or there might be data buffered in the decompressor context, flush that out - while (input.pos < input.size || !contextDrained) + nuint oldInputPos = input.pos; + nuint result = DecompressStream(ref output, buffer); + if (output.pos > 0 || oldInputPos != input.pos) { - nuint oldInputPos = input.pos; - nuint result = DecompressStream(ref output, buffer); - if (output.pos > 0 || oldInputPos != input.pos) - { - // Keep result from last decompress call that made some progress, so we known if we're at end of frame - lastDecompressResult = result; - } - // If decompression filled the output buffer, there might still be data buffered in the decompressor context - contextDrained = output.pos < output.size; - // If we have data to return, return it immediately, so we won't stall on Read - if (output.pos > 0) - { - return (int)output.pos; - } + // Keep result from last decompress call that made some progress, so we known if we're at end of frame + lastDecompressResult = result; } - - // Otherwise, read some more input - int bytesRead; - if ((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0) + // If decompression filled the output buffer, there might still be data buffered in the decompressor context + contextDrained = output.pos < output.size; + // If we have data to return, return it immediately, so we won't stall on Read + if (output.pos > 0) { - if (checkEndOfStream && lastDecompressResult != 0) - { - throw new EndOfStreamException("Premature end of stream"); - } + return (int)output.pos; + } + } - return 0; + // Otherwise, read some more input + int bytesRead; + if 
((bytesRead = innerStream.Read(inputBuffer, 0, inputBufferSize)) == 0) + { + if (checkEndOfStream && lastDecompressResult != 0) + { + throw new EndOfStreamException("Premature end of stream"); } - input.size = (nuint)bytesRead; - input.pos = 0; + return 0; } + + input.size = (nuint)bytesRead; + input.pos = 0; } + } #if !NETSTANDARD2_0 && !NETFRAMEWORK - public override Task ReadAsync( - byte[] buffer, - int offset, - int count, - CancellationToken cancellationToken - ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); - - public override async ValueTask ReadAsync( - Memory buffer, - CancellationToken cancellationToken = default - ) + public override Task ReadAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + + public override async ValueTask ReadAsync( + Memory buffer, + CancellationToken cancellationToken = default + ) #else public override Task ReadAsync( @@ -186,98 +186,98 @@ public async Task ReadAsync( CancellationToken cancellationToken = default ) #endif - { - EnsureNotDisposed(); + { + EnsureNotDisposed(); - // Guard against infinite loop (output.pos would never become non-zero) - if (buffer.Length == 0) - { - return 0; - } + // Guard against infinite loop (output.pos would never become non-zero) + if (buffer.Length == 0) + { + return 0; + } - var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; - while (true) + var output = new ZSTD_outBuffer_s { pos = 0, size = (nuint)buffer.Length }; + while (true) + { + // If there is still input available, or there might be data buffered in the decompressor context, flush that out + while (input.pos < input.size || !contextDrained) { - // If there is still input available, or there might be data buffered in the decompressor context, flush that out - while (input.pos < input.size || !contextDrained) + nuint oldInputPos = input.pos; + nuint result = 
DecompressStream(ref output, buffer.Span); + if (output.pos > 0 || oldInputPos != input.pos) { - nuint oldInputPos = input.pos; - nuint result = DecompressStream(ref output, buffer.Span); - if (output.pos > 0 || oldInputPos != input.pos) - { - // Keep result from last decompress call that made some progress, so we known if we're at end of frame - lastDecompressResult = result; - } - // If decompression filled the output buffer, there might still be data buffered in the decompressor context - contextDrained = output.pos < output.size; - // If we have data to return, return it immediately, so we won't stall on Read - if (output.pos > 0) - { - return (int)output.pos; - } + // Keep result from last decompress call that made some progress, so we known if we're at end of frame + lastDecompressResult = result; } - - // Otherwise, read some more input - int bytesRead; - if ( - ( - bytesRead = await innerStream - .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) - .ConfigureAwait(false) - ) == 0 - ) + // If decompression filled the output buffer, there might still be data buffered in the decompressor context + contextDrained = output.pos < output.size; + // If we have data to return, return it immediately, so we won't stall on Read + if (output.pos > 0) { - if (checkEndOfStream && lastDecompressResult != 0) - { - throw new EndOfStreamException("Premature end of stream"); - } + return (int)output.pos; + } + } - return 0; + // Otherwise, read some more input + int bytesRead; + if ( + ( + bytesRead = await innerStream + .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) + .ConfigureAwait(false) + ) == 0 + ) + { + if (checkEndOfStream && lastDecompressResult != 0) + { + throw new EndOfStreamException("Premature end of stream"); } - input.size = (nuint)bytesRead; - input.pos = 0; + return 0; } + + input.size = (nuint)bytesRead; + input.pos = 0; } + } - private unsafe nuint DecompressStream(ref ZSTD_outBuffer_s output, Span outputBuffer) + private unsafe 
nuint DecompressStream(ref ZSTD_outBuffer_s output, Span outputBuffer) + { + fixed (byte* inputBufferPtr = inputBuffer) + fixed (byte* outputBufferPtr = outputBuffer) { - fixed (byte* inputBufferPtr = inputBuffer) - fixed (byte* outputBufferPtr = outputBuffer) - { - input.src = inputBufferPtr; - output.dst = outputBufferPtr; - return decompressor.NotNull().DecompressStream(ref input, ref output); - } + input.src = inputBufferPtr; + output.dst = outputBufferPtr; + return decompressor.NotNull().DecompressStream(ref input, ref output); } + } - public override bool CanRead => true; - public override bool CanSeek => false; - public override bool CanWrite => false; + public override bool CanRead => true; + public override bool CanSeek => false; + public override bool CanWrite => false; - public override long Length => throw new NotSupportedException(); + public override long Length => throw new NotSupportedException(); - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } - public override void Flush() => throw new NotSupportedException(); + public override void Flush() => throw new NotSupportedException(); - public override long Seek(long offset, SeekOrigin origin) => - throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => + throw new NotSupportedException(); - public override void SetLength(long value) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); - public override void Write(byte[] buffer, int offset, int count) => - throw new NotSupportedException(); + public override void Write(byte[] buffer, int offset, int count) => + throw new NotSupportedException(); - private void EnsureNotDisposed() - { - if (decompressor == null) - throw new 
ObjectDisposedException(nameof(DecompressionStream)); - } + private void EnsureNotDisposed() + { + if (decompressor == null) + throw new ObjectDisposedException(nameof(DecompressionStream)); + } #if NETSTANDARD2_0 || NETFRAMEWORK public virtual Task DisposeAsync() @@ -293,5 +293,4 @@ public virtual Task DisposeAsync() } } #endif - } } diff --git a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs index 2e388bb27..80fa8851a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs @@ -1,175 +1,174 @@ using System; -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public unsafe class Decompressor : IDisposable { - public unsafe class Decompressor : IDisposable - { - private readonly SafeDctxHandle handle; + private readonly SafeDctxHandle handle; - public Decompressor() - { - handle = SafeDctxHandle.Create(); - } + public Decompressor() + { + handle = SafeDctxHandle.Create(); + } - public void SetParameter(ZSTD_dParameter parameter, int value) - { - using var dctx = handle.Acquire(); - Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess(); - } + public void SetParameter(ZSTD_dParameter parameter, int value) + { + using var dctx = handle.Acquire(); + Unsafe.Methods.ZSTD_DCtx_setParameter(dctx, parameter, value).EnsureZstdSuccess(); + } - public int GetParameter(ZSTD_dParameter parameter) - { - using var dctx = handle.Acquire(); - int value; - Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess(); - return value; - } + public int GetParameter(ZSTD_dParameter parameter) + { + using var dctx = handle.Acquire(); + int value; + Unsafe.Methods.ZSTD_DCtx_getParameter(dctx, parameter, &value).EnsureZstdSuccess(); + return value; + } - public void LoadDictionary(byte[] dict) - { - var dictReadOnlySpan = 
new ReadOnlySpan(dict); - this.LoadDictionary(dictReadOnlySpan); - } + public void LoadDictionary(byte[] dict) + { + var dictReadOnlySpan = new ReadOnlySpan(dict); + this.LoadDictionary(dictReadOnlySpan); + } - public void LoadDictionary(ReadOnlySpan dict) - { - using var dctx = handle.Acquire(); - fixed (byte* dictPtr = dict) - Methods - .ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length) - .EnsureZstdSuccess(); - } + public void LoadDictionary(ReadOnlySpan dict) + { + using var dctx = handle.Acquire(); + fixed (byte* dictPtr = dict) + Unsafe.Methods + .ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); + } - public static ulong GetDecompressedSize(ReadOnlySpan src) - { - fixed (byte* srcPtr = src) - return Methods - .ZSTD_decompressBound(srcPtr, (nuint)src.Length) - .EnsureContentSizeOk(); - } + public static ulong GetDecompressedSize(ReadOnlySpan src) + { + fixed (byte* srcPtr = src) + return Unsafe.Methods + .ZSTD_decompressBound(srcPtr, (nuint)src.Length) + .EnsureContentSizeOk(); + } - public static ulong GetDecompressedSize(ArraySegment src) => - GetDecompressedSize((ReadOnlySpan)src); + public static ulong GetDecompressedSize(ArraySegment src) => + GetDecompressedSize((ReadOnlySpan)src); - public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) => - GetDecompressedSize(new ReadOnlySpan(src, srcOffset, srcLength)); + public static ulong GetDecompressedSize(byte[] src, int srcOffset, int srcLength) => + GetDecompressedSize(new ReadOnlySpan(src, srcOffset, srcLength)); - public Span Unwrap(ReadOnlySpan src, int maxDecompressedSize = int.MaxValue) - { - var expectedDstSize = GetDecompressedSize(src); - if (expectedDstSize > (ulong)maxDecompressedSize) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, - $"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}" - ); - if (expectedDstSize > 
Constants.MaxByteArrayLength) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, - $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}" - ); + public Span Unwrap(ReadOnlySpan src, int maxDecompressedSize = int.MaxValue) + { + var expectedDstSize = GetDecompressedSize(src); + if (expectedDstSize > (ulong)maxDecompressedSize) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than {nameof(maxDecompressedSize)} {maxDecompressedSize}" + ); + if (expectedDstSize > Constants.MaxByteArrayLength) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall, + $"Decompressed content size {expectedDstSize} is greater than max possible byte array size {Constants.MaxByteArrayLength}" + ); - var dest = new byte[expectedDstSize]; - var length = Unwrap(src, dest); - return new Span(dest, 0, length); - } + var dest = new byte[expectedDstSize]; + var length = Unwrap(src, dest); + return new Span(dest, 0, length); + } - public int Unwrap(byte[] src, byte[] dest, int offset) => - Unwrap(src, new Span(dest, offset, dest.Length - offset)); + public int Unwrap(byte[] src, byte[] dest, int offset) => + Unwrap(src, new Span(dest, offset, dest.Length - offset)); - public int Unwrap(ReadOnlySpan src, Span dest) + public int Unwrap(ReadOnlySpan src, Span dest) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) { - fixed (byte* srcPtr = src) - fixed (byte* destPtr = dest) - { - using var dctx = handle.Acquire(); - return (int) - Methods - .ZSTD_decompressDCtx( - dctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ) - .EnsureZstdSuccess(); - } + using var dctx = handle.Acquire(); + return (int) + Unsafe.Methods + .ZSTD_decompressDCtx( + dctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } + } - public int Unwrap( - byte[] src, - int 
srcOffset, - int srcLength, - byte[] dst, - int dstOffset, - int dstLength - ) => - Unwrap( - new ReadOnlySpan(src, srcOffset, srcLength), - new Span(dst, dstOffset, dstLength) - ); - - public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) => - TryUnwrap(src, new Span(dest, offset, dest.Length - offset), out written); - - public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) + public int Unwrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength + ) => + Unwrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength) + ); + + public bool TryUnwrap(byte[] src, byte[] dest, int offset, out int written) => + TryUnwrap(src, new Span(dest, offset, dest.Length - offset), out written); + + public bool TryUnwrap(ReadOnlySpan src, Span dest, out int written) + { + fixed (byte* srcPtr = src) + fixed (byte* destPtr = dest) { - fixed (byte* srcPtr = src) - fixed (byte* destPtr = dest) + nuint returnValue; + using (var dctx = handle.Acquire()) { - nuint returnValue; - using (var dctx = handle.Acquire()) - { - returnValue = Methods.ZSTD_decompressDCtx( - dctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ); - } - - if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) - { - written = default; - return false; - } - - returnValue.EnsureZstdSuccess(); - written = (int)returnValue; - return true; + returnValue = Unsafe.Methods.ZSTD_decompressDCtx( + dctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ); } - } - public bool TryUnwrap( - byte[] src, - int srcOffset, - int srcLength, - byte[] dst, - int dstOffset, - int dstLength, - out int written - ) => - TryUnwrap( - new ReadOnlySpan(src, srcOffset, srcLength), - new Span(dst, dstOffset, dstLength), - out written - ); + if (returnValue == unchecked(0 - (nuint)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + { + written = default; + return false; + } - public 
void Dispose() - { - handle.Dispose(); - GC.SuppressFinalize(this); + returnValue.EnsureZstdSuccess(); + written = (int)returnValue; + return true; } + } - internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output) + public bool TryUnwrap( + byte[] src, + int srcOffset, + int srcLength, + byte[] dst, + int dstOffset, + int dstLength, + out int written + ) => + TryUnwrap( + new ReadOnlySpan(src, srcOffset, srcLength), + new Span(dst, dstOffset, dstLength), + out written + ); + + public void Dispose() + { + handle.Dispose(); + GC.SuppressFinalize(this); + } + + internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s output) + { + fixed (ZSTD_inBuffer_s* inputPtr = &input) + fixed (ZSTD_outBuffer_s* outputPtr = &output) { - fixed (ZSTD_inBuffer_s* inputPtr = &input) - fixed (ZSTD_outBuffer_s* outputPtr = &output) - { - using var dctx = handle.Acquire(); - return Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr).EnsureZstdSuccess(); - } + using var dctx = handle.Acquire(); + return Unsafe.Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr).EnsureZstdSuccess(); } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs index c9bd20f79..e783940dc 100644 --- a/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs +++ b/src/SharpCompress/Compressors/ZStandard/JobThreadPool.cs @@ -3,140 +3,139 @@ using System.Collections.Generic; using System.Threading; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +internal unsafe class JobThreadPool : IDisposable { - internal unsafe class JobThreadPool : IDisposable + private int numThreads; + private readonly List threads; + private readonly BlockingCollection queue; + + private struct Job + { + public void* function; + public void* opaque; + } + + private class JobThread { - private int numThreads; - private readonly List threads; - 
private readonly BlockingCollection queue; + private Thread Thread { get; } + public CancellationTokenSource CancellationTokenSource { get; } - private struct Job + public JobThread(Thread thread) { - public void* function; - public void* opaque; + CancellationTokenSource = new CancellationTokenSource(); + Thread = thread; } - private class JobThread + public void Start() { - private Thread Thread { get; } - public CancellationTokenSource CancellationTokenSource { get; } - - public JobThread(Thread thread) - { - CancellationTokenSource = new CancellationTokenSource(); - Thread = thread; - } - - public void Start() - { - Thread.Start(this); - } - - public void Cancel() - { - CancellationTokenSource.Cancel(); - } - - public void Join() - { - Thread.Join(); - } + Thread.Start(this); } - private void Worker(object? obj) + public void Cancel() { - if (obj is not JobThread poolThread) - return; - - var cancellationToken = poolThread.CancellationTokenSource.Token; - while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested) - { - try - { - if (queue.TryTake(out var job, -1, cancellationToken)) - ((delegate* managed)job.function)(job.opaque); - } - catch (InvalidOperationException) { } - catch (OperationCanceledException) { } - } + CancellationTokenSource.Cancel(); } - public JobThreadPool(int num, int queueSize) + public void Join() { - numThreads = num; - queue = new BlockingCollection(queueSize + 1); - threads = new List(num); - for (var i = 0; i < numThreads; i++) - CreateThread(); + Thread.Join(); } + } + + private void Worker(object? 
obj) + { + if (obj is not JobThread poolThread) + return; - private void CreateThread() + var cancellationToken = poolThread.CancellationTokenSource.Token; + while (!queue.IsCompleted && !cancellationToken.IsCancellationRequested) { - var poolThread = new JobThread(new Thread(Worker)); - threads.Add(poolThread); - poolThread.Start(); + try + { + if (queue.TryTake(out var job, -1, cancellationToken)) + ((delegate* managed)job.function)(job.opaque); + } + catch (InvalidOperationException) { } + catch (OperationCanceledException) { } } + } - public void Resize(int num) + public JobThreadPool(int num, int queueSize) + { + numThreads = num; + queue = new BlockingCollection(queueSize + 1); + threads = new List(num); + for (var i = 0; i < numThreads; i++) + CreateThread(); + } + + private void CreateThread() + { + var poolThread = new JobThread(new Thread(Worker)); + threads.Add(poolThread); + poolThread.Start(); + } + + public void Resize(int num) + { + lock (threads) { - lock (threads) + if (num < numThreads) { - if (num < numThreads) - { - for (var i = numThreads - 1; i >= num; i--) - { - threads[i].Cancel(); - threads.RemoveAt(i); - } - } - else + for (var i = numThreads - 1; i >= num; i--) { - for (var i = numThreads; i < num; i++) - CreateThread(); + threads[i].Cancel(); + threads.RemoveAt(i); } } - - numThreads = num; + else + { + for (var i = numThreads; i < num; i++) + CreateThread(); + } } - public void Add(void* function, void* opaque) - { - queue.Add(new Job { function = function, opaque = opaque }); - } + numThreads = num; + } - public bool TryAdd(void* function, void* opaque) - { - return queue.TryAdd(new Job { function = function, opaque = opaque }); - } + public void Add(void* function, void* opaque) + { + queue.Add(new Job { function = function, opaque = opaque }); + } - public void Join(bool cancel = true) - { - queue.CompleteAdding(); - List jobThreads; - lock (threads) - jobThreads = new List(threads); + public bool TryAdd(void* function, void* opaque) 
+ { + return queue.TryAdd(new Job { function = function, opaque = opaque }); + } - if (cancel) - { - foreach (var thread in jobThreads) - thread.Cancel(); - } + public void Join(bool cancel = true) + { + queue.CompleteAdding(); + List jobThreads; + lock (threads) + jobThreads = new List(threads); + if (cancel) + { foreach (var thread in jobThreads) - thread.Join(); + thread.Cancel(); } - public void Dispose() - { - queue.Dispose(); - } + foreach (var thread in jobThreads) + thread.Join(); + } - public int Size() - { - // todo not implemented - // https://github.com/dotnet/runtime/issues/24200 - return 0; - } + public void Dispose() + { + queue.Dispose(); + } + + public int Size() + { + // todo not implemented + // https://github.com/dotnet/runtime/issues/24200 + return 0; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Pool.cs b/src/SharpCompress/Compressors/ZStandard/Pool.cs deleted file mode 100644 index 0d681cbc2..000000000 --- a/src/SharpCompress/Compressors/ZStandard/Pool.cs +++ /dev/null @@ -1,122 +0,0 @@ -using static ZstdSharp.UnsafeHelper; - -namespace ZstdSharp.Unsafe -{ - public static unsafe partial class Methods - { - private static JobThreadPool GetThreadPool(void* ctx) => - UnmanagedObject.Unwrap(ctx); - - /* ZSTD_createThreadPool() : public access point */ - public static void* ZSTD_createThreadPool(nuint numThreads) - { - return POOL_create(numThreads, 0); - } - - /*! POOL_create() : - * Create a thread pool with at most `numThreads` threads. - * `numThreads` must be at least 1. - * The maximum number of queued jobs before blocking is `queueSize`. - * @return : POOL_ctx pointer on success, else NULL. 
- */ - private static void* POOL_create(nuint numThreads, nuint queueSize) - { - return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); - } - - private static void* POOL_create_advanced( - nuint numThreads, - nuint queueSize, - ZSTD_customMem customMem - ) - { - var jobThreadPool = new JobThreadPool((int)numThreads, (int)queueSize); - return UnmanagedObject.Wrap(jobThreadPool); - } - - /*! POOL_join() : - Shutdown the queue, wake any sleeping threads, and join all of the threads. - */ - private static void POOL_join(void* ctx) - { - GetThreadPool(ctx).Join(); - } - - /*! POOL_free() : - * Free a thread pool returned by POOL_create(). - */ - private static void POOL_free(void* ctx) - { - if (ctx == null) - { - return; - } - - var jobThreadPool = GetThreadPool(ctx); - jobThreadPool.Join(); - jobThreadPool.Dispose(); - UnmanagedObject.Free(ctx); - } - - /*! POOL_joinJobs() : - * Waits for all queued jobs to finish executing. - */ - private static void POOL_joinJobs(void* ctx) - { - var jobThreadPool = GetThreadPool(ctx); - jobThreadPool.Join(false); - } - - public static void ZSTD_freeThreadPool(void* pool) - { - POOL_free(pool); - } - - /*! POOL_sizeof() : - * @return threadpool memory usage - * note : compatible with NULL (returns 0 in this case) - */ - private static nuint POOL_sizeof(void* ctx) - { - if (ctx == null) - return 0; - var jobThreadPool = GetThreadPool(ctx); - return (nuint)jobThreadPool.Size(); - } - - /* @return : 0 on success, 1 on error */ - private static int POOL_resize(void* ctx, nuint numThreads) - { - if (ctx == null) - return 1; - var jobThreadPool = GetThreadPool(ctx); - jobThreadPool.Resize((int)numThreads); - return 0; - } - - /*! POOL_add() : - * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. - * Possibly blocks until there is room in the queue. - * Note : The function may be executed asynchronously, - * therefore, `opaque` must live until function has been completed. 
- */ - private static void POOL_add(void* ctx, void* function, void* opaque) - { - assert(ctx != null); - var jobThreadPool = GetThreadPool(ctx); - jobThreadPool.Add(function, opaque); - } - - /*! POOL_tryAdd() : - * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. - * Returns immediately even if not (does not block). - * @return : 1 if successful, 0 if not. - */ - private static int POOL_tryAdd(void* ctx, void* function, void* opaque) - { - assert(ctx != null); - var jobThreadPool = GetThreadPool(ctx); - return jobThreadPool.TryAdd(function, opaque) ? 1 : 0; - } - } -} diff --git a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs index 7d46add31..4490274ca 100644 --- a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs +++ b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs @@ -1,170 +1,169 @@ using System; using System.Runtime.InteropServices; -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +/// +/// Provides the base class for ZstdSharp implementations. +/// +/// +/// Even though ZstdSharp is a managed library, its internals are using unmanaged +/// memory and we are using safe handles in the library's high-level API to ensure +/// proper disposal of unmanaged resources and increase safety. +/// +/// +/// +internal abstract unsafe class SafeZstdHandle : SafeHandle { /// - /// Provides the base class for ZstdSharp implementations. + /// Parameterless constructor is hidden. Use the static Create factory + /// method to create a new safe handle instance. /// - /// - /// Even though ZstdSharp is a managed library, its internals are using unmanaged - /// memory and we are using safe handles in the library's high-level API to ensure - /// proper disposal of unmanaged resources and increase safety. 
- /// - /// - /// - internal abstract unsafe class SafeZstdHandle : SafeHandle - { - /// - /// Parameterless constructor is hidden. Use the static Create factory - /// method to create a new safe handle instance. - /// - protected SafeZstdHandle() - : base(IntPtr.Zero, true) { } - - public sealed override bool IsInvalid => handle == IntPtr.Zero; - } + protected SafeZstdHandle() + : base(IntPtr.Zero, true) { } + + public sealed override bool IsInvalid => handle == IntPtr.Zero; +} + +/// +/// Safely wraps an unmanaged Zstd compression context. +/// +internal sealed unsafe class SafeCctxHandle : SafeZstdHandle +{ + /// + private SafeCctxHandle() { } /// - /// Safely wraps an unmanaged Zstd compression context. + /// Creates a new instance of . /// - internal sealed unsafe class SafeCctxHandle : SafeZstdHandle + /// + /// Creation failed. + public static SafeCctxHandle Create() { - /// - private SafeCctxHandle() { } - - /// - /// Creates a new instance of . - /// - /// - /// Creation failed. - public static SafeCctxHandle Create() + var safeHandle = new SafeCctxHandle(); + bool success = false; + try { - var safeHandle = new SafeCctxHandle(); - bool success = false; - try - { - var cctx = Methods.ZSTD_createCCtx(); - if (cctx == null) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Failed to create cctx" - ); - safeHandle.SetHandle((IntPtr)cctx); - success = true; - } - finally + var cctx = Unsafe.Methods.ZSTD_createCCtx(); + if (cctx == null) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Failed to create cctx" + ); + safeHandle.SetHandle((IntPtr)cctx); + success = true; + } + finally + { + if (!success) { - if (!success) - { - safeHandle.SetHandleAsInvalid(); - } + safeHandle.SetHandleAsInvalid(); } - return safeHandle; } + return safeHandle; + } - /// - /// Acquires a reference to the safe handle. - /// - /// - /// A instance that can be implicitly converted to a pointer - /// to . 
- /// - public SafeHandleHolder Acquire() => new(this); + /// + /// Acquires a reference to the safe handle. + /// + /// + /// A instance that can be implicitly converted to a pointer + /// to . + /// + public SafeHandleHolder Acquire() => new(this); - protected override bool ReleaseHandle() - { - return Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0; - } + protected override bool ReleaseHandle() + { + return Unsafe.Methods.ZSTD_freeCCtx((ZSTD_CCtx_s*)handle) == 0; } +} + +/// +/// Safely wraps an unmanaged Zstd compression context. +/// +internal sealed unsafe class SafeDctxHandle : SafeZstdHandle +{ + /// + private SafeDctxHandle() { } /// - /// Safely wraps an unmanaged Zstd compression context. + /// Creates a new instance of . /// - internal sealed unsafe class SafeDctxHandle : SafeZstdHandle + /// + /// Creation failed. + public static SafeDctxHandle Create() { - /// - private SafeDctxHandle() { } - - /// - /// Creates a new instance of . - /// - /// - /// Creation failed. - public static SafeDctxHandle Create() + var safeHandle = new SafeDctxHandle(); + bool success = false; + try { - var safeHandle = new SafeDctxHandle(); - bool success = false; - try - { - var dctx = Methods.ZSTD_createDCtx(); - if (dctx == null) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Failed to create dctx" - ); - safeHandle.SetHandle((IntPtr)dctx); - success = true; - } - finally - { - if (!success) - { - safeHandle.SetHandleAsInvalid(); - } - } - return safeHandle; + var dctx = Unsafe.Methods.ZSTD_createDCtx(); + if (dctx == null) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Failed to create dctx" + ); + safeHandle.SetHandle((IntPtr)dctx); + success = true; } - - /// - /// Acquires a reference to the safe handle. - /// - /// - /// A instance that can be implicitly converted to a pointer - /// to . 
- /// - public SafeHandleHolder Acquire() => new(this); - - protected override bool ReleaseHandle() + finally { - return Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0; + if (!success) + { + safeHandle.SetHandleAsInvalid(); + } } + return safeHandle; } /// - /// Provides a convenient interface to safely acquire pointers of a specific type - /// from a , by utilizing blocks. + /// Acquires a reference to the safe handle. /// - /// The type of pointers to return. - /// - /// Safe handle holders can be d to decrement the safe handle's - /// reference count, and can be implicitly converted to pointers to . - /// - internal unsafe ref struct SafeHandleHolder - where T : unmanaged + /// + /// A instance that can be implicitly converted to a pointer + /// to . + /// + public SafeHandleHolder Acquire() => new(this); + + protected override bool ReleaseHandle() { - private readonly SafeHandle _handle; + return Unsafe.Methods.ZSTD_freeDCtx((ZSTD_DCtx_s*)handle) == 0; + } +} - private bool _refAdded; +/// +/// Provides a convenient interface to safely acquire pointers of a specific type +/// from a , by utilizing blocks. +/// +/// The type of pointers to return. +/// +/// Safe handle holders can be d to decrement the safe handle's +/// reference count, and can be implicitly converted to pointers to . 
+/// +internal unsafe ref struct SafeHandleHolder + where T : unmanaged +{ + private readonly SafeHandle _handle; - public SafeHandleHolder(SafeHandle safeHandle) - { - _handle = safeHandle; - _refAdded = false; - safeHandle.DangerousAddRef(ref _refAdded); - } + private bool _refAdded; + + public SafeHandleHolder(SafeHandle safeHandle) + { + _handle = safeHandle; + _refAdded = false; + safeHandle.DangerousAddRef(ref _refAdded); + } - public static implicit operator T*(SafeHandleHolder holder) => - (T*)holder._handle.DangerousGetHandle(); + public static implicit operator T*(SafeHandleHolder holder) => + (T*)holder._handle.DangerousGetHandle(); - public void Dispose() + public void Dispose() + { + if (_refAdded) { - if (_refAdded) - { - _handle.DangerousRelease(); - _refAdded = false; - } + _handle.DangerousRelease(); + _refAdded = false; } } } diff --git a/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs index 53c232e77..e2f7894db 100644 --- a/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs +++ b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs @@ -1,23 +1,22 @@ using System.Threading; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +internal static unsafe class SynchronizationWrapper { - internal static unsafe class SynchronizationWrapper - { - private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap(*obj); + private static object UnwrapObject(void** obj) => UnmanagedObject.Unwrap(*obj); - public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object()); + public static void Init(void** obj) => *obj = UnmanagedObject.Wrap(new object()); - public static void Free(void** obj) => UnmanagedObject.Free(*obj); + public static void Free(void** obj) => UnmanagedObject.Free(*obj); - public static void Enter(void** obj) => Monitor.Enter(UnwrapObject(obj)); + public static void Enter(void** obj) => 
Monitor.Enter(UnwrapObject(obj)); - public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj)); + public static void Exit(void** obj) => Monitor.Exit(UnwrapObject(obj)); - public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj)); + public static void Pulse(void** obj) => Monitor.Pulse(UnwrapObject(obj)); - public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj)); + public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj)); - public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex)); - } -} + public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex)); +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs index bf0ac0cbd..5e051deb1 100644 --- a/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs +++ b/src/SharpCompress/Compressors/ZStandard/ThrowHelper.cs @@ -1,49 +1,48 @@ -using ZstdSharp.Unsafe; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public static unsafe class ThrowHelper { - public static unsafe class ThrowHelper + private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1); + private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2); + + public static nuint EnsureZstdSuccess(this nuint returnValue) + { + if (Unsafe.Methods.ZSTD_isError(returnValue)) + ThrowException(returnValue, Unsafe.Methods.ZSTD_getErrorName(returnValue)); + + return returnValue; + } + + public static nuint EnsureZdictSuccess(this nuint returnValue) + { + if (Unsafe.Methods.ZDICT_isError(returnValue)) + ThrowException(returnValue, Unsafe.Methods.ZDICT_getErrorName(returnValue)); + + return returnValue; + } + + public static ulong EnsureContentSizeOk(this ulong returnValue) + { + if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + 
"Decompressed content size is not specified" + ); + + if (returnValue == ZSTD_CONTENTSIZE_ERROR) + throw new ZstdException( + ZSTD_ErrorCode.ZSTD_error_GENERIC, + "Decompressed content size cannot be determined (e.g. invalid magic number, srcSize too small)" + ); + + return returnValue; + } + + private static void ThrowException(nuint returnValue, string message) { - private const ulong ZSTD_CONTENTSIZE_UNKNOWN = unchecked(0UL - 1); - private const ulong ZSTD_CONTENTSIZE_ERROR = unchecked(0UL - 2); - - public static nuint EnsureZstdSuccess(this nuint returnValue) - { - if (Methods.ZSTD_isError(returnValue)) - ThrowException(returnValue, Methods.ZSTD_getErrorName(returnValue)); - - return returnValue; - } - - public static nuint EnsureZdictSuccess(this nuint returnValue) - { - if (Methods.ZDICT_isError(returnValue)) - ThrowException(returnValue, Methods.ZDICT_getErrorName(returnValue)); - - return returnValue; - } - - public static ulong EnsureContentSizeOk(this ulong returnValue) - { - if (returnValue == ZSTD_CONTENTSIZE_UNKNOWN) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Decompressed content size is not specified" - ); - - if (returnValue == ZSTD_CONTENTSIZE_ERROR) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Decompressed content size cannot be determined (e.g. 
invalid magic number, srcSize too small)" - ); - - return returnValue; - } - - private static void ThrowException(nuint returnValue, string message) - { - var code = 0 - returnValue; - throw new ZstdException((ZSTD_ErrorCode)code, message); - } + var code = 0 - returnValue; + throw new ZstdException((ZSTD_ErrorCode)code, message); } } diff --git a/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs b/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs index deae7c05e..a5bb31bec 100644 --- a/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs +++ b/src/SharpCompress/Compressors/ZStandard/UnmanagedObject.cs @@ -1,19 +1,18 @@ using System; using System.Runtime.InteropServices; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +/* + * Wrap object to void* to make it unmanaged + */ +internal static unsafe class UnmanagedObject { - /* - * Wrap object to void* to make it unmanaged - */ - internal static unsafe class UnmanagedObject - { - public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj)); + public static void* Wrap(object obj) => (void*)GCHandle.ToIntPtr(GCHandle.Alloc(obj)); - private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value); + private static GCHandle UnwrapGcHandle(void* value) => GCHandle.FromIntPtr((IntPtr)value); - public static T Unwrap(void* value) => (T)UnwrapGcHandle(value).Target!; + public static T Unwrap(void* value) => (T)UnwrapGcHandle(value).Target!; - public static void Free(void* value) => UnwrapGcHandle(value).Free(); - } + public static void Free(void* value) => UnwrapGcHandle(value).Free(); } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs index 46e132e13..cecdd011e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Allocations.cs @@ -1,53 +1,52 @@ using 
System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* custom memory allocation functions */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem) + { + if (customMem.customAlloc != null) + return ((delegate* managed)customMem.customAlloc)( + customMem.opaque, + size + ); + return malloc(size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem) { - /* custom memory allocation functions */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_customMalloc(nuint size, ZSTD_customMem customMem) + if (customMem.customAlloc != null) { - if (customMem.customAlloc != null) - return ((delegate* managed)customMem.customAlloc)( - customMem.opaque, - size - ); - return malloc(size); + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* ptr = ((delegate* managed)customMem.customAlloc)( + customMem.opaque, + size + ); + memset(ptr, 0, (uint)size); + return ptr; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_customCalloc(nuint size, ZSTD_customMem customMem) + return calloc(1, size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) + { + if (ptr != null) { - if (customMem.customAlloc != null) - { - /* calloc implemented as malloc+memset; - * not as efficient as calloc, but next best guess for custom malloc */ - void* ptr = ((delegate* managed)customMem.customAlloc)( + if (customMem.customFree != null) + ((delegate* managed)customMem.customFree)( 
customMem.opaque, - size + ptr ); - memset(ptr, 0, (uint)size); - return ptr; - } - - return calloc(1, size); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) - { - if (ptr != null) - { - if (customMem.customFree != null) - ((delegate* managed)customMem.customFree)( - customMem.opaque, - ptr - ); - else - free(ptr); - } + else + free(ptr); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs index b3be314f0..b8ef82f3f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* bitStream can mix input from multiple sources. + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ +public unsafe struct BIT_CStream_t { - /* bitStream can mix input from multiple sources. - * A critical property of these streams is that they encode and decode in **reverse** direction. - * So the first bit sequence you add will be the last to be read, like a LIFO stack. 
- */ - public unsafe struct BIT_CStream_t - { - public nuint bitContainer; - public uint bitPos; - public sbyte* startPtr; - public sbyte* ptr; - public sbyte* endPtr; - } -} + public nuint bitContainer; + public uint bitPos; + public sbyte* startPtr; + public sbyte* ptr; + public sbyte* endPtr; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs index aa2966ca3..4c0b15062 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum BIT_DStream_status { - public enum BIT_DStream_status - { - /* fully refilled */ - BIT_DStream_unfinished = 0, + /* fully refilled */ + BIT_DStream_unfinished = 0, - /* still some bits left in bitstream */ - BIT_DStream_endOfBuffer = 1, + /* still some bits left in bitstream */ + BIT_DStream_endOfBuffer = 1, - /* bitstream entirely consumed, bit-exact */ - BIT_DStream_completed = 2, + /* bitstream entirely consumed, bit-exact */ + BIT_DStream_completed = 2, - /* user requested more bits than present in bitstream */ - BIT_DStream_overflow = 3, - } -} + /* user requested more bits than present in bitstream */ + BIT_DStream_overflow = 3, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs index 76cc23611..c0e96134d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_t.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-******************************************** + * bitStream decoding API (read backward) + 
**********************************************/ +public unsafe struct BIT_DStream_t { - /*-******************************************** - * bitStream decoding API (read backward) - **********************************************/ - public unsafe struct BIT_DStream_t - { - public nuint bitContainer; - public uint bitsConsumed; - public sbyte* ptr; - public sbyte* start; - public sbyte* limitPtr; - } + public nuint bitContainer; + public uint bitsConsumed; + public sbyte* ptr; + public sbyte* start; + public sbyte* limitPtr; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs index 14fdc46ec..e5484ae1e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bits.cs @@ -1,61 +1,60 @@ using System; using System.Numerics; using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countTrailingZeros32(uint val) { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_countTrailingZeros32(uint val) - { - assert(val != 0); - return (uint)BitOperations.TrailingZeroCount(val); - } + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_countLeadingZeros32(uint val) - { - assert(val != 0); - return (uint)BitOperations.LeadingZeroCount(val); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countLeadingZeros32(uint val) + { + assert(val != 0); + return (uint)BitOperations.LeadingZeroCount(val); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - 
private static uint ZSTD_countTrailingZeros64(ulong val) - { - assert(val != 0); - return (uint)BitOperations.TrailingZeroCount(val); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countTrailingZeros64(ulong val) + { + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_countLeadingZeros64(ulong val) - { - assert(val != 0); - return (uint)BitOperations.LeadingZeroCount(val); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_countLeadingZeros64(ulong val) + { + assert(val != 0); + return (uint)BitOperations.LeadingZeroCount(val); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_NbCommonBytes(nuint val) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_NbCommonBytes(nuint val) + { + assert(val != 0); + if (BitConverter.IsLittleEndian) { - assert(val != 0); - if (BitConverter.IsLittleEndian) - { - return MEM_64bits - ? (uint)BitOperations.TrailingZeroCount(val) >> 3 - : (uint)BitOperations.TrailingZeroCount((uint)val) >> 3; - } - return MEM_64bits - ? (uint)BitOperations.LeadingZeroCount(val) >> 3 - : (uint)BitOperations.LeadingZeroCount((uint)val) >> 3; + ? (uint)BitOperations.TrailingZeroCount(val) >> 3 + : (uint)BitOperations.TrailingZeroCount((uint)val) >> 3; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_highbit32(uint val) - { - assert(val != 0); - return (uint)BitOperations.Log2(val); - } + return MEM_64bits + ? 
(uint)BitOperations.LeadingZeroCount(val) >> 3 + : (uint)BitOperations.LeadingZeroCount((uint)val) >> 3; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_highbit32(uint val) + { + assert(val != 0); + return (uint)BitOperations.Log2(val); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs index 41a5ea77f..d95e90764 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs @@ -1,15 +1,15 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics.X86; #endif +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { #if NET7_0_OR_GREATER private static ReadOnlySpan Span_BIT_mask => new uint[32] @@ -54,72 +54,72 @@ ref MemoryMarshal.GetReference(Span_BIT_mask) ); #else - private static readonly uint* BIT_mask = GetArrayPointer( - new uint[32] - { - 0, - 1, - 3, - 7, - 0xF, - 0x1F, - 0x3F, - 0x7F, - 0xFF, - 0x1FF, - 0x3FF, - 0x7FF, - 0xFFF, - 0x1FFF, - 0x3FFF, - 0x7FFF, - 0xFFFF, - 0x1FFFF, - 0x3FFFF, - 0x7FFFF, - 0xFFFFF, - 0x1FFFFF, - 0x3FFFFF, - 0x7FFFFF, - 0xFFFFFF, - 0x1FFFFFF, - 0x3FFFFFF, - 0x7FFFFFF, - 0xFFFFFFF, - 0x1FFFFFFF, - 0x3FFFFFFF, - 0x7FFFFFFF, - } - ); -#endif - /*-************************************************************** - * bitStream encoding - ****************************************************************/ - /*! 
BIT_initCStream() : - * `dstCapacity` must be > sizeof(size_t) - * @return : 0 if success, - * otherwise an error code (can be tested using ERR_isError()) */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_initCStream( - ref BIT_CStream_t bitC, - void* startPtr, - nuint dstCapacity - ) + private static readonly uint* BIT_mask = GetArrayPointer( + new uint[32] { - bitC.bitContainer = 0; - bitC.bitPos = 0; - bitC.startPtr = (sbyte*)startPtr; - bitC.ptr = bitC.startPtr; - bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint); - if (dstCapacity <= (nuint)sizeof(nuint)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - return 0; + 0, + 1, + 3, + 7, + 0xF, + 0x1F, + 0x3F, + 0x7F, + 0xFF, + 0x1FF, + 0x3FF, + 0x7FF, + 0xFFF, + 0x1FFF, + 0x3FFF, + 0x7FFF, + 0xFFFF, + 0x1FFFF, + 0x3FFFF, + 0x7FFFF, + 0xFFFFF, + 0x1FFFFF, + 0x3FFFFF, + 0x7FFFFF, + 0xFFFFFF, + 0x1FFFFFF, + 0x3FFFFFF, + 0x7FFFFFF, + 0xFFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, } + ); +#endif + /*-************************************************************** + * bitStream encoding + ****************************************************************/ + /*! 
BIT_initCStream() : + * `dstCapacity` must be > sizeof(size_t) + * @return : 0 if success, + * otherwise an error code (can be tested using ERR_isError()) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initCStream( + ref BIT_CStream_t bitC, + void* startPtr, + nuint dstCapacity + ) + { + bitC.bitContainer = 0; + bitC.bitPos = 0; + bitC.startPtr = (sbyte*)startPtr; + bitC.ptr = bitC.startPtr; + bitC.endPtr = bitC.startPtr + dstCapacity - sizeof(nuint); + if (dstCapacity <= (nuint)sizeof(nuint)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return 0; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) - { - assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) + { + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); #if NETCOREAPP3_1_OR_GREATER if (Bmi2.X64.IsSupported) { @@ -132,194 +132,194 @@ private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) } #endif - return bitContainer & BIT_mask[nbBits]; - } + return bitContainer & BIT_mask[nbBits]; + } - /*! BIT_addBits() : - * can add up to 31 bits into `bitC`. - * Note : does not check for register overflow ! */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_addBits( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - nuint value, - uint nbBits - ) - { - assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); - assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); - bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos; - bitC_bitPos += nbBits; - } + /*! BIT_addBits() : + * can add up to 31 bits into `bitC`. + * Note : does not check for register overflow ! 
*/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_addBits( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + nuint value, + uint nbBits + ) + { + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); + assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); + bitC_bitContainer |= BIT_getLowerBits(value, nbBits) << (int)bitC_bitPos; + bitC_bitPos += nbBits; + } - /*! BIT_addBitsFast() : - * works only if `value` is _clean_, - * meaning all high bits above nbBits are 0 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_addBitsFast( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - nuint value, - uint nbBits - ) - { - assert(value >> (int)nbBits == 0); - assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); - bitC_bitContainer |= value << (int)bitC_bitPos; - bitC_bitPos += nbBits; - } + /*! BIT_addBitsFast() : + * works only if `value` is _clean_, + * meaning all high bits above nbBits are 0 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_addBitsFast( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + nuint value, + uint nbBits + ) + { + assert(value >> (int)nbBits == 0); + assert(nbBits + bitC_bitPos < (uint)(sizeof(nuint) * 8)); + bitC_bitContainer |= value << (int)bitC_bitPos; + bitC_bitPos += nbBits; + } - /*! BIT_flushBitsFast() : - * assumption : bitContainer has not overflowed - * unsafe version; does not check buffer overflow */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_flushBitsFast( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - ref sbyte* bitC_ptr, - sbyte* bitC_endPtr - ) - { - nuint nbBytes = bitC_bitPos >> 3; - assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); - assert(bitC_ptr <= bitC_endPtr); - MEM_writeLEST(bitC_ptr, bitC_bitContainer); - bitC_ptr += nbBytes; - bitC_bitPos &= 7; - bitC_bitContainer >>= (int)(nbBytes * 8); - } + /*! 
BIT_flushBitsFast() : + * assumption : bitContainer has not overflowed + * unsafe version; does not check buffer overflow */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_flushBitsFast( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr + ) + { + nuint nbBytes = bitC_bitPos >> 3; + assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); + assert(bitC_ptr <= bitC_endPtr); + MEM_writeLEST(bitC_ptr, bitC_bitContainer); + bitC_ptr += nbBytes; + bitC_bitPos &= 7; + bitC_bitContainer >>= (int)(nbBytes * 8); + } - /*! BIT_flushBits() : - * assumption : bitContainer has not overflowed - * safe version; check for buffer overflow, and prevents it. - * note : does not signal buffer overflow. - * overflow will be revealed later on using BIT_closeCStream() */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_flushBits( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - ref sbyte* bitC_ptr, - sbyte* bitC_endPtr - ) - { - nuint nbBytes = bitC_bitPos >> 3; - assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); - assert(bitC_ptr <= bitC_endPtr); - MEM_writeLEST(bitC_ptr, bitC_bitContainer); - bitC_ptr += nbBytes; - if (bitC_ptr > bitC_endPtr) - bitC_ptr = bitC_endPtr; - bitC_bitPos &= 7; - bitC_bitContainer >>= (int)(nbBytes * 8); - } + /*! BIT_flushBits() : + * assumption : bitContainer has not overflowed + * safe version; check for buffer overflow, and prevents it. + * note : does not signal buffer overflow. 
+ * overflow will be revealed later on using BIT_closeCStream() */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_flushBits( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr + ) + { + nuint nbBytes = bitC_bitPos >> 3; + assert(bitC_bitPos < (uint)(sizeof(nuint) * 8)); + assert(bitC_ptr <= bitC_endPtr); + MEM_writeLEST(bitC_ptr, bitC_bitContainer); + bitC_ptr += nbBytes; + if (bitC_ptr > bitC_endPtr) + bitC_ptr = bitC_endPtr; + bitC_bitPos &= 7; + bitC_bitContainer >>= (int)(nbBytes * 8); + } + + /*! BIT_closeCStream() : + * @return : size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_closeCStream( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + sbyte* bitC_ptr, + sbyte* bitC_endPtr, + sbyte* bitC_startPtr + ) + { + BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + if (bitC_ptr >= bitC_endPtr) + return 0; + return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0); + } - /*! BIT_closeCStream() : - * @return : size of CStream, in bytes, - * or 0 if it could not fit into dstBuffer */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_closeCStream( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - sbyte* bitC_ptr, - sbyte* bitC_endPtr, - sbyte* bitC_startPtr - ) + /*-******************************************************** + * bitStream decoding + **********************************************************/ + /*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. 
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize) + { + if (srcSize < 1) { - BIT_addBitsFast(ref bitC_bitContainer, ref bitC_bitPos, 1, 1); - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); - if (bitC_ptr >= bitC_endPtr) - return 0; - return (nuint)(bitC_ptr - bitC_startPtr) + (nuint)(bitC_bitPos > 0 ? 1 : 0); + *bitD = new BIT_DStream_t(); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /*-******************************************************** - * bitStream decoding - **********************************************************/ - /*! BIT_initDStream() : - * Initialize a BIT_DStream_t. - * `bitD` : a pointer to an already allocated BIT_DStream_t structure. - * `srcSize` must be the *exact* size of the bitStream, in bytes. - * @return : size of stream (== srcSize), or an errorCode if a problem is detected - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint srcSize) + bitD->start = (sbyte*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(nuint); + if (srcSize >= (nuint)sizeof(nuint)) { - if (srcSize < 1) + bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); + bitD->bitContainer = MEM_readLEST(bitD->ptr); { - *bitD = new BIT_DStream_t(); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD->bitsConsumed = lastByte != 0 ? 
8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - - bitD->start = (sbyte*)srcBuffer; - bitD->limitPtr = bitD->start + sizeof(nuint); - if (srcSize >= (nuint)sizeof(nuint)) + } + else + { + bitD->ptr = bitD->start; + bitD->bitContainer = *(byte*)bitD->start; + switch (srcSize) { - bitD->ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); - bitD->bitContainer = MEM_readLEST(bitD->ptr); - { - byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; - bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; - if (lastByte == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } + case 7: + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + goto case 6; + case 6: + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + goto case 5; + case 5: + bitD->bitContainer += + (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + goto case 4; + case 4: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; + goto case 3; + case 3: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; + goto case 2; + case 2: + bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; + goto default; + default: + break; } - else + { - bitD->ptr = bitD->start; - bitD->bitContainer = *(byte*)bitD->start; - switch (srcSize) - { - case 7: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; - goto case 6; - case 6: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; - goto case 5; - case 5: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; - goto case 4; - case 4: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; - goto case 3; - case 3: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; - goto case 2; - case 2: - bitD->bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; - goto default; - default: - break; - } - - { - 
byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; - bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; - if (lastByte == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - return srcSize; + bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_getUpperBits(nuint bitContainer, uint start) - { - return bitContainer >> (int)start; - } + return srcSize; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits) - { - uint regMask = (uint)(sizeof(nuint) * 8 - 1); - assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getUpperBits(nuint bitContainer, uint start) + { + return bitContainer >> (int)start; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBits) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); #if NETCOREAPP3_1_OR_GREATER if (Bmi2.X64.IsSupported) { @@ -332,95 +332,95 @@ private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBi } #endif - return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1); - } + return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1); + } - /*! BIT_lookBits() : - * Provides next n bits from local register. - * local register is not modified. - * On 32-bits, maxNbBits==24. 
- * On 64-bits, maxNbBits==56. - * @return : value extracted */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits) - { - return BIT_getMiddleBits( - bitD->bitContainer, - (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits, - nbBits - ); - } + /*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBits(BIT_DStream_t* bitD, uint nbBits) + { + return BIT_getMiddleBits( + bitD->bitContainer, + (uint)(sizeof(nuint) * 8) - bitD->bitsConsumed - nbBits, + nbBits + ); + } - /*! BIT_lookBitsFast() : - * unsafe version; only works if nbBits >= 1 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits) - { - uint regMask = (uint)(sizeof(nuint) * 8 - 1); - assert(nbBits >= 1); - return bitD->bitContainer - << (int)(bitD->bitsConsumed & regMask) - >> (int)(regMask + 1 - nbBits & regMask); - } + /*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits >= 1); + return bitD->bitContainer + << (int)(bitD->bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits) - { - bitD->bitsConsumed += nbBits; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_skipBits(BIT_DStream_t* bitD, uint nbBits) + { + bitD->bitsConsumed += nbBits; + } - /*! BIT_readBits() : - * Read (consume) next n bits from local register and update. 
- * Pay attention to not read more than nbBits contained into local register. - * @return : extracted value. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits) - { - nuint value = BIT_lookBits(bitD, nbBits); - BIT_skipBits(bitD, nbBits); - return value; - } + /*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBits(BIT_DStream_t* bitD, uint nbBits) + { + nuint value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; + } - /*! BIT_readBitsFast() : - * unsafe version; only works if nbBits >= 1 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits) - { - nuint value = BIT_lookBitsFast(bitD, nbBits); - assert(nbBits >= 1); - BIT_skipBits(bitD, nbBits); - return value; - } + /*! BIT_readBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBitsFast(BIT_DStream_t* bitD, uint nbBits) + { + nuint value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); + return value; + } - /*! BIT_reloadDStream_internal() : - * Simple variant of BIT_reloadDStream(), with two conditions: - * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 - * 2. 
look window is valid after shifted down : bitD->ptr >= bitD->start - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD) - { - assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8)); - bitD->ptr -= bitD->bitsConsumed >> 3; - assert(bitD->ptr >= bitD->start); - bitD->bitsConsumed &= 7; - bitD->bitContainer = MEM_readLEST(bitD->ptr); - return BIT_DStream_status.BIT_DStream_unfinished; - } + /*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. look window is valid after shifted down : bitD->ptr >= bitD->start + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD) + { + assert(bitD->bitsConsumed <= (uint)(sizeof(nuint) * 8)); + bitD->ptr -= bitD->bitsConsumed >> 3; + assert(bitD->ptr >= bitD->start); + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_status.BIT_DStream_unfinished; + } - /*! BIT_reloadDStreamFast() : - * Similar to BIT_reloadDStream(), but with two differences: - * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! - * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this - * point you must use BIT_reloadDStream() to reload. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) - { - if (bitD->ptr < bitD->limitPtr) - return BIT_DStream_status.BIT_DStream_overflow; - return BIT_reloadDStream_internal(bitD); - } + /*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! + * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) + { + if (bitD->ptr < bitD->limitPtr) + return BIT_DStream_status.BIT_DStream_overflow; + return BIT_reloadDStream_internal(bitD); + } #if NET7_0_OR_GREATER private static ReadOnlySpan Span_static_zeroFilled => @@ -432,229 +432,260 @@ ref MemoryMarshal.GetReference(Span_static_zeroFilled) ); #else - private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer( - new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 } - ); + private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer( + new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 } + ); #endif - /*! BIT_reloadDStream() : - * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not never beyond src buffer. - * @return : status of `BIT_DStream_t` internal register. - * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) + /*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will not never beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. 
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) + { + if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8)) { - if (bitD->bitsConsumed > (uint)(sizeof(nuint) * 8)) - { - bitD->ptr = (sbyte*)&static_zeroFilled[0]; - return BIT_DStream_status.BIT_DStream_overflow; - } + bitD->ptr = (sbyte*)&static_zeroFilled[0]; + return BIT_DStream_status.BIT_DStream_overflow; + } - assert(bitD->ptr >= bitD->start); - if (bitD->ptr >= bitD->limitPtr) - { - return BIT_reloadDStream_internal(bitD); - } + assert(bitD->ptr >= bitD->start); + if (bitD->ptr >= bitD->limitPtr) + { + return BIT_reloadDStream_internal(bitD); + } - if (bitD->ptr == bitD->start) - { - if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8)) - return BIT_DStream_status.BIT_DStream_endOfBuffer; - return BIT_DStream_status.BIT_DStream_completed; - } + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < (uint)(sizeof(nuint) * 8)) + return BIT_DStream_status.BIT_DStream_endOfBuffer; + return BIT_DStream_status.BIT_DStream_completed; + } + { + uint nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { - uint nbBytes = bitD->bitsConsumed >> 3; - BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; - if (bitD->ptr - nbBytes < bitD->start) - { - nbBytes = (uint)(bitD->ptr - bitD->start); - result = BIT_DStream_status.BIT_DStream_endOfBuffer; - } - - bitD->ptr -= nbBytes; - bitD->bitsConsumed -= nbBytes * 8; - bitD->bitContainer = MEM_readLEST(bitD->ptr); - return result; + nbBytes = (uint)(bitD->ptr - bitD->start); + result = BIT_DStream_status.BIT_DStream_endOfBuffer; } + + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes * 8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return result; } + } - /*! 
BIT_endOfDStream() : - * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint BIT_endOfDStream(BIT_DStream_t* DStream) - { - return - DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) + /*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint BIT_endOfDStream(BIT_DStream_t* DStream) + { + return + DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) ? 1U : 0U; + } + + /*-******************************************************** + * bitStream decoding + **********************************************************/ + /*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. + * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize) + { + if (srcSize < 1) + { + bitD = new BIT_DStream_t(); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /*-******************************************************** - * bitStream decoding - **********************************************************/ - /*! BIT_initDStream() : - * Initialize a BIT_DStream_t. - * `bitD` : a pointer to an already allocated BIT_DStream_t structure. - * `srcSize` must be the *exact* size of the bitStream, in bytes. 
- * @return : size of stream (== srcSize), or an errorCode if a problem is detected - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nuint srcSize) + bitD.start = (sbyte*)srcBuffer; + bitD.limitPtr = bitD.start + sizeof(nuint); + if (srcSize >= (nuint)sizeof(nuint)) { - if (srcSize < 1) + bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); + bitD.bitContainer = MEM_readLEST(bitD.ptr); { - bitD = new BIT_DStream_t(); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - - bitD.start = (sbyte*)srcBuffer; - bitD.limitPtr = bitD.start + sizeof(nuint); - if (srcSize >= (nuint)sizeof(nuint)) + } + else + { + bitD.ptr = bitD.start; + bitD.bitContainer = *(byte*)bitD.start; + switch (srcSize) { - bitD.ptr = (sbyte*)srcBuffer + srcSize - sizeof(nuint); - bitD.bitContainer = MEM_readLEST(bitD.ptr); - { - byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; - bitD.bitsConsumed = lastByte != 0 ? 
8 - ZSTD_highbit32(lastByte) : 0; - if (lastByte == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } + case 7: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + goto case 6; + case 6: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + goto case 5; + case 5: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + goto case 4; + case 4: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; + goto case 3; + case 3: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; + goto case 2; + case 2: + bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; + goto default; + default: + break; } - else + { - bitD.ptr = bitD.start; - bitD.bitContainer = *(byte*)bitD.start; - switch (srcSize) - { - case 7: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; - goto case 6; - case 6: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; - goto case 5; - case 5: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; - goto case 4; - case 4: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; - goto case 3; - case 3: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[2] << 16; - goto case 2; - case 2: - bitD.bitContainer += (nuint)((byte*)srcBuffer)[1] << 8; - goto default; - default: - break; - } - - { - byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; - bitD.bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; - if (lastByte == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; + byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; + bitD.bitsConsumed = lastByte != 0 ? 
8 - ZSTD_highbit32(lastByte) : 0; + if (lastByte == 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - return srcSize; + bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; } - /*! BIT_lookBits() : - * Provides next n bits from local register. - * local register is not modified. - * On 32-bits, maxNbBits==24. - * On 64-bits, maxNbBits==56. - * @return : value extracted */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBits( - nuint bitD_bitContainer, - uint bitD_bitsConsumed, - uint nbBits - ) - { - return BIT_getMiddleBits( - bitD_bitContainer, - (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits, - nbBits - ); - } + return srcSize; + } - /*! BIT_lookBitsFast() : - * unsafe version; only works if nbBits >= 1 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBitsFast( - nuint bitD_bitContainer, - uint bitD_bitsConsumed, - uint nbBits - ) - { - uint regMask = (uint)(sizeof(nuint) * 8 - 1); - assert(nbBits >= 1); - return bitD_bitContainer - << (int)(bitD_bitsConsumed & regMask) - >> (int)(regMask + 1 - nbBits & regMask); - } + /*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBits( + nuint bitD_bitContainer, + uint bitD_bitsConsumed, + uint nbBits + ) + { + return BIT_getMiddleBits( + bitD_bitContainer, + (uint)(sizeof(nuint) * 8) - bitD_bitsConsumed - nbBits, + nbBits + ); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits) - { - bitD_bitsConsumed += nbBits; - } + /*! 
BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_lookBitsFast( + nuint bitD_bitContainer, + uint bitD_bitsConsumed, + uint nbBits + ) + { + uint regMask = (uint)(sizeof(nuint) * 8 - 1); + assert(nbBits >= 1); + return bitD_bitContainer + << (int)(bitD_bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); + } - /*! BIT_readBits() : - * Read (consume) next n bits from local register and update. - * Pay attention to not read more than nbBits contained into local register. - * @return : extracted value. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBits( - nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - uint nbBits - ) - { - nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits); - BIT_skipBits(ref bitD_bitsConsumed, nbBits); - return value; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void BIT_skipBits(ref uint bitD_bitsConsumed, uint nbBits) + { + bitD_bitsConsumed += nbBits; + } + + /*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBits( + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + uint nbBits + ) + { + nuint value = BIT_lookBits(bitD_bitContainer, bitD_bitsConsumed, nbBits); + BIT_skipBits(ref bitD_bitsConsumed, nbBits); + return value; + } + + /*! 
BIT_readBitsFast() : + * unsafe version; only works if nbBits >= 1 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint BIT_readBitsFast( + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + uint nbBits + ) + { + nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits); + assert(nbBits >= 1); + BIT_skipBits(ref bitD_bitsConsumed, nbBits); + return value; + } + + /*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! + * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStreamFast( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start, + sbyte* bitD_limitPtr + ) + { + if (bitD_ptr < bitD_limitPtr) + return BIT_DStream_status.BIT_DStream_overflow; + return BIT_reloadDStream_internal( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start + ); + } - /*! BIT_readBitsFast() : - * unsafe version; only works if nbBits >= 1 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_readBitsFast( - nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - uint nbBits - ) + /*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will not never beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. 
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start, + sbyte* bitD_limitPtr + ) + { + if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8)) { - nuint value = BIT_lookBitsFast(bitD_bitContainer, bitD_bitsConsumed, nbBits); - assert(nbBits >= 1); - BIT_skipBits(ref bitD_bitsConsumed, nbBits); - return value; + bitD_ptr = (sbyte*)&static_zeroFilled[0]; + return BIT_DStream_status.BIT_DStream_overflow; } - /*! BIT_reloadDStreamFast() : - * Similar to BIT_reloadDStream(), but with two differences: - * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! - * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this - * point you must use BIT_reloadDStream() to reload. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStreamFast( - ref nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - ref sbyte* bitD_ptr, - sbyte* bitD_start, - sbyte* bitD_limitPtr - ) + assert(bitD_ptr >= bitD_start); + if (bitD_ptr >= bitD_limitPtr) { - if (bitD_ptr < bitD_limitPtr) - return BIT_DStream_status.BIT_DStream_overflow; return BIT_reloadDStream_internal( ref bitD_bitContainer, ref bitD_bitsConsumed, @@ -663,94 +694,62 @@ private static BIT_DStream_status BIT_reloadDStreamFast( ); } - /*! BIT_reloadDStream() : - * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not never beyond src buffer. - * @return : status of `BIT_DStream_t` internal register. 
- * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream( - ref nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - ref sbyte* bitD_ptr, - sbyte* bitD_start, - sbyte* bitD_limitPtr - ) + if (bitD_ptr == bitD_start) { - if (bitD_bitsConsumed > (uint)(sizeof(nuint) * 8)) - { - bitD_ptr = (sbyte*)&static_zeroFilled[0]; - return BIT_DStream_status.BIT_DStream_overflow; - } - - assert(bitD_ptr >= bitD_start); - if (bitD_ptr >= bitD_limitPtr) - { - return BIT_reloadDStream_internal( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start - ); - } - - if (bitD_ptr == bitD_start) - { - if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8)) - return BIT_DStream_status.BIT_DStream_endOfBuffer; - return BIT_DStream_status.BIT_DStream_completed; - } + if (bitD_bitsConsumed < (uint)(sizeof(nuint) * 8)) + return BIT_DStream_status.BIT_DStream_endOfBuffer; + return BIT_DStream_status.BIT_DStream_completed; + } + { + uint nbBytes = bitD_bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; + if (bitD_ptr - nbBytes < bitD_start) { - uint nbBytes = bitD_bitsConsumed >> 3; - BIT_DStream_status result = BIT_DStream_status.BIT_DStream_unfinished; - if (bitD_ptr - nbBytes < bitD_start) - { - nbBytes = (uint)(bitD_ptr - bitD_start); - result = BIT_DStream_status.BIT_DStream_endOfBuffer; - } - - bitD_ptr -= nbBytes; - bitD_bitsConsumed -= nbBytes * 8; - bitD_bitContainer = MEM_readLEST(bitD_ptr); - return result; + nbBytes = (uint)(bitD_ptr - bitD_start); + result = BIT_DStream_status.BIT_DStream_endOfBuffer; } - } - /*! BIT_reloadDStream_internal() : - * Simple variant of BIT_reloadDStream(), with two conditions: - * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 - * 2. 
look window is valid after shifted down : bitD->ptr >= bitD->start - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static BIT_DStream_status BIT_reloadDStream_internal( - ref nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - ref sbyte* bitD_ptr, - sbyte* bitD_start - ) - { - assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8)); - bitD_ptr -= bitD_bitsConsumed >> 3; - assert(bitD_ptr >= bitD_start); - bitD_bitsConsumed &= 7; + bitD_ptr -= nbBytes; + bitD_bitsConsumed -= nbBytes * 8; bitD_bitContainer = MEM_readLEST(bitD_ptr); - return BIT_DStream_status.BIT_DStream_unfinished; + return result; } + } - /*! BIT_endOfDStream() : - * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint BIT_endOfDStream( - uint DStream_bitsConsumed, - sbyte* DStream_ptr, - sbyte* DStream_start - ) - { - return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8) - ? 1U - : 0U; - } + /*! BIT_reloadDStream_internal() : + * Simple variant of BIT_reloadDStream(), with two conditions: + * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8 + * 2. look window is valid after shifted down : bitD->ptr >= bitD->start + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static BIT_DStream_status BIT_reloadDStream_internal( + ref nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ref sbyte* bitD_ptr, + sbyte* bitD_start + ) + { + assert(bitD_bitsConsumed <= (uint)(sizeof(nuint) * 8)); + bitD_ptr -= bitD_bitsConsumed >> 3; + assert(bitD_ptr >= bitD_start); + bitD_bitsConsumed &= 7; + bitD_bitContainer = MEM_readLEST(bitD_ptr); + return BIT_DStream_status.BIT_DStream_unfinished; + } + + /*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint BIT_endOfDStream( + uint DStream_bitsConsumed, + sbyte* DStream_ptr, + sbyte* DStream_start + ) + { + return DStream_ptr == DStream_start && DStream_bitsConsumed == (uint)(sizeof(nuint) * 8) + ? 1U + : 0U; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs index 5905c452d..46f1d7061 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct BlockSummary { - public struct BlockSummary - { - public nuint nbSequences; - public nuint blockSize; - public nuint litSize; - } -} + public nuint nbSequences; + public nuint blockSize; + public nuint litSize; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs index 5c26ba0a2..1c56883ab 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs @@ -1,21 +1,20 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * COVER_best_t is used for two purposes: + * 1. Synchronizing threads. + * 2. Saving the best parameters and dictionary. + * + * All of the methods except COVER_best_init() are thread safe if zstd is + * compiled with multithreaded support. + */ +public unsafe struct COVER_best_s { - /** - * COVER_best_t is used for two purposes: - * 1. Synchronizing threads. - * 2. Saving the best parameters and dictionary. - * - * All of the methods except COVER_best_init() are thread safe if zstd is - * compiled with multithreaded support. 
- */ - public unsafe struct COVER_best_s - { - public void* mutex; - public void* cond; - public nuint liveJobs; - public void* dict; - public nuint dictSize; - public ZDICT_cover_params_t parameters; - public nuint compressedSize; - } -} + public void* mutex; + public void* cond; + public nuint liveJobs; + public void* dict; + public nuint dictSize; + public ZDICT_cover_params_t parameters; + public nuint compressedSize; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs index cddd0f7b7..839d37351 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs @@ -1,20 +1,19 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-************************************* + * Context + ***************************************/ +public unsafe struct COVER_ctx_t { - /*-************************************* - * Context - ***************************************/ - public unsafe struct COVER_ctx_t - { - public byte* samples; - public nuint* offsets; - public nuint* samplesSizes; - public nuint nbSamples; - public nuint nbTrainSamples; - public nuint nbTestSamples; - public uint* suffix; - public nuint suffixSize; - public uint* freqs; - public uint* dmerAt; - public uint d; - } -} + public byte* samples; + public nuint* offsets; + public nuint* samplesSizes; + public nuint nbSamples; + public nuint nbTrainSamples; + public nuint nbTestSamples; + public uint* suffix; + public nuint suffixSize; + public uint* freqs; + public uint* dmerAt; + public uint d; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs index c077af0c0..f88d1e674 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs 
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_dictSelection.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Struct used for the dictionary selection function. + */ +public unsafe struct COVER_dictSelection { - /** - * Struct used for the dictionary selection function. - */ - public unsafe struct COVER_dictSelection - { - public byte* dictContent; - public nuint dictSize; - public nuint totalCompressedSize; - } + public byte* dictContent; + public nuint dictSize; + public nuint totalCompressedSize; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs index 3d4210616..dbb298909 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_epoch_info_t.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + *Number of epochs and size of each epoch. + */ +public struct COVER_epoch_info_t { - /** - *Number of epochs and size of each epoch. 
- */ - public struct COVER_epoch_info_t - { - public uint num; - public uint size; - } + public uint num; + public uint size; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs index 121a8dde7..502f33b0c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_pair_t_s.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct COVER_map_pair_t_s { - public struct COVER_map_pair_t_s - { - public uint key; - public uint value; - } + public uint key; + public uint value; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs index 1f872b013..7c7d29d73 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct COVER_map_s { - public unsafe struct COVER_map_s - { - public COVER_map_pair_t_s* data; - public uint sizeLog; - public uint size; - public uint sizeMask; - } -} + public COVER_map_pair_t_s* data; + public uint sizeLog; + public uint size; + public uint sizeMask; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs index c83a9f613..bc6be3ec4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * A segment is a range in the source as well as the score of the segment. 
+ */ +public struct COVER_segment_t { - /** - * A segment is a range in the source as well as the score of the segment. - */ - public struct COVER_segment_t - { - public uint begin; - public uint end; - public uint score; - } -} + public uint begin; + public uint end; + public uint score; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs index 91208d00c..789ca9dc6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_tryParameters_data_s.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Parameters for COVER_tryParameters(). + */ +public unsafe struct COVER_tryParameters_data_s { - /** - * Parameters for COVER_tryParameters(). - */ - public unsafe struct COVER_tryParameters_data_s - { - public COVER_ctx_t* ctx; - public COVER_best_s* best; - public nuint dictBufferCapacity; - public ZDICT_cover_params_t parameters; - } + public COVER_ctx_t* ctx; + public COVER_best_s* best; + public nuint dictBufferCapacity; + public ZDICT_cover_params_t parameters; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs index 35ca9cb85..9b3e78394 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Clevels.cs @@ -1,850 +1,849 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { - private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters = - new ZSTD_compressionParameters[4][] + private static readonly ZSTD_compressionParameters[][] ZSTD_defaultCParameters = + new 
ZSTD_compressionParameters[4][] + { + new ZSTD_compressionParameters[23] { - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters( - windowLog: 19, - chainLog: 12, - hashLog: 13, - searchLog: 1, - minMatch: 6, - targetLength: 1, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 19, - chainLog: 13, - hashLog: 14, - searchLog: 1, - minMatch: 7, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 20, - chainLog: 15, - hashLog: 16, - searchLog: 1, - minMatch: 6, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 16, - hashLog: 17, - searchLog: 1, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 18, - hashLog: 18, - searchLog: 1, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 18, - hashLog: 19, - searchLog: 3, - minMatch: 5, - targetLength: 2, - strategy: ZSTD_strategy.ZSTD_greedy - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 18, - hashLog: 19, - searchLog: 3, - minMatch: 5, - targetLength: 4, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 19, - hashLog: 20, - searchLog: 4, - minMatch: 5, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 21, - chainLog: 19, - hashLog: 20, - searchLog: 4, - minMatch: 5, - targetLength: 16, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 20, - hashLog: 21, - searchLog: 4, - minMatch: 5, - targetLength: 16, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 21, - hashLog: 22, - searchLog: 5, - minMatch: 5, - targetLength: 16, - strategy: 
ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 21, - hashLog: 22, - searchLog: 6, - minMatch: 5, - targetLength: 16, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 22, - hashLog: 23, - searchLog: 6, - minMatch: 5, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 22, - hashLog: 22, - searchLog: 4, - minMatch: 5, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 22, - hashLog: 23, - searchLog: 5, - minMatch: 5, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 23, - hashLog: 23, - searchLog: 6, - minMatch: 5, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 22, - chainLog: 22, - hashLog: 22, - searchLog: 5, - minMatch: 5, - targetLength: 48, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 23, - chainLog: 23, - hashLog: 22, - searchLog: 5, - minMatch: 4, - targetLength: 64, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 23, - chainLog: 23, - hashLog: 22, - searchLog: 6, - minMatch: 3, - targetLength: 64, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 23, - chainLog: 24, - hashLog: 22, - searchLog: 7, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 25, - chainLog: 25, - hashLog: 23, - searchLog: 7, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 26, - chainLog: 26, - hashLog: 24, - searchLog: 7, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new 
ZSTD_compressionParameters( - windowLog: 27, - chainLog: 27, - hashLog: 25, - searchLog: 9, - minMatch: 3, - targetLength: 999, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - }, - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 12, - hashLog: 13, - searchLog: 1, - minMatch: 5, - targetLength: 1, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 13, - hashLog: 14, - searchLog: 1, - minMatch: 6, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 14, - hashLog: 14, - searchLog: 1, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 16, - hashLog: 16, - searchLog: 1, - minMatch: 4, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 16, - hashLog: 17, - searchLog: 3, - minMatch: 5, - targetLength: 2, - strategy: ZSTD_strategy.ZSTD_greedy - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 17, - hashLog: 18, - searchLog: 5, - minMatch: 5, - targetLength: 2, - strategy: ZSTD_strategy.ZSTD_greedy - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 3, - minMatch: 5, - targetLength: 4, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 4, - minMatch: 4, - targetLength: 4, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 4, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 5, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - 
windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 6, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 5, - minMatch: 4, - targetLength: 12, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 7, - minMatch: 4, - targetLength: 12, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 4, - minMatch: 4, - targetLength: 16, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 4, - minMatch: 3, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 18, - hashLog: 19, - searchLog: 6, - minMatch: 3, - targetLength: 128, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 6, - minMatch: 3, - targetLength: 128, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 8, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 6, - minMatch: 3, - targetLength: 128, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 8, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 10, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - 
searchLog: 12, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 18, - chainLog: 19, - hashLog: 19, - searchLog: 13, - minMatch: 3, - targetLength: 999, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - }, - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 12, - hashLog: 12, - searchLog: 1, - minMatch: 5, - targetLength: 1, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 12, - hashLog: 13, - searchLog: 1, - minMatch: 6, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 13, - hashLog: 15, - searchLog: 1, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 15, - hashLog: 16, - searchLog: 2, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 17, - hashLog: 17, - searchLog: 2, - minMatch: 4, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 3, - minMatch: 4, - targetLength: 2, - strategy: ZSTD_strategy.ZSTD_greedy - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 3, - minMatch: 4, - targetLength: 4, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 3, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 4, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 5, - minMatch: 4, - 
targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 16, - hashLog: 17, - searchLog: 6, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 17, - hashLog: 17, - searchLog: 5, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 7, - minMatch: 4, - targetLength: 12, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 3, - minMatch: 4, - targetLength: 12, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 4, - minMatch: 3, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 6, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 6, - minMatch: 3, - targetLength: 128, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 8, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 10, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 5, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 7, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 
- ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 9, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 17, - chainLog: 18, - hashLog: 17, - searchLog: 11, - minMatch: 3, - targetLength: 999, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - }, - new ZSTD_compressionParameters[23] - { - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 12, - hashLog: 13, - searchLog: 1, - minMatch: 5, - targetLength: 1, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 15, - searchLog: 1, - minMatch: 5, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 15, - searchLog: 1, - minMatch: 4, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_fast - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 15, - searchLog: 2, - minMatch: 4, - targetLength: 0, - strategy: ZSTD_strategy.ZSTD_dfast - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 14, - searchLog: 4, - minMatch: 4, - targetLength: 2, - strategy: ZSTD_strategy.ZSTD_greedy - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 14, - searchLog: 3, - minMatch: 4, - targetLength: 4, - strategy: ZSTD_strategy.ZSTD_lazy - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 14, - searchLog: 4, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 14, - searchLog: 6, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 14, - hashLog: 14, - searchLog: 8, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_lazy2 - ), - new 
ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 14, - searchLog: 5, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 14, - searchLog: 9, - minMatch: 4, - targetLength: 8, - strategy: ZSTD_strategy.ZSTD_btlazy2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 14, - searchLog: 3, - minMatch: 4, - targetLength: 12, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 14, - searchLog: 4, - minMatch: 3, - targetLength: 24, - strategy: ZSTD_strategy.ZSTD_btopt - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 14, - searchLog: 5, - minMatch: 3, - targetLength: 32, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 6, - minMatch: 3, - targetLength: 64, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 7, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 5, - minMatch: 3, - targetLength: 48, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 6, - minMatch: 3, - targetLength: 128, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 7, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 8, - minMatch: 3, - targetLength: 256, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - 
chainLog: 15, - hashLog: 15, - searchLog: 8, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 9, - minMatch: 3, - targetLength: 512, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - new ZSTD_compressionParameters( - windowLog: 14, - chainLog: 15, - hashLog: 15, - searchLog: 10, - minMatch: 3, - targetLength: 999, - strategy: ZSTD_strategy.ZSTD_btultra2 - ), - }, - }; - } + new ZSTD_compressionParameters( + windowLog: 19, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 6, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 19, + chainLog: 13, + hashLog: 14, + searchLog: 1, + minMatch: 7, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 20, + chainLog: 15, + hashLog: 16, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 16, + hashLog: 17, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 18, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 19, + hashLog: 20, + searchLog: 4, + minMatch: 5, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 21, + chainLog: 19, + hashLog: 20, + searchLog: 4, + minMatch: 5, 
+ targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 20, + hashLog: 21, + searchLog: 4, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 21, + hashLog: 22, + searchLog: 5, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 21, + hashLog: 22, + searchLog: 6, + minMatch: 5, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 23, + searchLog: 6, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 22, + searchLog: 4, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 23, + searchLog: 5, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 23, + hashLog: 23, + searchLog: 6, + minMatch: 5, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 22, + chainLog: 22, + hashLog: 22, + searchLog: 5, + minMatch: 5, + targetLength: 48, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 23, + hashLog: 22, + searchLog: 5, + minMatch: 4, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 23, + hashLog: 22, + searchLog: 6, + minMatch: 3, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 23, + chainLog: 24, + hashLog: 22, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), 
+ new ZSTD_compressionParameters( + windowLog: 25, + chainLog: 25, + hashLog: 23, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 26, + chainLog: 26, + hashLog: 24, + searchLog: 7, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 27, + chainLog: 27, + hashLog: 25, + searchLog: 9, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 5, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 13, + hashLog: 14, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 14, + hashLog: 14, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 16, + hashLog: 16, + searchLog: 1, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 17, + hashLog: 18, + searchLog: 5, + minMatch: 5, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 3, + minMatch: 5, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new 
ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 5, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 7, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 4, + targetLength: 16, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 4, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 18, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + 
hashLog: 19, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 10, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 12, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 18, + chainLog: 19, + hashLog: 19, + searchLog: 13, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 12, + hashLog: 12, + searchLog: 1, + minMatch: 5, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 6, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 13, + hashLog: 15, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 15, + hashLog: 16, + searchLog: 2, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 17, + hashLog: 17, + searchLog: 2, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + 
searchLog: 3, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 16, + hashLog: 17, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 17, + hashLog: 17, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 7, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 3, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 4, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 6, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 10, + minMatch: 3, + targetLength: 512, + strategy: 
ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 5, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 7, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 9, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 17, + chainLog: 18, + hashLog: 17, + searchLog: 11, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + new ZSTD_compressionParameters[23] + { + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 12, + hashLog: 13, + searchLog: 1, + minMatch: 5, + targetLength: 1, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 1, + minMatch: 5, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 1, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_fast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 15, + searchLog: 2, + minMatch: 4, + targetLength: 0, + strategy: ZSTD_strategy.ZSTD_dfast + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 4, + minMatch: 4, + targetLength: 2, + strategy: ZSTD_strategy.ZSTD_greedy + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 3, + minMatch: 4, + targetLength: 4, + strategy: ZSTD_strategy.ZSTD_lazy + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 4, + minMatch: 4, + targetLength: 8, + strategy: 
ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 6, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 14, + hashLog: 14, + searchLog: 8, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_lazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 5, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 9, + minMatch: 4, + targetLength: 8, + strategy: ZSTD_strategy.ZSTD_btlazy2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 3, + minMatch: 4, + targetLength: 12, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 4, + minMatch: 3, + targetLength: 24, + strategy: ZSTD_strategy.ZSTD_btopt + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 14, + searchLog: 5, + minMatch: 3, + targetLength: 32, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 6, + minMatch: 3, + targetLength: 64, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 5, + minMatch: 3, + targetLength: 48, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 6, + minMatch: 3, + targetLength: 128, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new 
ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 7, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 8, + minMatch: 3, + targetLength: 256, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 8, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 9, + minMatch: 3, + targetLength: 512, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + new ZSTD_compressionParameters( + windowLog: 14, + chainLog: 15, + hashLog: 15, + searchLog: 10, + minMatch: 3, + targetLength: 999, + strategy: ZSTD_strategy.ZSTD_btultra2 + ), + }, + }; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs index ab5def3d6..97fead59d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Compiler.cs @@ -1,62 +1,61 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* @return 1 if @u is a 2^n value, 0 otherwise + * useful to check a value is valid for alignment restrictions */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_isPower2(nuint u) { - /* @return 1 if @u is a 2^n value, 0 otherwise - * useful to check a value is valid for alignment restrictions */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_isPower2(nuint u) - { - return (u & u - 1) == 0 ? 1 : 0; - } + return (u & u - 1) == 0 ? 
1 : 0; + } - /** - * Helper function to perform a wrapped pointer difference without triggering - * UBSAN. - * - * @returns lhs - rhs with wrapping - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs) - { - return (nint)(lhs - rhs); - } + /** + * Helper function to perform a wrapped pointer difference without triggering + * UBSAN. + * + * @returns lhs - rhs with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nint ZSTD_wrappedPtrDiff(byte* lhs, byte* rhs) + { + return (nint)(lhs - rhs); + } - /** - * Helper function to perform a wrapped pointer add without triggering UBSAN. - * - * @return ptr + add with wrapping - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add) - { - return ptr + add; - } + /** + * Helper function to perform a wrapped pointer add without triggering UBSAN. + * + * @return ptr + add with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_wrappedPtrAdd(byte* ptr, nint add) + { + return ptr + add; + } - /** - * Helper function to perform a wrapped pointer subtraction without triggering - * UBSAN. - * - * @return ptr - sub with wrapping - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub) - { - return ptr - sub; - } + /** + * Helper function to perform a wrapped pointer subtraction without triggering + * UBSAN. + * + * @return ptr - sub with wrapping + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_wrappedPtrSub(byte* ptr, nint sub) + { + return ptr - sub; + } - /** - * Helper function to add to a pointer that works around C's undefined behavior - * of adding 0 to NULL. - * - * @returns `ptr + add` except it defines `NULL + 0 == NULL`. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add) - { - return add > 0 ? ptr + add : ptr; - } + /** + * Helper function to add to a pointer that works around C's undefined behavior + * of adding 0 to NULL. + * + * @returns `ptr + add` except it defines `NULL + 0 == NULL`. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_maybeNullPtrAdd(byte* ptr, nint add) + { + return add > 0 ? ptr + add : ptr; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs index e90943ac6..a199a19d9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs @@ -1,361 +1,407 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { - private static int g_displayLevel = 0; + private static int g_displayLevel = 0; - /** - * Returns the sum of the sample sizes. - */ - private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples) + /** + * Returns the sum of the sample sizes. + */ + private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples) + { + nuint sum = 0; + uint i; + for (i = 0; i < nbSamples; ++i) { - nuint sum = 0; - uint i; - for (i = 0; i < nbSamples; ++i) - { - sum += samplesSizes[i]; - } - - return sum; + sum += samplesSizes[i]; } - /** - * Warns the user when their corpus is too small. - */ - private static void COVER_warnOnSmallCorpus( - nuint maxDictSize, - nuint nbDmers, - int displayLevel - ) + return sum; + } + + /** + * Warns the user when their corpus is too small. 
+ */ + private static void COVER_warnOnSmallCorpus( + nuint maxDictSize, + nuint nbDmers, + int displayLevel + ) + { + double ratio = nbDmers / (double)maxDictSize; + if (ratio >= 10) { - double ratio = nbDmers / (double)maxDictSize; - if (ratio >= 10) - { - return; - } + return; } + } - /** - * Computes the number of epochs and the size of each epoch. - * We will make sure that each epoch gets at least 10 * k bytes. - * - * The COVER algorithms divide the data up into epochs of equal size and - * select one segment from each epoch. - * - * @param maxDictSize The maximum allowed dictionary size. - * @param nbDmers The number of dmers we are training on. - * @param k The parameter k (segment size). - * @param passes The target number of passes over the dmer corpus. - * More passes means a better dictionary. - */ - private static COVER_epoch_info_t COVER_computeEpochs( - uint maxDictSize, - uint nbDmers, - uint k, - uint passes - ) + /** + * Computes the number of epochs and the size of each epoch. + * We will make sure that each epoch gets at least 10 * k bytes. + * + * The COVER algorithms divide the data up into epochs of equal size and + * select one segment from each epoch. + * + * @param maxDictSize The maximum allowed dictionary size. + * @param nbDmers The number of dmers we are training on. + * @param k The parameter k (segment size). + * @param passes The target number of passes over the dmer corpus. + * More passes means a better dictionary. + */ + private static COVER_epoch_info_t COVER_computeEpochs( + uint maxDictSize, + uint nbDmers, + uint k, + uint passes + ) + { + uint minEpochSize = k * 10; + COVER_epoch_info_t epochs; + epochs.num = 1 > maxDictSize / k / passes ? 1 : maxDictSize / k / passes; + epochs.size = nbDmers / epochs.num; + if (epochs.size >= minEpochSize) { - uint minEpochSize = k * 10; - COVER_epoch_info_t epochs; - epochs.num = 1 > maxDictSize / k / passes ? 
1 : maxDictSize / k / passes; - epochs.size = nbDmers / epochs.num; - if (epochs.size >= minEpochSize) - { - assert(epochs.size * epochs.num <= nbDmers); - return epochs; - } - - epochs.size = minEpochSize < nbDmers ? minEpochSize : nbDmers; - epochs.num = nbDmers / epochs.size; assert(epochs.size * epochs.num <= nbDmers); return epochs; } - /** - * Checks total compressed size of a dictionary - */ - private static nuint COVER_checkTotalCompressedSize( - ZDICT_cover_params_t parameters, - nuint* samplesSizes, - byte* samples, - nuint* offsets, - nuint nbTrainSamples, - nuint nbSamples, - byte* dict, - nuint dictBufferCapacity - ) - { - nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - /* Pointers */ - ZSTD_CCtx_s* cctx; - ZSTD_CDict_s* cdict; - void* dst; - /* Local variables */ - nuint dstCapacity; - nuint i; - { - nuint maxSampleSize = 0; - i = parameters.splitPoint < 1 ? nbTrainSamples : 0; - for (; i < nbSamples; ++i) - { - maxSampleSize = - samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; - } - - dstCapacity = ZSTD_compressBound(maxSampleSize); - dst = malloc(dstCapacity); - } - - cctx = ZSTD_createCCtx(); - cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); - if (dst == null || cctx == null || cdict == null) - { - goto _compressCleanup; - } + epochs.size = minEpochSize < nbDmers ? 
minEpochSize : nbDmers; + epochs.num = nbDmers / epochs.size; + assert(epochs.size * epochs.num <= nbDmers); + return epochs; + } - totalCompressedSize = dictBufferCapacity; + /** + * Checks total compressed size of a dictionary + */ + private static nuint COVER_checkTotalCompressedSize( + ZDICT_cover_params_t parameters, + nuint* samplesSizes, + byte* samples, + nuint* offsets, + nuint nbTrainSamples, + nuint nbSamples, + byte* dict, + nuint dictBufferCapacity + ) + { + nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + /* Pointers */ + ZSTD_CCtx_s* cctx; + ZSTD_CDict_s* cdict; + void* dst; + /* Local variables */ + nuint dstCapacity; + nuint i; + { + nuint maxSampleSize = 0; i = parameters.splitPoint < 1 ? nbTrainSamples : 0; for (; i < nbSamples; ++i) { - nuint size = ZSTD_compress_usingCDict( - cctx, - dst, - dstCapacity, - samples + offsets[i], - samplesSizes[i], - cdict - ); - if (ERR_isError(size)) - { - totalCompressedSize = size; - goto _compressCleanup; - } - - totalCompressedSize += size; - } - - _compressCleanup: - ZSTD_freeCCtx(cctx); - ZSTD_freeCDict(cdict); - if (dst != null) - { - free(dst); + maxSampleSize = + samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; } - return totalCompressedSize; + dstCapacity = ZSTD_compressBound(maxSampleSize); + dst = malloc(dstCapacity); } - /** - * Initialize the `COVER_best_t`. - */ - private static void COVER_best_init(COVER_best_s* best) + cctx = ZSTD_createCCtx(); + cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); + if (dst == null || cctx == null || cdict == null) { - if (best == null) - return; - SynchronizationWrapper.Init(&best->mutex); - best->liveJobs = 0; - best->dict = null; - best->dictSize = 0; - best->compressedSize = unchecked((nuint)(-1)); - best->parameters = new ZDICT_cover_params_t(); + goto _compressCleanup; } - /** - * Wait until liveJobs == 0. 
- */ - private static void COVER_best_wait(COVER_best_s* best) + totalCompressedSize = dictBufferCapacity; + i = parameters.splitPoint < 1 ? nbTrainSamples : 0; + for (; i < nbSamples; ++i) { - if (best == null) + nuint size = ZSTD_compress_usingCDict( + cctx, + dst, + dstCapacity, + samples + offsets[i], + samplesSizes[i], + cdict + ); + if (ERR_isError(size)) { - return; + totalCompressedSize = size; + goto _compressCleanup; } - SynchronizationWrapper.Enter(&best->mutex); - while (best->liveJobs != 0) - { - SynchronizationWrapper.Wait(&best->mutex); - } + totalCompressedSize += size; + } - SynchronizationWrapper.Exit(&best->mutex); + _compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst != null) + { + free(dst); } - /** - * Call COVER_best_wait() and then destroy the COVER_best_t. - */ - private static void COVER_best_destroy(COVER_best_s* best) + return totalCompressedSize; + } + + /** + * Initialize the `COVER_best_t`. + */ + private static void COVER_best_init(COVER_best_s* best) + { + if (best == null) + return; + SynchronizationWrapper.Init(&best->mutex); + best->liveJobs = 0; + best->dict = null; + best->dictSize = 0; + best->compressedSize = unchecked((nuint)(-1)); + best->parameters = new ZDICT_cover_params_t(); + } + + /** + * Wait until liveJobs == 0. + */ + private static void COVER_best_wait(COVER_best_s* best) + { + if (best == null) { - if (best == null) - { - return; - } + return; + } - COVER_best_wait(best); - if (best->dict != null) - { - free(best->dict); - } + SynchronizationWrapper.Enter(&best->mutex); + while (best->liveJobs != 0) + { + SynchronizationWrapper.Wait(&best->mutex); + } + + SynchronizationWrapper.Exit(&best->mutex); + } - SynchronizationWrapper.Free(&best->mutex); + /** + * Call COVER_best_wait() and then destroy the COVER_best_t. + */ + private static void COVER_best_destroy(COVER_best_s* best) + { + if (best == null) + { + return; } - /** - * Called when a thread is about to be launched. 
- * Increments liveJobs. - */ - private static void COVER_best_start(COVER_best_s* best) + COVER_best_wait(best); + if (best->dict != null) { - if (best == null) - { - return; - } + free(best->dict); + } - SynchronizationWrapper.Enter(&best->mutex); - ++best->liveJobs; - SynchronizationWrapper.Exit(&best->mutex); + SynchronizationWrapper.Free(&best->mutex); + } + + /** + * Called when a thread is about to be launched. + * Increments liveJobs. + */ + private static void COVER_best_start(COVER_best_s* best) + { + if (best == null) + { + return; } - /** - * Called when a thread finishes executing, both on error or success. - * Decrements liveJobs and signals any waiting threads if liveJobs == 0. - * If this dictionary is the best so far save it and its parameters. - */ - private static void COVER_best_finish( - COVER_best_s* best, - ZDICT_cover_params_t parameters, - COVER_dictSelection selection - ) + SynchronizationWrapper.Enter(&best->mutex); + ++best->liveJobs; + SynchronizationWrapper.Exit(&best->mutex); + } + + /** + * Called when a thread finishes executing, both on error or success. + * Decrements liveJobs and signals any waiting threads if liveJobs == 0. + * If this dictionary is the best so far save it and its parameters. 
+ */ + private static void COVER_best_finish( + COVER_best_s* best, + ZDICT_cover_params_t parameters, + COVER_dictSelection selection + ) + { + void* dict = selection.dictContent; + nuint compressedSize = selection.totalCompressedSize; + nuint dictSize = selection.dictSize; + if (best == null) { - void* dict = selection.dictContent; - nuint compressedSize = selection.totalCompressedSize; - nuint dictSize = selection.dictSize; - if (best == null) - { - return; - } + return; + } + { + nuint liveJobs; + SynchronizationWrapper.Enter(&best->mutex); + --best->liveJobs; + liveJobs = best->liveJobs; + if (compressedSize < best->compressedSize) { - nuint liveJobs; - SynchronizationWrapper.Enter(&best->mutex); - --best->liveJobs; - liveJobs = best->liveJobs; - if (compressedSize < best->compressedSize) + if (best->dict == null || best->dictSize < dictSize) { - if (best->dict == null || best->dictSize < dictSize) + if (best->dict != null) { - if (best->dict != null) - { - free(best->dict); - } - - best->dict = malloc(dictSize); - if (best->dict == null) - { - best->compressedSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC) - ); - best->dictSize = 0; - SynchronizationWrapper.Pulse(&best->mutex); - SynchronizationWrapper.Exit(&best->mutex); - return; - } + free(best->dict); } - if (dict != null) + best->dict = malloc(dictSize); + if (best->dict == null) { - memcpy(best->dict, dict, (uint)dictSize); - best->dictSize = dictSize; - best->parameters = parameters; - best->compressedSize = compressedSize; + best->compressedSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC) + ); + best->dictSize = 0; + SynchronizationWrapper.Pulse(&best->mutex); + SynchronizationWrapper.Exit(&best->mutex); + return; } } - if (liveJobs == 0) + if (dict != null) { - SynchronizationWrapper.PulseAll(&best->mutex); + memcpy(best->dict, dict, (uint)dictSize); + best->dictSize = dictSize; + best->parameters = parameters; + best->compressedSize = compressedSize; } + } - 
SynchronizationWrapper.Exit(&best->mutex); + if (liveJobs == 0) + { + SynchronizationWrapper.PulseAll(&best->mutex); } + + SynchronizationWrapper.Exit(&best->mutex); } + } + + private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz) + { + COVER_dictSelection ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; + } + + /** + * Error function for COVER_selectDict function. Returns a struct where + * return.totalCompressedSize is a ZSTD error. + */ + private static COVER_dictSelection COVER_dictSelectionError(nuint error) + { + return setDictSelection(null, 0, error); + } + + /** + * Error function for COVER_selectDict function. Checks if the return + * value is an error. + */ + private static uint COVER_dictSelectionIsError(COVER_dictSelection selection) + { + return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null + ? 1U + : 0U; + } + + /** + * Always call after selectDict is called to free up used memory from + * newly created dictionary. + */ + private static void COVER_dictSelectionFree(COVER_dictSelection selection) + { + free(selection.dictContent); + } - private static COVER_dictSelection setDictSelection(byte* buf, nuint s, nuint csz) + /** + * Called to finalize the dictionary and select one based on whether or not + * the shrink-dict flag was enabled. If enabled the dictionary used is the + * smallest dictionary within a specified regression of the compressed size + * from the largest dictionary. 
+ */ + private static COVER_dictSelection COVER_selectDict( + byte* customDictContent, + nuint dictBufferCapacity, + nuint dictContentSize, + byte* samplesBuffer, + nuint* samplesSizes, + uint nbFinalizeSamples, + nuint nbCheckSamples, + nuint nbSamples, + ZDICT_cover_params_t @params, + nuint* offsets, + nuint totalCompressedSize + ) + { + nuint largestDict = 0; + nuint largestCompressed = 0; + byte* customDictContentEnd = customDictContent + dictContentSize; + byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity); + byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity); + double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1; + if (largestDictbuffer == null || candidateDictBuffer == null) { - COVER_dictSelection ds; - ds.dictContent = buf; - ds.dictSize = s; - ds.totalCompressedSize = csz; - return ds; + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); } - /** - * Error function for COVER_selectDict function. Returns a struct where - * return.totalCompressedSize is a ZSTD error. - */ - private static COVER_dictSelection COVER_dictSelectionError(nuint error) + memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize); + dictContentSize = ZDICT_finalizeDictionary( + largestDictbuffer, + dictBufferCapacity, + customDictContent, + dictContentSize, + samplesBuffer, + samplesSizes, + nbFinalizeSamples, + @params.zParams + ); + if (ZDICT_isError(dictContentSize)) { - return setDictSelection(null, 0, error); + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); } - /** - * Error function for COVER_selectDict function. Checks if the return - * value is an error. 
- */ - private static uint COVER_dictSelectionIsError(COVER_dictSelection selection) + totalCompressedSize = COVER_checkTotalCompressedSize( + @params, + samplesSizes, + samplesBuffer, + offsets, + nbCheckSamples, + nbSamples, + largestDictbuffer, + dictContentSize + ); + if (ERR_isError(totalCompressedSize)) { - return ERR_isError(selection.totalCompressedSize) || selection.dictContent == null - ? 1U - : 0U; + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); } - /** - * Always call after selectDict is called to free up used memory from - * newly created dictionary. - */ - private static void COVER_dictSelectionFree(COVER_dictSelection selection) + if (@params.shrinkDict == 0) { - free(selection.dictContent); + free(candidateDictBuffer); + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } - /** - * Called to finalize the dictionary and select one based on whether or not - * the shrink-dict flag was enabled. If enabled the dictionary used is the - * smallest dictionary within a specified regression of the compressed size - * from the largest dictionary. 
- */ - private static COVER_dictSelection COVER_selectDict( - byte* customDictContent, - nuint dictBufferCapacity, - nuint dictContentSize, - byte* samplesBuffer, - nuint* samplesSizes, - uint nbFinalizeSamples, - nuint nbCheckSamples, - nuint nbSamples, - ZDICT_cover_params_t @params, - nuint* offsets, - nuint totalCompressedSize - ) + largestDict = dictContentSize; + largestCompressed = totalCompressedSize; + dictContentSize = 256; + while (dictContentSize < largestDict) { - nuint largestDict = 0; - nuint largestCompressed = 0; - byte* customDictContentEnd = customDictContent + dictContentSize; - byte* largestDictbuffer = (byte*)malloc(dictBufferCapacity); - byte* candidateDictBuffer = (byte*)malloc(dictBufferCapacity); - double regressionTolerance = (double)@params.shrinkDictMaxRegression / 100 + 1; - if (largestDictbuffer == null || candidateDictBuffer == null) - { - free(largestDictbuffer); - free(candidateDictBuffer); - return COVER_dictSelectionError(dictContentSize); - } - - memcpy(largestDictbuffer, customDictContent, (uint)dictContentSize); + memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict); dictContentSize = ZDICT_finalizeDictionary( - largestDictbuffer, + candidateDictBuffer, dictBufferCapacity, - customDictContent, + customDictContentEnd - dictContentSize, dictContentSize, samplesBuffer, samplesSizes, @@ -376,7 +422,7 @@ nuint totalCompressedSize offsets, nbCheckSamples, nbSamples, - largestDictbuffer, + candidateDictBuffer, dictContentSize ); if (ERR_isError(totalCompressedSize)) @@ -386,69 +432,22 @@ nuint totalCompressedSize return COVER_dictSelectionError(totalCompressedSize); } - if (@params.shrinkDict == 0) - { - free(candidateDictBuffer); - return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); - } - - largestDict = dictContentSize; - largestCompressed = totalCompressedSize; - dictContentSize = 256; - while (dictContentSize < largestDict) + if (totalCompressedSize <= largestCompressed * 
regressionTolerance) { - memcpy(candidateDictBuffer, largestDictbuffer, (uint)largestDict); - dictContentSize = ZDICT_finalizeDictionary( + free(largestDictbuffer); + return setDictSelection( candidateDictBuffer, - dictBufferCapacity, - customDictContentEnd - dictContentSize, dictContentSize, - samplesBuffer, - samplesSizes, - nbFinalizeSamples, - @params.zParams - ); - if (ZDICT_isError(dictContentSize)) - { - free(largestDictbuffer); - free(candidateDictBuffer); - return COVER_dictSelectionError(dictContentSize); - } - - totalCompressedSize = COVER_checkTotalCompressedSize( - @params, - samplesSizes, - samplesBuffer, - offsets, - nbCheckSamples, - nbSamples, - candidateDictBuffer, - dictContentSize + totalCompressedSize ); - if (ERR_isError(totalCompressedSize)) - { - free(largestDictbuffer); - free(candidateDictBuffer); - return COVER_dictSelectionError(totalCompressedSize); - } - - if (totalCompressedSize <= largestCompressed * regressionTolerance) - { - free(largestDictbuffer); - return setDictSelection( - candidateDictBuffer, - dictContentSize, - totalCompressedSize - ); - } - - dictContentSize *= 2; } - dictContentSize = largestDict; - totalCompressedSize = largestCompressed; - free(candidateDictBuffer); - return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); + dictContentSize *= 2; } + + dictContentSize = largestDict; + totalCompressedSize = largestCompressed; + free(candidateDictBuffer); + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs index a0baad8d2..c89de564d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + 
+/*-***************************/ +/* generic DTableDesc */ +/*-***************************/ +public struct DTableDesc { - /*-***************************/ - /* generic DTableDesc */ - /*-***************************/ - public struct DTableDesc - { - public byte maxTableLog; - public byte tableType; - public byte tableLog; - public byte reserved; - } -} + public byte maxTableLog; + public byte tableType; + public byte tableLog; + public byte reserved; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs index f55e6f7fc..93d18c382 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct EStats_ress_t { - public unsafe struct EStats_ress_t - { - /* dictionary */ - public ZSTD_CDict_s* dict; + /* dictionary */ + public ZSTD_CDict_s* dict; - /* working context */ - public ZSTD_CCtx_s* zc; + /* working context */ + public ZSTD_CCtx_s* zc; - /* must be ZSTD_BLOCKSIZE_MAX allocated */ - public void* workPlace; - } -} + /* must be ZSTD_BLOCKSIZE_MAX allocated */ + public void* workPlace; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs index 38f1729f5..77283d846 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs @@ -1,460 +1,459 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial 
class Methods + /*=== Version ===*/ + private static uint FSE_versionNumber() { - /*=== Version ===*/ - private static uint FSE_versionNumber() - { - return 0 * 100 * 100 + 9 * 100 + 0; - } + return 0 * 100 * 100 + 9 * 100 + 0; + } - /*=== Error Management ===*/ - private static bool FSE_isError(nuint code) - { - return ERR_isError(code); - } + /*=== Error Management ===*/ + private static bool FSE_isError(nuint code) + { + return ERR_isError(code); + } - private static string FSE_getErrorName(nuint code) - { - return ERR_getErrorName(code); - } + private static string FSE_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } - /* Error Management */ - private static bool HUF_isError(nuint code) - { - return ERR_isError(code); - } + /* Error Management */ + private static bool HUF_isError(nuint code) + { + return ERR_isError(code); + } - private static string HUF_getErrorName(nuint code) - { - return ERR_getErrorName(code); - } + private static string HUF_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } - /*-************************************************************** - * FSE NCount encoding-decoding - ****************************************************************/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_readNCount_body( - short* normalizedCounter, - uint* maxSVPtr, - uint* tableLogPtr, - void* headerBuffer, - nuint hbSize - ) + /*-************************************************************** + * FSE NCount encoding-decoding + ****************************************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint FSE_readNCount_body( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) + { + byte* istart = (byte*)headerBuffer; + byte* iend = istart + hbSize; + byte* ip = istart; + int nbBits; + int remaining; + int threshold; + uint bitStream; + int bitCount; + uint charnum = 0; + uint 
maxSV1 = *maxSVPtr + 1; + int previous0 = 0; + if (hbSize < 8) { - byte* istart = (byte*)headerBuffer; - byte* iend = istart + hbSize; - byte* ip = istart; - int nbBits; - int remaining; - int threshold; - uint bitStream; - int bitCount; - uint charnum = 0; - uint maxSV1 = *maxSVPtr + 1; - int previous0 = 0; - if (hbSize < 8) + sbyte* buffer = stackalloc sbyte[8]; + /* This function only works when hbSize >= 8 */ + memset(buffer, 0, sizeof(sbyte) * 8); + memcpy(buffer, headerBuffer, (uint)hbSize); { - sbyte* buffer = stackalloc sbyte[8]; - /* This function only works when hbSize >= 8 */ - memset(buffer, 0, sizeof(sbyte) * 8); - memcpy(buffer, headerBuffer, (uint)hbSize); - { - nuint countSize = FSE_readNCount( - normalizedCounter, - maxSVPtr, - tableLogPtr, - buffer, - sizeof(sbyte) * 8 - ); - if (FSE_isError(countSize)) - return countSize; - if (countSize > hbSize) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + nuint countSize = FSE_readNCount( + normalizedCounter, + maxSVPtr, + tableLogPtr, + buffer, + sizeof(sbyte) * 8 + ); + if (FSE_isError(countSize)) return countSize; - } + if (countSize > hbSize) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + return countSize; } + } - assert(hbSize >= 8); - memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short)); - bitStream = MEM_readLE32(ip); - nbBits = (int)((bitStream & 0xF) + 5); - if (nbBits > 15) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - bitStream >>= 4; - bitCount = 4; - *tableLogPtr = (uint)nbBits; - remaining = (1 << nbBits) + 1; - threshold = 1 << nbBits; - nbBits++; - for (; ; ) + assert(hbSize >= 8); + memset(normalizedCounter, 0, (*maxSVPtr + 1) * sizeof(short)); + bitStream = MEM_readLE32(ip); + nbBits = (int)((bitStream & 0xF) + 5); + if (nbBits > 15) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr 
= (uint)nbBits; + remaining = (1 << nbBits) + 1; + threshold = 1 << nbBits; + nbBits++; + for (; ; ) + { + if (previous0 != 0) { - if (previous0 != 0) + /* Count the number of repeats. Each time the + * 2-bit repeat code is 0b11 there is another + * repeat. + * Avoid UB by setting the high bit to 1. + */ + int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); + while (repeats >= 12) { - /* Count the number of repeats. Each time the - * 2-bit repeat code is 0b11 there is another - * repeat. - * Avoid UB by setting the high bit to 1. - */ - int repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); - while (repeats >= 12) + charnum += 3 * 12; + if (ip <= iend - 7) { - charnum += 3 * 12; - if (ip <= iend - 7) - { - ip += 3; - } - else - { - bitCount -= (int)(8 * (iend - 7 - ip)); - bitCount &= 31; - ip = iend - 4; - } - - bitStream = MEM_readLE32(ip) >> bitCount; - repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); - } - - charnum += (uint)(3 * repeats); - bitStream >>= 2 * repeats; - bitCount += 2 * repeats; - assert((bitStream & 3) < 3); - charnum += bitStream & 3; - bitCount += 2; - if (charnum >= maxSV1) - break; - if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) - { - assert(bitCount >> 3 <= 3); - ip += bitCount >> 3; - bitCount &= 7; + ip += 3; } else { - bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount -= (int)(8 * (iend - 7 - ip)); bitCount &= 31; ip = iend - 4; } bitStream = MEM_readLE32(ip) >> bitCount; + repeats = (int)(ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1); } + charnum += (uint)(3 * repeats); + bitStream >>= 2 * repeats; + bitCount += 2 * repeats; + assert((bitStream & 3) < 3); + charnum += bitStream & 3; + bitCount += 2; + if (charnum >= maxSV1) + break; + if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) { - int max = 2 * threshold - 1 - remaining; - int count; - if ((bitStream & (uint)(threshold - 1)) < (uint)max) - { - count = (int)(bitStream & 
(uint)(threshold - 1)); - bitCount += nbBits - 1; - } - else - { - count = (int)(bitStream & (uint)(2 * threshold - 1)); - if (count >= threshold) - count -= max; - bitCount += nbBits; - } + assert(bitCount >> 3 <= 3); + ip += bitCount >> 3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount &= 31; + ip = iend - 4; + } - count--; - if (count >= 0) - { - remaining -= count; - } - else - { - assert(count == -1); - remaining += count; - } + bitStream = MEM_readLE32(ip) >> bitCount; + } - normalizedCounter[charnum++] = (short)count; - previous0 = count == 0 ? 1 : 0; - assert(threshold > 1); - if (remaining < threshold) - { - if (remaining <= 1) - break; - nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1); - threshold = 1 << nbBits - 1; - } + { + int max = 2 * threshold - 1 - remaining; + int count; + if ((bitStream & (uint)(threshold - 1)) < (uint)max) + { + count = (int)(bitStream & (uint)(threshold - 1)); + bitCount += nbBits - 1; + } + else + { + count = (int)(bitStream & (uint)(2 * threshold - 1)); + if (count >= threshold) + count -= max; + bitCount += nbBits; + } - if (charnum >= maxSV1) + count--; + if (count >= 0) + { + remaining -= count; + } + else + { + assert(count == -1); + remaining += count; + } + + normalizedCounter[charnum++] = (short)count; + previous0 = count == 0 ? 
1 : 0; + assert(threshold > 1); + if (remaining < threshold) + { + if (remaining <= 1) break; - if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) - { - ip += bitCount >> 3; - bitCount &= 7; - } - else - { - bitCount -= (int)(8 * (iend - 4 - ip)); - bitCount &= 31; - ip = iend - 4; - } + nbBits = (int)(ZSTD_highbit32((uint)remaining) + 1); + threshold = 1 << nbBits - 1; + } - bitStream = MEM_readLE32(ip) >> bitCount; + if (charnum >= maxSV1) + break; + if (ip <= iend - 7 || ip + (bitCount >> 3) <= iend - 4) + { + ip += bitCount >> 3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + bitCount &= 31; + ip = iend - 4; } - } - if (remaining != 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (charnum > maxSV1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); - if (bitCount > 32) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - *maxSVPtr = charnum - 1; - ip += bitCount + 7 >> 3; - return (nuint)(ip - istart); + bitStream = MEM_readLE32(ip) >> bitCount; + } } - /* Avoids the FORCE_INLINE of the _body() function. */ - private static nuint FSE_readNCount_body_default( - short* normalizedCounter, - uint* maxSVPtr, - uint* tableLogPtr, - void* headerBuffer, - nuint hbSize - ) - { - return FSE_readNCount_body( - normalizedCounter, - maxSVPtr, - tableLogPtr, - headerBuffer, - hbSize - ); - } + if (remaining != 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (charnum > maxSV1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); + if (bitCount > 32) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *maxSVPtr = charnum - 1; + ip += bitCount + 7 >> 3; + return (nuint)(ip - istart); + } - /*! FSE_readNCount_bmi2(): - * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. 
- */ - private static nuint FSE_readNCount_bmi2( - short* normalizedCounter, - uint* maxSVPtr, - uint* tableLogPtr, - void* headerBuffer, - nuint hbSize, - int bmi2 - ) - { - return FSE_readNCount_body_default( - normalizedCounter, - maxSVPtr, - tableLogPtr, - headerBuffer, - hbSize - ); - } + /* Avoids the FORCE_INLINE of the _body() function. */ + private static nuint FSE_readNCount_body_default( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) + { + return FSE_readNCount_body( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize + ); + } - /*! FSE_readNCount(): - Read compactly saved 'normalizedCounter' from 'rBuffer'. - @return : size read from 'rBuffer', - or an errorCode, which can be tested using FSE_isError(). - maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ - private static nuint FSE_readNCount( - short* normalizedCounter, - uint* maxSVPtr, - uint* tableLogPtr, - void* headerBuffer, - nuint hbSize - ) - { - return FSE_readNCount_bmi2( - normalizedCounter, - maxSVPtr, - tableLogPtr, - headerBuffer, - hbSize, - 0 - ); - } + /*! FSE_readNCount_bmi2(): + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise. + */ + private static nuint FSE_readNCount_bmi2( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize, + int bmi2 + ) + { + return FSE_readNCount_body_default( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize + ); + } - /*! HUF_readStats() : - Read compact Huffman tree, saved by HUF_writeCTable(). - `huffWeight` is destination buffer. - `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. - @return : size read from `src` , or an error Code . - Note : Needed by HUF_readCTable() and HUF_readDTableX?() . 
- */ - private static nuint HUF_readStats( - byte* huffWeight, - nuint hwSize, - uint* rankStats, - uint* nbSymbolsPtr, - uint* tableLogPtr, - void* src, - nuint srcSize - ) - { - uint* wksp = stackalloc uint[219]; - return HUF_readStats_wksp( - huffWeight, - hwSize, - rankStats, - nbSymbolsPtr, - tableLogPtr, - src, - srcSize, - wksp, - sizeof(uint) * 219, - 0 - ); - } + /*! FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ + private static nuint FSE_readNCount( + short* normalizedCounter, + uint* maxSVPtr, + uint* tableLogPtr, + void* headerBuffer, + nuint hbSize + ) + { + return FSE_readNCount_bmi2( + normalizedCounter, + maxSVPtr, + tableLogPtr, + headerBuffer, + hbSize, + 0 + ); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_readStats_body( - byte* huffWeight, - nuint hwSize, - uint* rankStats, - uint* nbSymbolsPtr, - uint* tableLogPtr, - void* src, - nuint srcSize, - void* workSpace, - nuint wkspSize, - int bmi2 - ) + /*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . 
+ */ + private static nuint HUF_readStats( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize + ) + { + uint* wksp = stackalloc uint[219]; + return HUF_readStats_wksp( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + tableLogPtr, + src, + srcSize, + wksp, + sizeof(uint) * 219, + 0 + ); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_readStats_body( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int bmi2 + ) + { + uint weightTotal; + byte* ip = (byte*)src; + nuint iSize; + nuint oSize; + if (srcSize == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + iSize = ip[0]; + if (iSize >= 128) { - uint weightTotal; - byte* ip = (byte*)src; - nuint iSize; - nuint oSize; - if (srcSize == 0) + oSize = iSize - 127; + iSize = (oSize + 1) / 2; + if (iSize + 1 > srcSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - iSize = ip[0]; - if (iSize >= 128) - { - oSize = iSize - 127; - iSize = (oSize + 1) / 2; - if (iSize + 1 > srcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - if (oSize >= hwSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - ip += 1; - { - uint n; - for (n = 0; n < oSize; n += 2) - { - huffWeight[n] = (byte)(ip[n / 2] >> 4); - huffWeight[n + 1] = (byte)(ip[n / 2] & 15); - } - } - } - else - { - if (iSize + 1 > srcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - oSize = FSE_decompress_wksp_bmi2( - huffWeight, - hwSize - 1, - ip + 1, - iSize, - 6, - workSpace, - wkspSize, - bmi2 - ); - if (FSE_isError(oSize)) - return oSize; - } - - memset(rankStats, 0, (12 + 1) * sizeof(uint)); - weightTotal = 0; - { - uint n; - for (n = 0; n < oSize; n++) - { - if (huffWeight[n] > 
12) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - rankStats[huffWeight[n]]++; - weightTotal += (uint)(1 << huffWeight[n] >> 1); - } - } - - if (weightTotal == 0) + if (oSize >= hwSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + ip += 1; { - uint tableLog = ZSTD_highbit32(weightTotal) + 1; - if (tableLog > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - *tableLogPtr = tableLog; + uint n; + for (n = 0; n < oSize; n += 2) { - uint total = (uint)(1 << (int)tableLog); - uint rest = total - weightTotal; - uint verif = (uint)(1 << (int)ZSTD_highbit32(rest)); - uint lastWeight = ZSTD_highbit32(rest) + 1; - if (verif != rest) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - huffWeight[oSize] = (byte)lastWeight; - rankStats[lastWeight]++; + huffWeight[n] = (byte)(ip[n / 2] >> 4); + huffWeight[n + 1] = (byte)(ip[n / 2] & 15); } } - - if (rankStats[1] < 2 || (rankStats[1] & 1) != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - *nbSymbolsPtr = (uint)(oSize + 1); - return iSize + 1; } - - /* Avoids the FORCE_INLINE of the _body() function. 
*/ - private static nuint HUF_readStats_body_default( - byte* huffWeight, - nuint hwSize, - uint* rankStats, - uint* nbSymbolsPtr, - uint* tableLogPtr, - void* src, - nuint srcSize, - void* workSpace, - nuint wkspSize - ) + else { - return HUF_readStats_body( + if (iSize + 1 > srcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + oSize = FSE_decompress_wksp_bmi2( huffWeight, - hwSize, - rankStats, - nbSymbolsPtr, - tableLogPtr, - src, - srcSize, + hwSize - 1, + ip + 1, + iSize, + 6, workSpace, wkspSize, - 0 + bmi2 ); + if (FSE_isError(oSize)) + return oSize; } - private static nuint HUF_readStats_wksp( - byte* huffWeight, - nuint hwSize, - uint* rankStats, - uint* nbSymbolsPtr, - uint* tableLogPtr, - void* src, - nuint srcSize, - void* workSpace, - nuint wkspSize, - int flags - ) + memset(rankStats, 0, (12 + 1) * sizeof(uint)); + weightTotal = 0; { - return HUF_readStats_body_default( - huffWeight, - hwSize, - rankStats, - nbSymbolsPtr, - tableLogPtr, - src, - srcSize, - workSpace, - wkspSize - ); + uint n; + for (n = 0; n < oSize; n++) + { + if (huffWeight[n] > 12) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + rankStats[huffWeight[n]]++; + weightTotal += (uint)(1 << huffWeight[n] >> 1); + } } + + if (weightTotal == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + { + uint tableLog = ZSTD_highbit32(weightTotal) + 1; + if (tableLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *tableLogPtr = tableLog; + { + uint total = (uint)(1 << (int)tableLog); + uint rest = total - weightTotal; + uint verif = (uint)(1 << (int)ZSTD_highbit32(rest)); + uint lastWeight = ZSTD_highbit32(rest) + 1; + if (verif != rest) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + huffWeight[oSize] = (byte)lastWeight; + rankStats[lastWeight]++; + } + } + + if (rankStats[1] < 2 || (rankStats[1] & 1) != 
0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *nbSymbolsPtr = (uint)(oSize + 1); + return iSize + 1; + } + + /* Avoids the FORCE_INLINE of the _body() function. */ + private static nuint HUF_readStats_body_default( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize + ) + { + return HUF_readStats_body( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + tableLogPtr, + src, + srcSize, + workSpace, + wkspSize, + 0 + ); + } + + private static nuint HUF_readStats_wksp( + byte* huffWeight, + nuint hwSize, + uint* rankStats, + uint* nbSymbolsPtr, + uint* tableLogPtr, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + return HUF_readStats_body_default( + huffWeight, + hwSize, + rankStats, + nbSymbolsPtr, + tableLogPtr, + src, + srcSize, + workSpace, + wkspSize + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs index 7c4956a6e..e211bda7d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ErrorPrivate.cs @@ -1,111 +1,110 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static bool ERR_isError(nuint code) { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static bool ERR_isError(nuint code) - { - return code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)); - } + return code > unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTD_ErrorCode ERR_getErrorCode(nuint code) - { 
- if (!ERR_isError(code)) - return 0; - return (ZSTD_ErrorCode)(0 - code); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTD_ErrorCode ERR_getErrorCode(nuint code) + { + if (!ERR_isError(code)) + return 0; + return (ZSTD_ErrorCode)(0 - code); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static string ERR_getErrorName(nuint code) - { - return ERR_getErrorString(ERR_getErrorCode(code)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static string ERR_getErrorName(nuint code) + { + return ERR_getErrorString(ERR_getErrorCode(code)); + } - /*-**************************************** - * Error Strings - ******************************************/ - private static string ERR_getErrorString(ZSTD_ErrorCode code) + /*-**************************************** + * Error Strings + ******************************************/ + private static string ERR_getErrorString(ZSTD_ErrorCode code) + { + const string notErrorCode = "Unspecified error code"; + switch (code) { - const string notErrorCode = "Unspecified error code"; - switch (code) - { - case ZSTD_ErrorCode.ZSTD_error_no_error: - return "No error detected"; - case ZSTD_ErrorCode.ZSTD_error_GENERIC: - return "Error (generic)"; - case ZSTD_ErrorCode.ZSTD_error_prefix_unknown: - return "Unknown frame descriptor"; - case ZSTD_ErrorCode.ZSTD_error_version_unsupported: - return "Version not supported"; - case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported: - return "Unsupported frame parameter"; - case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge: - return "Frame requires too much memory for decoding"; - case ZSTD_ErrorCode.ZSTD_error_corruption_detected: - return "Data corruption detected"; - case ZSTD_ErrorCode.ZSTD_error_checksum_wrong: - return "Restored data doesn't match checksum"; - case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong: - return "Header of Literals' block doesn't respect format specification"; - case 
ZSTD_ErrorCode.ZSTD_error_parameter_unsupported: - return "Unsupported parameter"; - case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported: - return "Unsupported combination of parameters"; - case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound: - return "Parameter is out of bound"; - case ZSTD_ErrorCode.ZSTD_error_init_missing: - return "Context should be init first"; - case ZSTD_ErrorCode.ZSTD_error_memory_allocation: - return "Allocation error : not enough memory"; - case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall: - return "workSpace buffer is not large enough"; - case ZSTD_ErrorCode.ZSTD_error_stage_wrong: - return "Operation not authorized at current processing stage"; - case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge: - return "tableLog requires too much memory : unsupported"; - case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge: - return "Unsupported max Symbol Value : too large"; - case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall: - return "Specified maxSymbolValue is too small"; - case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock: - return "This mode cannot generate an uncompressed block"; - case ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected: - return "pledged buffer stability condition is not respected"; - case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted: - return "Dictionary is corrupted"; - case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong: - return "Dictionary mismatch"; - case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed: - return "Cannot create Dictionary from provided samples"; - case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall: - return "Destination buffer is too small"; - case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong: - return "Src size is incorrect"; - case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null: - return "Operation on NULL destination buffer"; - case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull: - return "Operation made no progress over multiple calls, due to output buffer being 
full"; - case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty: - return "Operation made no progress over multiple calls, due to input being empty"; - case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge: - return "Frame index is too large"; - case ZSTD_ErrorCode.ZSTD_error_seekableIO: - return "An I/O error occurred when reading/seeking"; - case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong: - return "Destination buffer is wrong"; - case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong: - return "Source buffer is wrong"; - case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed: - return "Block-level external sequence producer returned an error code"; - case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid: - return "External sequences are not valid"; - case ZSTD_ErrorCode.ZSTD_error_maxCode: - default: - return notErrorCode; - } + case ZSTD_ErrorCode.ZSTD_error_no_error: + return "No error detected"; + case ZSTD_ErrorCode.ZSTD_error_GENERIC: + return "Error (generic)"; + case ZSTD_ErrorCode.ZSTD_error_prefix_unknown: + return "Unknown frame descriptor"; + case ZSTD_ErrorCode.ZSTD_error_version_unsupported: + return "Version not supported"; + case ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported: + return "Unsupported frame parameter"; + case ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge: + return "Frame requires too much memory for decoding"; + case ZSTD_ErrorCode.ZSTD_error_corruption_detected: + return "Data corruption detected"; + case ZSTD_ErrorCode.ZSTD_error_checksum_wrong: + return "Restored data doesn't match checksum"; + case ZSTD_ErrorCode.ZSTD_error_literals_headerWrong: + return "Header of Literals' block doesn't respect format specification"; + case ZSTD_ErrorCode.ZSTD_error_parameter_unsupported: + return "Unsupported parameter"; + case ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported: + return "Unsupported combination of parameters"; + case ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound: + return "Parameter is out of bound"; + 
case ZSTD_ErrorCode.ZSTD_error_init_missing: + return "Context should be init first"; + case ZSTD_ErrorCode.ZSTD_error_memory_allocation: + return "Allocation error : not enough memory"; + case ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall: + return "workSpace buffer is not large enough"; + case ZSTD_ErrorCode.ZSTD_error_stage_wrong: + return "Operation not authorized at current processing stage"; + case ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge: + return "tableLog requires too much memory : unsupported"; + case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge: + return "Unsupported max Symbol Value : too large"; + case ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall: + return "Specified maxSymbolValue is too small"; + case ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock: + return "This mode cannot generate an uncompressed block"; + case ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected: + return "pledged buffer stability condition is not respected"; + case ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted: + return "Dictionary is corrupted"; + case ZSTD_ErrorCode.ZSTD_error_dictionary_wrong: + return "Dictionary mismatch"; + case ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed: + return "Cannot create Dictionary from provided samples"; + case ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall: + return "Destination buffer is too small"; + case ZSTD_ErrorCode.ZSTD_error_srcSize_wrong: + return "Src size is incorrect"; + case ZSTD_ErrorCode.ZSTD_error_dstBuffer_null: + return "Operation on NULL destination buffer"; + case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull: + return "Operation made no progress over multiple calls, due to output buffer being full"; + case ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty: + return "Operation made no progress over multiple calls, due to input being empty"; + case ZSTD_ErrorCode.ZSTD_error_frameIndex_tooLarge: + return "Frame index is too large"; + case ZSTD_ErrorCode.ZSTD_error_seekableIO: + 
return "An I/O error occurred when reading/seeking"; + case ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong: + return "Destination buffer is wrong"; + case ZSTD_ErrorCode.ZSTD_error_srcBuffer_wrong: + return "Source buffer is wrong"; + case ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed: + return "Block-level external sequence producer returned an error code"; + case ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid: + return "External sequences are not valid"; + case ZSTD_ErrorCode.ZSTD_error_maxCode: + default: + return notErrorCode; } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs index 6ea7f508d..a1718372a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct EstimatedBlockSize { - public struct EstimatedBlockSize - { - public nuint estLitSize; - public nuint estBlockSize; - } -} + public nuint estLitSize; + public nuint estBlockSize; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs index 773e913ff..c5d239feb 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_accel_t.cs @@ -1,20 +1,19 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-************************************* + * Acceleration + ***************************************/ +public struct FASTCOVER_accel_t { - /*-************************************* - * Acceleration - ***************************************/ - public struct FASTCOVER_accel_t - { - /* Percentage of training samples used for ZDICT_finalizeDictionary */ - 
public uint finalize; + /* Percentage of training samples used for ZDICT_finalizeDictionary */ + public uint finalize; - /* Number of dmer skipped between each dmer counted in computeFrequency */ - public uint skip; + /* Number of dmer skipped between each dmer counted in computeFrequency */ + public uint skip; - public FASTCOVER_accel_t(uint finalize, uint skip) - { - this.finalize = finalize; - this.skip = skip; - } + public FASTCOVER_accel_t(uint finalize, uint skip) + { + this.finalize = finalize; + this.skip = skip; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs index d5fa4f4ef..d404ccc0b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs @@ -1,20 +1,19 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-************************************* + * Context + ***************************************/ +public unsafe struct FASTCOVER_ctx_t { - /*-************************************* - * Context - ***************************************/ - public unsafe struct FASTCOVER_ctx_t - { - public byte* samples; - public nuint* offsets; - public nuint* samplesSizes; - public nuint nbSamples; - public nuint nbTrainSamples; - public nuint nbTestSamples; - public nuint nbDmers; - public uint* freqs; - public uint d; - public uint f; - public FASTCOVER_accel_t accelParams; - } -} + public byte* samples; + public nuint* offsets; + public nuint* samplesSizes; + public nuint nbSamples; + public nuint nbTrainSamples; + public nuint nbTestSamples; + public nuint nbDmers; + public uint* freqs; + public uint d; + public uint f; + public FASTCOVER_accel_t accelParams; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs index 45d67a0a8..b166fe457 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Parameters for FASTCOVER_tryParameters(). + */ +public unsafe struct FASTCOVER_tryParameters_data_s { - /** - * Parameters for FASTCOVER_tryParameters(). - */ - public unsafe struct FASTCOVER_tryParameters_data_s - { - public FASTCOVER_ctx_t* ctx; - public COVER_best_s* best; - public nuint dictBufferCapacity; - public ZDICT_cover_params_t parameters; - } -} + public FASTCOVER_ctx_t* ctx; + public COVER_best_s* best; + public nuint dictBufferCapacity; + public ZDICT_cover_params_t parameters; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs index 70ed5f8c2..7a97d8be1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FPStats.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct FPStats { - public struct FPStats - { - public Fingerprint pastEvents; - public Fingerprint newEvents; - } + public Fingerprint pastEvents; + public Fingerprint newEvents; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs index b6bf1e42e..dfdc049ba 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_CState_t.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ***************************************** + * FSE symbol 
compression API + *******************************************/ +/*! +This API consists of small unitary functions, which highly benefit from being inlined. +Hence their body are included in next section. + */ +public unsafe struct FSE_CState_t { - /* ***************************************** - * FSE symbol compression API - *******************************************/ - /*! - This API consists of small unitary functions, which highly benefit from being inlined. - Hence their body are included in next section. - */ - public unsafe struct FSE_CState_t - { - public nint value; - public void* stateTable; - public void* symbolTT; - public uint stateLog; - } + public nint value; + public void* stateTable; + public void* symbolTT; + public uint stateLog; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs index 0ad9873c7..61e0fb680 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DState_t.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ***************************************** + * FSE symbol decompression API + *******************************************/ +public unsafe struct FSE_DState_t { - /* ***************************************** - * FSE symbol decompression API - *******************************************/ - public unsafe struct FSE_DState_t - { - public nuint state; + public nuint state; - /* precise table may vary, depending on U16 */ - public void* table; - } + /* precise table may vary, depending on U16 */ + public void* table; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs index 9fa98926b..ae6269ec1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ====== Decompression ====== */ +public struct FSE_DTableHeader { - /* ====== Decompression ====== */ - public struct FSE_DTableHeader - { - public ushort tableLog; - public ushort fastMode; - } -} + public ushort tableLog; + public ushort fastMode; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs index 09e7c440e..3ecda2bc2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs @@ -1,7 +1,6 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct FSE_DecompressWksp { - public unsafe struct FSE_DecompressWksp - { - public fixed short ncount[256]; - } -} + public fixed short ncount[256]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs index 49f07ea3d..66c496359 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_decode_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct FSE_decode_t { - public struct FSE_decode_t - { - public ushort newState; - public byte symbol; - public byte nbBits; - } + public ushort newState; + public byte symbol; + public byte nbBits; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs index 52bc103ce..6f2ceb531 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_repeat.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum FSE_repeat { - public enum FSE_repeat - { - /**< Cannot use the previous table */ - FSE_repeat_none, + /**< Cannot use the previous table */ + FSE_repeat_none, - /**< Can use the previous table but it must be checked */ - FSE_repeat_check, + /**< Can use the previous table but it must be checked */ + FSE_repeat_check, - /**< Can use the previous table and it is assumed to be valid */ - FSE_repeat_valid, - } + /**< Can use the previous table and it is assumed to be valid */ + FSE_repeat_valid, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs index 55dd48349..c4b43033d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_symbolCompressionTransform.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ***************************************** + * Implementation of inlined functions + *******************************************/ +public struct FSE_symbolCompressionTransform { - /* ***************************************** - * Implementation of inlined functions - *******************************************/ - public struct FSE_symbolCompressionTransform - { - public int deltaFindState; - public uint deltaNbBits; - } + public int deltaFindState; + public uint deltaNbBits; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs index 03e4179be..ed6f0912a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs @@ -1,776 +1,775 @@ -using static 
ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*-************************************* + * Hash Functions + ***************************************/ + /** + * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector + */ + private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) { - /*-************************************* - * Hash Functions - ***************************************/ - /** - * Hash the d-byte value pointed to by p and mod 2^f into the frequency vector - */ - private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) - { - if (d == 6) - { - return ZSTD_hash6Ptr(p, f); - } - - return ZSTD_hash8Ptr(p, f); + if (d == 6) + { + return ZSTD_hash6Ptr(p, f); } - private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = - GetArrayPointer( - new FASTCOVER_accel_t[11] - { - new FASTCOVER_accel_t(finalize: 100, skip: 0), - new FASTCOVER_accel_t(finalize: 100, skip: 0), - new FASTCOVER_accel_t(finalize: 50, skip: 1), - new FASTCOVER_accel_t(finalize: 34, skip: 2), - new FASTCOVER_accel_t(finalize: 25, skip: 3), - new FASTCOVER_accel_t(finalize: 20, skip: 4), - new FASTCOVER_accel_t(finalize: 17, skip: 5), - new FASTCOVER_accel_t(finalize: 14, skip: 6), - new FASTCOVER_accel_t(finalize: 13, skip: 7), - new FASTCOVER_accel_t(finalize: 11, skip: 8), - new FASTCOVER_accel_t(finalize: 10, skip: 9), - } - ); + return ZSTD_hash8Ptr(p, f); + } - /*-************************************* - * Helper functions - ***************************************/ - /** - * Selects the best segment in an epoch. - * Segments of are scored according to the function: - * - * Let F(d) be the frequency of all dmers with hash value d. 
- * Let S_i be hash value of the dmer at position i of segment S which has length k. - * - * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) - * - * Once the dmer with hash value d is in the dictionary we set F(d) = 0. - */ - private static COVER_segment_t FASTCOVER_selectSegment( - FASTCOVER_ctx_t* ctx, - uint* freqs, - uint begin, - uint end, - ZDICT_cover_params_t parameters, - ushort* segmentFreqs - ) + private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = + GetArrayPointer( + new FASTCOVER_accel_t[11] + { + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new FASTCOVER_accel_t(finalize: 50, skip: 1), + new FASTCOVER_accel_t(finalize: 34, skip: 2), + new FASTCOVER_accel_t(finalize: 25, skip: 3), + new FASTCOVER_accel_t(finalize: 20, skip: 4), + new FASTCOVER_accel_t(finalize: 17, skip: 5), + new FASTCOVER_accel_t(finalize: 14, skip: 6), + new FASTCOVER_accel_t(finalize: 13, skip: 7), + new FASTCOVER_accel_t(finalize: 11, skip: 8), + new FASTCOVER_accel_t(finalize: 10, skip: 9), + } + ); + + /*-************************************* + * Helper functions + ***************************************/ + /** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of all dmers with hash value d. + * Let S_i be hash value of the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) + * + * Once the dmer with hash value d is in the dictionary we set F(d) = 0. 
+ */ + private static COVER_segment_t FASTCOVER_selectSegment( + FASTCOVER_ctx_t* ctx, + uint* freqs, + uint begin, + uint end, + ZDICT_cover_params_t parameters, + ushort* segmentFreqs + ) + { + /* Constants */ + uint k = parameters.k; + uint d = parameters.d; + uint f = ctx->f; + uint dmersInK = k - d + 1; + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = new COVER_segment_t { - /* Constants */ - uint k = parameters.k; - uint d = parameters.d; - uint f = ctx->f; - uint dmersInK = k - d + 1; - /* Try each segment (activeSegment) and save the best (bestSegment) */ - COVER_segment_t bestSegment = new COVER_segment_t - { - begin = 0, - end = 0, - score = 0, - }; - COVER_segment_t activeSegment; - activeSegment.begin = begin; - activeSegment.end = begin; - activeSegment.score = 0; - while (activeSegment.end < end) + begin = 0, + end = 0, + score = 0, + }; + COVER_segment_t activeSegment; + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + while (activeSegment.end < end) + { + /* Get hash value of current dmer */ + nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); + if (segmentFreqs[idx] == 0) { - /* Get hash value of current dmer */ - nuint idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); - if (segmentFreqs[idx] == 0) - { - activeSegment.score += freqs[idx]; - } - - activeSegment.end += 1; - segmentFreqs[idx] += 1; - if (activeSegment.end - activeSegment.begin == dmersInK + 1) - { - /* Get hash value of the dmer to be eliminated from active segment */ - nuint delIndex = FASTCOVER_hashPtrToIndex( - ctx->samples + activeSegment.begin, - f, - d - ); - segmentFreqs[delIndex] -= 1; - if (segmentFreqs[delIndex] == 0) - { - activeSegment.score -= freqs[delIndex]; - } - - activeSegment.begin += 1; - } - - if (activeSegment.score > bestSegment.score) - { - bestSegment = activeSegment; - } + activeSegment.score += freqs[idx]; } - while 
(activeSegment.begin < end) + activeSegment.end += 1; + segmentFreqs[idx] += 1; + if (activeSegment.end - activeSegment.begin == dmersInK + 1) { - nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + /* Get hash value of the dmer to be eliminated from active segment */ + nuint delIndex = FASTCOVER_hashPtrToIndex( + ctx->samples + activeSegment.begin, + f, + d + ); segmentFreqs[delIndex] -= 1; + if (segmentFreqs[delIndex] == 0) + { + activeSegment.score -= freqs[delIndex]; + } + activeSegment.begin += 1; } + if (activeSegment.score > bestSegment.score) { - /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ - uint pos; - for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) - { - nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); - freqs[i] = 0; - } + bestSegment = activeSegment; } + } - return bestSegment; + while (activeSegment.begin < end) + { + nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + segmentFreqs[delIndex] -= 1; + activeSegment.begin += 1; } - private static int FASTCOVER_checkParameters( - ZDICT_cover_params_t parameters, - nuint maxDictSize, - uint f, - uint accel - ) { - if (parameters.d == 0 || parameters.k == 0) + /* Zero the frequency of hash value of each dmer covered by the chosen segment. 
*/ + uint pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { - return 0; + nuint i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); + freqs[i] = 0; } + } - if (parameters.d != 6 && parameters.d != 8) - { - return 0; - } + return bestSegment; + } - if (parameters.k > maxDictSize) - { - return 0; - } + private static int FASTCOVER_checkParameters( + ZDICT_cover_params_t parameters, + nuint maxDictSize, + uint f, + uint accel + ) + { + if (parameters.d == 0 || parameters.k == 0) + { + return 0; + } - if (parameters.d > parameters.k) - { - return 0; - } + if (parameters.d != 6 && parameters.d != 8) + { + return 0; + } - if (f > 31 || f == 0) - { - return 0; - } + if (parameters.k > maxDictSize) + { + return 0; + } - if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) - { - return 0; - } + if (parameters.d > parameters.k) + { + return 0; + } - if (accel > 10 || accel == 0) - { - return 0; - } + if (f > 31 || f == 0) + { + return 0; + } - return 1; + if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) + { + return 0; } - /** - * Clean up a context initialized with `FASTCOVER_ctx_init()`. - */ - private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) + if (accel > 10 || accel == 0) { - if (ctx == null) - return; - free(ctx->freqs); - ctx->freqs = null; - free(ctx->offsets); - ctx->offsets = null; + return 0; } - /** - * Calculate for frequency of hash value of each dmer in ctx->samples - */ - private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx) + return 1; + } + + /** + * Clean up a context initialized with `FASTCOVER_ctx_init()`. 
+ */ + private static void FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) + { + if (ctx == null) + return; + free(ctx->freqs); + ctx->freqs = null; + free(ctx->offsets); + ctx->offsets = null; + } + + /** + * Calculate for frequency of hash value of each dmer in ctx->samples + */ + private static void FASTCOVER_computeFrequency(uint* freqs, FASTCOVER_ctx_t* ctx) + { + uint f = ctx->f; + uint d = ctx->d; + uint skip = ctx->accelParams.skip; + uint readLength = d > 8 ? d : 8; + nuint i; + assert(ctx->nbTrainSamples >= 5); + assert(ctx->nbTrainSamples <= ctx->nbSamples); + for (i = 0; i < ctx->nbTrainSamples; i++) { - uint f = ctx->f; - uint d = ctx->d; - uint skip = ctx->accelParams.skip; - uint readLength = d > 8 ? d : 8; - nuint i; - assert(ctx->nbTrainSamples >= 5); - assert(ctx->nbTrainSamples <= ctx->nbSamples); - for (i = 0; i < ctx->nbTrainSamples; i++) + /* start of current dmer */ + nuint start = ctx->offsets[i]; + nuint currSampleEnd = ctx->offsets[i + 1]; + while (start + readLength <= currSampleEnd) { - /* start of current dmer */ - nuint start = ctx->offsets[i]; - nuint currSampleEnd = ctx->offsets[i + 1]; - while (start + readLength <= currSampleEnd) - { - nuint dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); - freqs[dmerIndex]++; - start = start + skip + 1; - } + nuint dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); + freqs[dmerIndex]++; + start = start + skip + 1; } } + } - /** - * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can be used multiple - * times. - * Returns 0 on success or error code on error. - * The context must be destroyed with `FASTCOVER_ctx_destroy()`. - */ - private static nuint FASTCOVER_ctx_init( - FASTCOVER_ctx_t* ctx, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples, - uint d, - double splitPoint, - uint f, - FASTCOVER_accel_t accelParams + /** + * Prepare a context for dictionary building. 
+ * The context is only dependent on the parameter `d` and can be used multiple + * times. + * Returns 0 on success or error code on error. + * The context must be destroyed with `FASTCOVER_ctx_destroy()`. + */ + private static nuint FASTCOVER_ctx_init( + FASTCOVER_ctx_t* ctx, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + uint d, + double splitPoint, + uint f, + FASTCOVER_accel_t accelParams + ) + { + byte* samples = (byte*)samplesBuffer; + nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Split samples into testing and training sets */ + uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples; + uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples; + nuint trainingSamplesSize = + splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; + nuint testSamplesSize = + splitPoint < 1 + ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) + : totalSamplesSize; + if ( + totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong)) + || totalSamplesSize >= (sizeof(nuint) == 8 ? unchecked((uint)-1) : 1 * (1U << 30)) ) { - byte* samples = (byte*)samplesBuffer; - nuint totalSamplesSize = COVER_sum(samplesSizes, nbSamples); - /* Split samples into testing and training sets */ - uint nbTrainSamples = splitPoint < 1 ? (uint)(nbSamples * splitPoint) : nbSamples; - uint nbTestSamples = splitPoint < 1 ? nbSamples - nbTrainSamples : nbSamples; - nuint trainingSamplesSize = - splitPoint < 1 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; - nuint testSamplesSize = - splitPoint < 1 - ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) - : totalSamplesSize; - if ( - totalSamplesSize < (d > sizeof(ulong) ? d : sizeof(ulong)) - || totalSamplesSize >= (sizeof(nuint) == 8 ? 
unchecked((uint)-1) : 1 * (1U << 30)) - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - if (nbTrainSamples < 5) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + if (nbTrainSamples < 5) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - if (nbTestSamples < 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + if (nbTestSamples < 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - *ctx = new FASTCOVER_ctx_t - { - samples = samples, - samplesSizes = samplesSizes, - nbSamples = nbSamples, - nbTrainSamples = nbTrainSamples, - nbTestSamples = nbTestSamples, - nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? d : sizeof(ulong)) + 1, - d = d, - f = f, - accelParams = accelParams, - offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)), - }; - if (ctx->offsets == null) + *ctx = new FASTCOVER_ctx_t + { + samples = samples, + samplesSizes = samplesSizes, + nbSamples = nbSamples, + nbTrainSamples = nbTrainSamples, + nbTestSamples = nbTestSamples, + nbDmers = trainingSamplesSize - (d > sizeof(ulong) ? 
d : sizeof(ulong)) + 1, + d = d, + f = f, + accelParams = accelParams, + offsets = (nuint*)calloc(nbSamples + 1, (ulong)sizeof(nuint)), + }; + if (ctx->offsets == null) + { + FASTCOVER_ctx_destroy(ctx); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + { + uint i; + ctx->offsets[0] = 0; + assert(nbSamples >= 5); + for (i = 1; i <= nbSamples; ++i) { - FASTCOVER_ctx_destroy(ctx); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; } + } + + ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint)); + if (ctx->freqs == null) + { + FASTCOVER_ctx_destroy(ctx); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + FASTCOVER_computeFrequency(ctx->freqs, ctx); + return 0; + } + + /** + * Given the prepared context build the dictionary. + */ + private static nuint FASTCOVER_buildDictionary( + FASTCOVER_ctx_t* ctx, + uint* freqs, + void* dictBuffer, + nuint dictBufferCapacity, + ZDICT_cover_params_t parameters, + ushort* segmentFreqs + ) + { + byte* dict = (byte*)dictBuffer; + nuint tail = dictBufferCapacity; + /* Divide the data into epochs. We will select one segment from each epoch. 
*/ + COVER_epoch_info_t epochs = COVER_computeEpochs( + (uint)dictBufferCapacity, + (uint)ctx->nbDmers, + parameters.k, + 1 + ); + const nuint maxZeroScoreRun = 10; + nuint zeroScoreRun = 0; + nuint epoch; + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) + { + uint epochBegin = (uint)(epoch * epochs.size); + uint epochEnd = epochBegin + epochs.size; + nuint segmentSize; + /* Select a segment */ + COVER_segment_t segment = FASTCOVER_selectSegment( + ctx, + freqs, + epochBegin, + epochEnd, + parameters, + segmentFreqs + ); + if (segment.score == 0) { - uint i; - ctx->offsets[0] = 0; - assert(nbSamples >= 5); - for (i = 1; i <= nbSamples; ++i) + if (++zeroScoreRun >= maxZeroScoreRun) { - ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + break; } + + continue; } - ctx->freqs = (uint*)calloc((ulong)1 << (int)f, sizeof(uint)); - if (ctx->freqs == null) + zeroScoreRun = 0; + segmentSize = + segment.end - segment.begin + parameters.d - 1 < tail + ? segment.end - segment.begin + parameters.d - 1 + : tail; + if (segmentSize < parameters.d) { - FASTCOVER_ctx_destroy(ctx); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + break; } - FASTCOVER_computeFrequency(ctx->freqs, ctx); - return 0; + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize); } - /** - * Given the prepared context build the dictionary. - */ - private static nuint FASTCOVER_buildDictionary( - FASTCOVER_ctx_t* ctx, - uint* freqs, - void* dictBuffer, - nuint dictBufferCapacity, - ZDICT_cover_params_t parameters, - ushort* segmentFreqs - ) + return tail; + } + + /** + * Tries a set of parameters and updates the COVER_best_t with the results. + * This function is thread safe if zstd is compiled with multithreaded support. + * It takes its parameters as an *OWNING* opaque pointer to support threading. 
+ */ + private static void FASTCOVER_tryParameters(void* opaque) + { + /* Save parameters as local variables */ + FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque; + FASTCOVER_ctx_t* ctx = data->ctx; + ZDICT_cover_params_t parameters = data->parameters; + nuint dictBufferCapacity = data->dictBufferCapacity; + nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + /* Initialize array to keep track of frequency of dmer within activeSegment */ + ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort)); + /* Allocate space for hash table, dict, and freqs */ + byte* dict = (byte*)malloc(dictBufferCapacity); + COVER_dictSelection selection = COVER_dictSelectionError( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)) + ); + uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint)); + if (segmentFreqs == null || dict == null || freqs == null) { - byte* dict = (byte*)dictBuffer; - nuint tail = dictBufferCapacity; - /* Divide the data into epochs. We will select one segment from each epoch. 
*/ - COVER_epoch_info_t epochs = COVER_computeEpochs( - (uint)dictBufferCapacity, - (uint)ctx->nbDmers, - parameters.k, - 1 + goto _cleanup; + } + + memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint))); + { + nuint tail = FASTCOVER_buildDictionary( + ctx, + freqs, + dict, + dictBufferCapacity, + parameters, + segmentFreqs + ); + uint nbFinalizeSamples = (uint)( + ctx->nbTrainSamples * ctx->accelParams.finalize / 100 ); - const nuint maxZeroScoreRun = 10; - nuint zeroScoreRun = 0; - nuint epoch; - for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) + selection = COVER_selectDict( + dict + tail, + dictBufferCapacity, + dictBufferCapacity - tail, + ctx->samples, + ctx->samplesSizes, + nbFinalizeSamples, + ctx->nbTrainSamples, + ctx->nbSamples, + parameters, + ctx->offsets, + totalCompressedSize + ); + if (COVER_dictSelectionIsError(selection) != 0) { - uint epochBegin = (uint)(epoch * epochs.size); - uint epochEnd = epochBegin + epochs.size; - nuint segmentSize; - /* Select a segment */ - COVER_segment_t segment = FASTCOVER_selectSegment( - ctx, - freqs, - epochBegin, - epochEnd, - parameters, - segmentFreqs - ); - if (segment.score == 0) - { - if (++zeroScoreRun >= maxZeroScoreRun) - { - break; - } + goto _cleanup; + } + } - continue; - } + _cleanup: + free(dict); + COVER_best_finish(data->best, parameters, selection); + free(data); + free(segmentFreqs); + COVER_dictSelectionFree(selection); + free(freqs); + } - zeroScoreRun = 0; - segmentSize = - segment.end - segment.begin + parameters.d - 1 < tail - ? 
segment.end - segment.begin + parameters.d - 1 - : tail; - if (segmentSize < parameters.d) - { - break; - } + private static void FASTCOVER_convertToCoverParams( + ZDICT_fastCover_params_t fastCoverParams, + ZDICT_cover_params_t* coverParams + ) + { + coverParams->k = fastCoverParams.k; + coverParams->d = fastCoverParams.d; + coverParams->steps = fastCoverParams.steps; + coverParams->nbThreads = fastCoverParams.nbThreads; + coverParams->splitPoint = fastCoverParams.splitPoint; + coverParams->zParams = fastCoverParams.zParams; + coverParams->shrinkDict = fastCoverParams.shrinkDict; + } - tail -= segmentSize; - memcpy(dict + tail, ctx->samples + segment.begin, (uint)segmentSize); - } + private static void FASTCOVER_convertToFastCoverParams( + ZDICT_cover_params_t coverParams, + ZDICT_fastCover_params_t* fastCoverParams, + uint f, + uint accel + ) + { + fastCoverParams->k = coverParams.k; + fastCoverParams->d = coverParams.d; + fastCoverParams->steps = coverParams.steps; + fastCoverParams->nbThreads = coverParams.nbThreads; + fastCoverParams->splitPoint = coverParams.splitPoint; + fastCoverParams->f = f; + fastCoverParams->accel = accel; + fastCoverParams->zParams = coverParams.zParams; + fastCoverParams->shrinkDict = coverParams.shrinkDict; + } - return tail; + /*! ZDICT_trainFromBuffer_fastCover(): + * Train a dictionary from an array of samples using a modified version of COVER algorithm. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * d and k are required. + * All other parameters are optional, will use default values if not provided + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * See ZDICT_trainFromBuffer() for details on failure modes. 
+ * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ + public static nuint ZDICT_trainFromBuffer_fastCover( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_fastCover_params_t parameters + ) + { + byte* dict = (byte*)dictBuffer; + FASTCOVER_ctx_t ctx; + ZDICT_cover_params_t coverParams; + FASTCOVER_accel_t accelParams; + g_displayLevel = (int)parameters.zParams.notificationLevel; + parameters.splitPoint = 1; + parameters.f = parameters.f == 0 ? 20 : parameters.f; + parameters.accel = parameters.accel == 0 ? 1 : parameters.accel; + coverParams = new ZDICT_cover_params_t(); + FASTCOVER_convertToCoverParams(parameters, &coverParams); + if ( + FASTCOVER_checkParameters( + coverParams, + dictBufferCapacity, + parameters.f, + parameters.accel + ) == 0 + ) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /** - * Tries a set of parameters and updates the COVER_best_t with the results. - * This function is thread safe if zstd is compiled with multithreaded support. - * It takes its parameters as an *OWNING* opaque pointer to support threading. 
- */ - private static void FASTCOVER_tryParameters(void* opaque) + if (nbSamples == 0) { - /* Save parameters as local variables */ - FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)opaque; - FASTCOVER_ctx_t* ctx = data->ctx; - ZDICT_cover_params_t parameters = data->parameters; - nuint dictBufferCapacity = data->dictBufferCapacity; - nuint totalCompressedSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - /* Initialize array to keep track of frequency of dmer within activeSegment */ - ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)ctx->f, sizeof(ushort)); - /* Allocate space for hash table, dict, and freqs */ - byte* dict = (byte*)malloc(dictBufferCapacity); - COVER_dictSelection selection = COVER_dictSelectionError( - unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (dictBufferCapacity < 256) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; + { + nuint initVal = FASTCOVER_ctx_init( + &ctx, + samplesBuffer, + samplesSizes, + nbSamples, + coverParams.d, + parameters.splitPoint, + parameters.f, + accelParams ); - uint* freqs = (uint*)malloc(((ulong)1 << (int)ctx->f) * sizeof(uint)); - if (segmentFreqs == null || dict == null || freqs == null) + if (ERR_isError(initVal)) { - goto _cleanup; + return initVal; } + } - memcpy(freqs, ctx->freqs, (uint)(((ulong)1 << (int)ctx->f) * sizeof(uint))); - { - nuint tail = FASTCOVER_buildDictionary( - ctx, - freqs, - dict, - dictBufferCapacity, - parameters, - segmentFreqs - ); - uint nbFinalizeSamples = (uint)( - ctx->nbTrainSamples * ctx->accelParams.finalize / 100 - ); - selection = COVER_selectDict( - dict + tail, - dictBufferCapacity, - dictBufferCapacity - tail, - ctx->samples, - ctx->samplesSizes, - nbFinalizeSamples, - ctx->nbTrainSamples, - ctx->nbSamples, - parameters, 
- ctx->offsets, - totalCompressedSize - ); - if (COVER_dictSelectionIsError(selection) != 0) - { - goto _cleanup; - } - } + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); + { + /* Initialize array to keep track of frequency of dmer within activeSegment */ + ushort* segmentFreqs = (ushort*)calloc( + (ulong)1 << (int)parameters.f, + sizeof(ushort) + ); + nuint tail = FASTCOVER_buildDictionary( + &ctx, + ctx.freqs, + dictBuffer, + dictBufferCapacity, + coverParams, + segmentFreqs + ); + uint nbFinalizeSamples = (uint)( + ctx.nbTrainSamples * ctx.accelParams.finalize / 100 + ); + nuint dictionarySize = ZDICT_finalizeDictionary( + dict, + dictBufferCapacity, + dict + tail, + dictBufferCapacity - tail, + samplesBuffer, + samplesSizes, + nbFinalizeSamples, + coverParams.zParams + ); + if (!ERR_isError(dictionarySize)) { } - _cleanup: - free(dict); - COVER_best_finish(data->best, parameters, selection); - free(data); + FASTCOVER_ctx_destroy(&ctx); free(segmentFreqs); - COVER_dictSelectionFree(selection); - free(freqs); + return dictionarySize; } + } - private static void FASTCOVER_convertToCoverParams( - ZDICT_fastCover_params_t fastCoverParams, - ZDICT_cover_params_t* coverParams - ) + /*! ZDICT_optimizeTrainFromBuffer_fastCover(): + * The same requirements as above hold for all the parameters except `parameters`. + * This function tries many parameter combinations (specifically, k and d combinations) + * and picks the best parameters. `*parameters` is filled with the best parameters found, + * dictionary constructed with those parameters is stored in `dictBuffer`. + * All of the parameters d, k, steps, f, and accel are optional. + * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. + * if steps is zero it defaults to its default value. + * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. + * If f is zero, default value of 20 is used. 
+ * If accel is zero, default value of 1 is used. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * On success `*parameters` contains the parameters selected. + * See ZDICT_trainFromBuffer() for details on failure modes. + * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. + */ + public static nuint ZDICT_optimizeTrainFromBuffer_fastCover( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_fastCover_params_t* parameters + ) + { + ZDICT_cover_params_t coverParams; + FASTCOVER_accel_t accelParams; + /* constants */ + uint nbThreads = parameters->nbThreads; + double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint; + uint kMinD = parameters->d == 0 ? 6 : parameters->d; + uint kMaxD = parameters->d == 0 ? 8 : parameters->d; + uint kMinK = parameters->k == 0 ? 50 : parameters->k; + uint kMaxK = parameters->k == 0 ? 2000 : parameters->k; + uint kSteps = parameters->steps == 0 ? 40 : parameters->steps; + uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1; + uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + uint f = parameters->f == 0 ? 20 : parameters->f; + uint accel = parameters->accel == 0 ? 
1 : parameters->accel; + const uint shrinkDict = 0; + /* Local variables */ + int displayLevel = (int)parameters->zParams.notificationLevel; + uint iteration = 1; + uint d; + uint k; + COVER_best_s best; + void* pool = null; + int warned = 0; + if (splitPoint <= 0 || splitPoint > 1) { - coverParams->k = fastCoverParams.k; - coverParams->d = fastCoverParams.d; - coverParams->steps = fastCoverParams.steps; - coverParams->nbThreads = fastCoverParams.nbThreads; - coverParams->splitPoint = fastCoverParams.splitPoint; - coverParams->zParams = fastCoverParams.zParams; - coverParams->shrinkDict = fastCoverParams.shrinkDict; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - private static void FASTCOVER_convertToFastCoverParams( - ZDICT_cover_params_t coverParams, - ZDICT_fastCover_params_t* fastCoverParams, - uint f, - uint accel - ) + if (accel == 0 || accel > 10) { - fastCoverParams->k = coverParams.k; - fastCoverParams->d = coverParams.d; - fastCoverParams->steps = coverParams.steps; - fastCoverParams->nbThreads = coverParams.nbThreads; - fastCoverParams->splitPoint = coverParams.splitPoint; - fastCoverParams->f = f; - fastCoverParams->accel = accel; - fastCoverParams->zParams = coverParams.zParams; - fastCoverParams->shrinkDict = coverParams.shrinkDict; - } - - /*! ZDICT_trainFromBuffer_fastCover(): - * Train a dictionary from an array of samples using a modified version of COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * d and k are required. - * All other parameters are optional, will use default values if not provided - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). 
- * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ - public static nuint ZDICT_trainFromBuffer_fastCover( - void* dictBuffer, - nuint dictBufferCapacity, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples, - ZDICT_fastCover_params_t parameters - ) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } + + if (kMinK < kMaxD || kMaxK < kMinK) { - byte* dict = (byte*)dictBuffer; - FASTCOVER_ctx_t ctx; - ZDICT_cover_params_t coverParams; - FASTCOVER_accel_t accelParams; - g_displayLevel = (int)parameters.zParams.notificationLevel; - parameters.splitPoint = 1; - parameters.f = parameters.f == 0 ? 20 : parameters.f; - parameters.accel = parameters.accel == 0 ? 
1 : parameters.accel; - coverParams = new ZDICT_cover_params_t(); - FASTCOVER_convertToCoverParams(parameters, &coverParams); - if ( - FASTCOVER_checkParameters( - coverParams, - dictBufferCapacity, - parameters.f, - parameters.accel - ) == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - if (nbSamples == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + if (nbSamples == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - if (dictBufferCapacity < 256) + if (dictBufferCapacity < 256) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (nbThreads > 1) + { + pool = POOL_create(nbThreads, 1); + if (pool == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } + } - accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; + COVER_best_init(&best); + coverParams = new ZDICT_cover_params_t(); + FASTCOVER_convertToCoverParams(*parameters, &coverParams); + accelParams = FASTCOVER_defaultAccelParameters[accel]; + g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + for (d = kMinD; d <= kMaxD; d += 2) + { + /* Initialize the context for this value of d */ + FASTCOVER_ctx_t ctx; { nuint initVal = FASTCOVER_ctx_init( &ctx, samplesBuffer, samplesSizes, nbSamples, - coverParams.d, - parameters.splitPoint, - parameters.f, + d, + splitPoint, + f, accelParams ); if (ERR_isError(initVal)) { + COVER_best_destroy(&best); + POOL_free(pool); return initVal; } } - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); - { - /* Initialize array to keep track of frequency of dmer within activeSegment */ - ushort* segmentFreqs = (ushort*)calloc( - (ulong)1 << (int)parameters.f, - sizeof(ushort) - ); - nuint tail = FASTCOVER_buildDictionary( - &ctx, - ctx.freqs, - dictBuffer, - dictBufferCapacity, - coverParams, - segmentFreqs - ); - uint nbFinalizeSamples = (uint)( - ctx.nbTrainSamples * ctx.accelParams.finalize / 100 - ); - nuint dictionarySize = ZDICT_finalizeDictionary( - dict, - dictBufferCapacity, - dict + tail, - dictBufferCapacity - tail, - samplesBuffer, - samplesSizes, - nbFinalizeSamples, - coverParams.zParams - ); - if (!ERR_isError(dictionarySize)) { } - - FASTCOVER_ctx_destroy(&ctx); - free(segmentFreqs); - return dictionarySize; - } - } - - /*! ZDICT_optimizeTrainFromBuffer_fastCover(): - * The same requirements as above hold for all the parameters except `parameters`. - * This function tries many parameter combinations (specifically, k and d combinations) - * and picks the best parameters. `*parameters` is filled with the best parameters found, - * dictionary constructed with those parameters is stored in `dictBuffer`. - * All of the parameters d, k, steps, f, and accel are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. - * if steps is zero it defaults to its default value. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. - * If f is zero, default value of 20 is used. 
- * If accel is zero, default value of 1 is used. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. - */ - public static nuint ZDICT_optimizeTrainFromBuffer_fastCover( - void* dictBuffer, - nuint dictBufferCapacity, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples, - ZDICT_fastCover_params_t* parameters - ) - { - ZDICT_cover_params_t coverParams; - FASTCOVER_accel_t accelParams; - /* constants */ - uint nbThreads = parameters->nbThreads; - double splitPoint = parameters->splitPoint <= 0 ? 0.75 : parameters->splitPoint; - uint kMinD = parameters->d == 0 ? 6 : parameters->d; - uint kMaxD = parameters->d == 0 ? 8 : parameters->d; - uint kMinK = parameters->k == 0 ? 50 : parameters->k; - uint kMaxK = parameters->k == 0 ? 2000 : parameters->k; - uint kSteps = parameters->steps == 0 ? 40 : parameters->steps; - uint kStepSize = (kMaxK - kMinK) / kSteps > 1 ? (kMaxK - kMinK) / kSteps : 1; - uint kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); - uint f = parameters->f == 0 ? 20 : parameters->f; - uint accel = parameters->accel == 0 ? 
1 : parameters->accel; - const uint shrinkDict = 0; - /* Local variables */ - int displayLevel = (int)parameters->zParams.notificationLevel; - uint iteration = 1; - uint d; - uint k; - COVER_best_s best; - void* pool = null; - int warned = 0; - if (splitPoint <= 0 || splitPoint > 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } - - if (accel == 0 || accel > 10) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } - - if (kMinK < kMaxD || kMaxK < kMinK) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } - - if (nbSamples == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - - if (dictBufferCapacity < 256) + if (warned == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); + warned = 1; } - if (nbThreads > 1) + for (k = kMinK; k <= kMaxK; k += kStepSize) { - pool = POOL_create(nbThreads, 1); - if (pool == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } - } - - COVER_best_init(&best); - coverParams = new ZDICT_cover_params_t(); - FASTCOVER_convertToCoverParams(*parameters, &coverParams); - accelParams = FASTCOVER_defaultAccelParameters[accel]; - g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; - for (d = kMinD; d <= kMaxD; d += 2) - { - /* Initialize the context for this value of d */ - FASTCOVER_ctx_t ctx; + /* Prepare the arguments */ + FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc( + (ulong)sizeof(FASTCOVER_tryParameters_data_s) + ); + if (data == null) { - nuint initVal = FASTCOVER_ctx_init( - &ctx, - samplesBuffer, - samplesSizes, - nbSamples, - d, - splitPoint, - f, - accelParams + COVER_best_destroy(&best); + FASTCOVER_ctx_destroy(&ctx); + POOL_free(pool); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) ); - if (ERR_isError(initVal)) - { - COVER_best_destroy(&best); - POOL_free(pool); - return initVal; - } } - if (warned == 0) + data->ctx = &ctx; + data->best = &best; + data->dictBufferCapacity = dictBufferCapacity; + data->parameters = coverParams; + data->parameters.k = k; + data->parameters.d = d; + data->parameters.splitPoint = splitPoint; + data->parameters.steps = kSteps; + data->parameters.shrinkDict = shrinkDict; + data->parameters.zParams.notificationLevel = (uint)g_displayLevel; + if ( + FASTCOVER_checkParameters( + data->parameters, + dictBufferCapacity, + data->ctx->f, + accel + ) == 0 + ) { - COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); - warned = 1; + free(data); + continue; } - for (k = kMinK; k <= kMaxK; k += kStepSize) + COVER_best_start(&best); + if (pool != null) { - /* Prepare the arguments */ - FASTCOVER_tryParameters_data_s* data = (FASTCOVER_tryParameters_data_s*)malloc( - (ulong)sizeof(FASTCOVER_tryParameters_data_s) + POOL_add( + pool, + (delegate* managed)(&FASTCOVER_tryParameters), + data ); - if (data == null) - { - COVER_best_destroy(&best); - FASTCOVER_ctx_destroy(&ctx); - POOL_free(pool); - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - data->ctx = &ctx; - data->best = &best; - data->dictBufferCapacity = dictBufferCapacity; - data->parameters = coverParams; - 
data->parameters.k = k; - data->parameters.d = d; - data->parameters.splitPoint = splitPoint; - data->parameters.steps = kSteps; - data->parameters.shrinkDict = shrinkDict; - data->parameters.zParams.notificationLevel = (uint)g_displayLevel; - if ( - FASTCOVER_checkParameters( - data->parameters, - dictBufferCapacity, - data->ctx->f, - accel - ) == 0 - ) - { - free(data); - continue; - } - - COVER_best_start(&best); - if (pool != null) - { - POOL_add( - pool, - (delegate* managed)(&FASTCOVER_tryParameters), - data - ); - } - else - { - FASTCOVER_tryParameters(data); - } - - ++iteration; + } + else + { + FASTCOVER_tryParameters(data); } - COVER_best_wait(&best); - FASTCOVER_ctx_destroy(&ctx); + ++iteration; } - { - nuint dictSize = best.dictSize; - if (ERR_isError(best.compressedSize)) - { - nuint compressedSize = best.compressedSize; - COVER_best_destroy(&best); - POOL_free(pool); - return compressedSize; - } + COVER_best_wait(&best); + FASTCOVER_ctx_destroy(&ctx); + } - FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); - memcpy(dictBuffer, best.dict, (uint)dictSize); + { + nuint dictSize = best.dictSize; + if (ERR_isError(best.compressedSize)) + { + nuint compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); - return dictSize; + return compressedSize; } + + FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); + memcpy(dictBuffer, best.dict, (uint)dictSize); + COVER_best_destroy(&best); + POOL_free(pool); + return dictSize; } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs index 90559d8fa..c48772357 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fingerprint.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct Fingerprint { - public unsafe struct 
Fingerprint - { - public fixed uint events[1024]; - public nuint nbEvents; - } + public fixed uint events[1024]; + public nuint nbEvents; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs index f42b45a14..05e4b30ae 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs @@ -1,210 +1,209 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct) { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_initCState(FSE_CState_t* statePtr, uint* ct) - { - void* ptr = ct; - ushort* u16ptr = (ushort*)ptr; - uint tableLog = MEM_read16(ptr); - statePtr->value = (nint)1 << (int)tableLog; - statePtr->stateTable = u16ptr + 2; - statePtr->symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1); - statePtr->stateLog = tableLog; - } - - /*! 
FSE_initCState2() : - * Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) - * uses the smallest state value possible, saving the cost of this symbol */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint symbol) - { - FSE_initCState(ref statePtr, ct); - { - FSE_symbolCompressionTransform symbolTT = ( - (FSE_symbolCompressionTransform*)statePtr.symbolTT - )[symbol]; - ushort* stateTable = (ushort*)statePtr.stateTable; - uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16; - statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits); - statePtr.value = stateTable[ - (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState - ]; - } - } + void* ptr = ct; + ushort* u16ptr = (ushort*)ptr; + uint tableLog = MEM_read16(ptr); + statePtr->value = (nint)1 << (int)tableLog; + statePtr->stateTable = u16ptr + 2; + statePtr->symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1); + statePtr->stateLog = tableLog; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_encodeSymbol( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - ref FSE_CState_t statePtr, - uint symbol - ) + /*! 
FSE_initCState2() : + * Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) + * uses the smallest state value possible, saving the cost of this symbol */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_initCState2(ref FSE_CState_t statePtr, uint* ct, uint symbol) + { + FSE_initCState(ref statePtr, ct); { FSE_symbolCompressionTransform symbolTT = ( (FSE_symbolCompressionTransform*)statePtr.symbolTT )[symbol]; ushort* stateTable = (ushort*)statePtr.stateTable; - uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16; - BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut); + uint nbBitsOut = symbolTT.deltaNbBits + (1 << 15) >> 16; + statePtr.value = (nint)((nbBitsOut << 16) - symbolTT.deltaNbBits); statePtr.value = stateTable[ (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState ]; } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_flushCState( - ref nuint bitC_bitContainer, - ref uint bitC_bitPos, - ref sbyte* bitC_ptr, - sbyte* bitC_endPtr, - ref FSE_CState_t statePtr - ) - { - BIT_addBits( - ref bitC_bitContainer, - ref bitC_bitPos, - (nuint)statePtr.value, - statePtr.stateLog - ); - BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_encodeSymbol( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref FSE_CState_t statePtr, + uint symbol + ) + { + FSE_symbolCompressionTransform symbolTT = ( + (FSE_symbolCompressionTransform*)statePtr.symbolTT + )[symbol]; + ushort* stateTable = (ushort*)statePtr.stateTable; + uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16; + BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut); + statePtr.value = stateTable[ + (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState + ]; + } - /* FSE_getMaxNbBits() : 
- * Approximate maximum cost of a symbol, in bits. - * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue) - { - FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr; - return symbolTT[symbolValue].deltaNbBits + ((1 << 16) - 1) >> 16; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_flushCState( + ref nuint bitC_bitContainer, + ref uint bitC_bitPos, + ref sbyte* bitC_ptr, + sbyte* bitC_endPtr, + ref FSE_CState_t statePtr + ) + { + BIT_addBits( + ref bitC_bitContainer, + ref bitC_bitPos, + (nuint)statePtr.value, + statePtr.stateLog + ); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); + } - /* FSE_bitCost() : - * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint FSE_bitCost( - void* symbolTTPtr, - uint tableLog, - uint symbolValue, - uint accuracyLog - ) - { - FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr; - uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; - uint threshold = minNbBits + 1 << 16; - assert(tableLog < 16); - assert(accuracyLog < 31 - tableLog); - { - uint tableSize = (uint)(1 << (int)tableLog); - uint deltaFromThreshold = - threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); - /* linear interpolation (very approximate) */ - uint normalizedDeltaFromThreshold = - deltaFromThreshold << (int)accuracyLog 
>> (int)tableLog; - uint bitMultiplier = (uint)(1 << (int)accuracyLog); - assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); - assert(normalizedDeltaFromThreshold <= bitMultiplier); - return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold; - } - } + /* FSE_getMaxNbBits() : + * Approximate maximum cost of a symbol, in bits. + * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) + * note 1 : assume symbolValue is valid (<= maxSymbolValue) + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint FSE_getMaxNbBits(void* symbolTTPtr, uint symbolValue) + { + FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr; + return symbolTT[symbolValue].deltaNbBits + ((1 << 16) - 1) >> 16; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_initDState( - ref FSE_DState_t DStatePtr, - ref BIT_DStream_t bitD, - uint* dt - ) + /* FSE_bitCost() : + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) + * note 1 : assume symbolValue is valid (<= maxSymbolValue) + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint FSE_bitCost( + void* symbolTTPtr, + uint tableLog, + uint symbolValue, + uint accuracyLog + ) + { + FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)symbolTTPtr; + uint minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; + uint threshold = minNbBits + 1 << 16; + assert(tableLog < 16); + assert(accuracyLog < 31 - tableLog); { - void* ptr = dt; - FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; - DStatePtr.state = BIT_readBits( - bitD.bitContainer, - ref bitD.bitsConsumed, - DTableH->tableLog - ); - BIT_reloadDStream( - ref bitD.bitContainer, - 
ref bitD.bitsConsumed, - ref bitD.ptr, - bitD.start, - bitD.limitPtr - ); - DStatePtr.table = dt + 1; + uint tableSize = (uint)(1 << (int)tableLog); + uint deltaFromThreshold = + threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); + /* linear interpolation (very approximate) */ + uint normalizedDeltaFromThreshold = + deltaFromThreshold << (int)accuracyLog >> (int)tableLog; + uint bitMultiplier = (uint)(1 << (int)accuracyLog); + assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); + assert(normalizedDeltaFromThreshold <= bitMultiplier); + return (minNbBits + 1) * bitMultiplier - normalizedDeltaFromThreshold; } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr) - { - FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state]; - return DInfo.symbol; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_initDState( + ref FSE_DState_t DStatePtr, + ref BIT_DStream_t bitD, + uint* dt + ) + { + void* ptr = dt; + FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; + DStatePtr.state = BIT_readBits( + bitD.bitContainer, + ref bitD.bitsConsumed, + DTableH->tableLog + ); + BIT_reloadDStream( + ref bitD.bitContainer, + ref bitD.bitsConsumed, + ref bitD.ptr, + bitD.start, + bitD.limitPtr + ); + DStatePtr.table = dt + 1; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) - { - FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state]; - uint nbBits = DInfo.nbBits; - nuint lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.newState + lowBits; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte FSE_peekSymbol(FSE_DState_t* DStatePtr) + { + FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state]; + return DInfo.symbol; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static 
byte FSE_decodeSymbol( - ref FSE_DState_t DStatePtr, - nuint bitD_bitContainer, - ref uint bitD_bitsConsumed - ) - { - FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; - uint nbBits = DInfo.nbBits; - byte symbol = DInfo.symbol; - nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); - DStatePtr.state = DInfo.newState + lowBits; - return symbol; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) + { + FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr->table)[DStatePtr->state]; + uint nbBits = DInfo.nbBits; + nuint lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.newState + lowBits; + } - /*! FSE_decodeSymbolFast() : - unsafe, only works if no symbol has a probability > 50% */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte FSE_decodeSymbolFast( - ref FSE_DState_t DStatePtr, - nuint bitD_bitContainer, - ref uint bitD_bitsConsumed - ) - { - FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; - uint nbBits = DInfo.nbBits; - byte symbol = DInfo.symbol; - nuint lowBits = BIT_readBitsFast(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); - DStatePtr.state = DInfo.newState + lowBits; - return symbol; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte FSE_decodeSymbol( + ref FSE_DState_t DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed + ) + { + FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; + uint nbBits = DInfo.nbBits; + byte symbol = DInfo.symbol; + nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); + DStatePtr.state = DInfo.newState + lowBits; + return symbol; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint FSE_endOfDState(FSE_DState_t* DStatePtr) - { - return DStatePtr->state == 0 ? 1U : 0U; - } + /*! 
FSE_decodeSymbolFast() : + unsafe, only works if no symbol has a probability > 50% */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte FSE_decodeSymbolFast( + ref FSE_DState_t DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed + ) + { + FSE_decode_t DInfo = ((FSE_decode_t*)DStatePtr.table)[DStatePtr.state]; + uint nbBits = DInfo.nbBits; + byte symbol = DInfo.symbol; + nuint lowBits = BIT_readBitsFast(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); + DStatePtr.state = DInfo.newState + lowBits; + return symbol; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_initCState(ref FSE_CState_t statePtr, uint* ct) - { - void* ptr = ct; - ushort* u16ptr = (ushort*)ptr; - uint tableLog = MEM_read16(ptr); - statePtr.value = (nint)1 << (int)tableLog; - statePtr.stateTable = u16ptr + 2; - statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1); - statePtr.stateLog = tableLog; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint FSE_endOfDState(FSE_DState_t* DStatePtr) + { + return DStatePtr->state == 0 ? 1U : 0U; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void FSE_initCState(ref FSE_CState_t statePtr, uint* ct) + { + void* ptr = ct; + ushort* u16ptr = (ushort*)ptr; + uint tableLog = MEM_read16(ptr); + statePtr.value = (nint)1 << (int)tableLog; + statePtr.stateTable = u16ptr + 2; + statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 
1 << (int)(tableLog - 1) : 1); + statePtr.stateLog = tableLog; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs index 2a7de3aba..abe85537f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs @@ -1,293 +1,261 @@ using System; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * wkspSize should be sized to handle worst case situation, which is `1<> 1 : 1); + FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCT; + uint step = (tableSize >> 1) + (tableSize >> 3) + 3; + uint maxSV1 = maxSymbolValue + 1; + /* size = maxSV1 */ + ushort* cumul = (ushort*)workSpace; + /* size = tableSize */ + byte* tableSymbol = (byte*)(cumul + (maxSV1 + 1)); + uint highThreshold = tableSize - 1; + assert(((nuint)workSpace & 1) == 0); + if ( + sizeof(uint) + * ( + (maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + + sizeof(ulong) / sizeof(uint) + ) + > wkspSize ) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + tableU16[-2] = (ushort)tableLog; + tableU16[-1] = (ushort)maxSymbolValue; + assert(tableLog < 16); { - uint tableSize = (uint)(1 << (int)tableLog); - uint tableMask = tableSize - 1; - void* ptr = ct; - ushort* tableU16 = (ushort*)ptr + 2; - /* header */ - void* FSCT = (uint*)ptr + 1 + (tableLog != 0 ? 
tableSize >> 1 : 1); - FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCT; - uint step = (tableSize >> 1) + (tableSize >> 3) + 3; - uint maxSV1 = maxSymbolValue + 1; - /* size = maxSV1 */ - ushort* cumul = (ushort*)workSpace; - /* size = tableSize */ - byte* tableSymbol = (byte*)(cumul + (maxSV1 + 1)); - uint highThreshold = tableSize - 1; - assert(((nuint)workSpace & 1) == 0); - if ( - sizeof(uint) - * ( - (maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 - + sizeof(ulong) / sizeof(uint) - ) - > wkspSize - ) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - tableU16[-2] = (ushort)tableLog; - tableU16[-1] = (ushort)maxSymbolValue; - assert(tableLog < 16); + uint u; + cumul[0] = 0; + for (u = 1; u <= maxSV1; u++) { - uint u; - cumul[0] = 0; - for (u = 1; u <= maxSV1; u++) + if (normalizedCounter[u - 1] == -1) { - if (normalizedCounter[u - 1] == -1) - { - cumul[u] = (ushort)(cumul[u - 1] + 1); - tableSymbol[highThreshold--] = (byte)(u - 1); - } - else - { - assert(normalizedCounter[u - 1] >= 0); - cumul[u] = (ushort)(cumul[u - 1] + (ushort)normalizedCounter[u - 1]); - assert(cumul[u] >= cumul[u - 1]); - } + cumul[u] = (ushort)(cumul[u - 1] + 1); + tableSymbol[highThreshold--] = (byte)(u - 1); } - - cumul[maxSV1] = (ushort)(tableSize + 1); - } - - if (highThreshold == tableSize - 1) - { - /* size = tableSize + 8 (may write beyond tableSize) */ - byte* spread = tableSymbol + tableSize; + else { - const ulong add = 0x0101010101010101UL; - nuint pos = 0; - ulong sv = 0; - uint s; - for (s = 0; s < maxSV1; ++s, sv += add) - { - int i; - int n = normalizedCounter[s]; - MEM_write64(spread + pos, sv); - for (i = 8; i < n; i += 8) - { - MEM_write64(spread + pos + i, sv); - } - - assert(n >= 0); - pos += (nuint)n; - } + assert(normalizedCounter[u - 1] >= 0); + cumul[u] = (ushort)(cumul[u - 1] + (ushort)normalizedCounter[u - 1]); + assert(cumul[u] >= cumul[u - 1]); } + } + cumul[maxSV1] = (ushort)(tableSize + 1); + } 
+ + if (highThreshold == tableSize - 1) + { + /* size = tableSize + 8 (may write beyond tableSize) */ + byte* spread = tableSymbol + tableSize; + { + const ulong add = 0x0101010101010101UL; + nuint pos = 0; + ulong sv = 0; + uint s; + for (s = 0; s < maxSV1; ++s, sv += add) { - nuint position = 0; - nuint s; - /* Experimentally determined optimal unroll */ - const nuint unroll = 2; - assert(tableSize % unroll == 0); - for (s = 0; s < tableSize; s += unroll) + int i; + int n = normalizedCounter[s]; + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) { - nuint u; - for (u = 0; u < unroll; ++u) - { - nuint uPosition = position + u * step & tableMask; - tableSymbol[uPosition] = spread[s + u]; - } - - position = position + unroll * step & tableMask; + MEM_write64(spread + pos + i, sv); } - assert(position == 0); + assert(n >= 0); + pos += (nuint)n; } } - else + { - uint position = 0; - uint symbol; - for (symbol = 0; symbol < maxSV1; symbol++) + nuint position = 0; + nuint s; + /* Experimentally determined optimal unroll */ + const nuint unroll = 2; + assert(tableSize % unroll == 0); + for (s = 0; s < tableSize; s += unroll) { - int nbOccurrences; - int freq = normalizedCounter[symbol]; - for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++) + nuint u; + for (u = 0; u < unroll; ++u) { - tableSymbol[position] = (byte)symbol; - position = position + step & tableMask; - while (position > highThreshold) - position = position + step & tableMask; + nuint uPosition = position + u * step & tableMask; + tableSymbol[uPosition] = spread[s + u]; } + + position = position + unroll * step & tableMask; } assert(position == 0); } - - { - uint u; - for (u = 0; u < tableSize; u++) - { - /* note : static analyzer may not understand tableSymbol is properly initialized */ - byte s = tableSymbol[u]; - tableU16[cumul[s]++] = (ushort)(tableSize + u); - } - } - + } + else + { + uint position = 0; + uint symbol; + for (symbol = 0; symbol < maxSV1; symbol++) { - uint total = 0; 
- uint s; - for (s = 0; s <= maxSymbolValue; s++) + int nbOccurrences; + int freq = normalizedCounter[symbol]; + for (nbOccurrences = 0; nbOccurrences < freq; nbOccurrences++) { - switch (normalizedCounter[s]) - { - case 0: - symbolTT[s].deltaNbBits = - (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); - break; - case -1: - case 1: - symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)(1 << (int)tableLog); - assert(total <= 2147483647); - symbolTT[s].deltaFindState = (int)(total - 1); - total++; - break; - default: - assert(normalizedCounter[s] > 1); - - { - uint maxBitsOut = - tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1); - uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut; - symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; - symbolTT[s].deltaFindState = (int)( - total - (uint)normalizedCounter[s] - ); - total += (uint)normalizedCounter[s]; - } - - break; - } + tableSymbol[position] = (byte)symbol; + position = position + step & tableMask; + while (position > highThreshold) + position = position + step & tableMask; } } - return 0; + assert(position == 0); } - /*-************************************************************** - * FSE NCount encoding - ****************************************************************/ - private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog) { - nuint maxHeaderSize = ((maxSymbolValue + 1) * tableLog + 4 + 2) / 8 + 1 + 2; - return maxSymbolValue != 0 ? 
maxHeaderSize : 512; + uint u; + for (u = 0; u < tableSize; u++) + { + /* note : static analyzer may not understand tableSymbol is properly initialized */ + byte s = tableSymbol[u]; + tableU16[cumul[s]++] = (ushort)(tableSize + u); + } } - private static nuint FSE_writeNCount_generic( - void* header, - nuint headerBufferSize, - short* normalizedCounter, - uint maxSymbolValue, - uint tableLog, - uint writeIsSafe - ) { - byte* ostart = (byte*)header; - byte* @out = ostart; - byte* oend = ostart + headerBufferSize; - int nbBits; - int tableSize = 1 << (int)tableLog; - int remaining; - int threshold; - uint bitStream = 0; - int bitCount = 0; - uint symbol = 0; - uint alphabetSize = maxSymbolValue + 1; - int previousIs0 = 0; - bitStream += tableLog - 5 << bitCount; - bitCount += 4; - remaining = tableSize + 1; - threshold = tableSize; - nbBits = (int)tableLog + 1; - while (symbol < alphabetSize && remaining > 1) + uint total = 0; + uint s; + for (s = 0; s <= maxSymbolValue; s++) { - if (previousIs0 != 0) + switch (normalizedCounter[s]) { - uint start = symbol; - while (symbol < alphabetSize && normalizedCounter[symbol] == 0) - symbol++; - if (symbol == alphabetSize) + case 0: + symbolTT[s].deltaNbBits = + (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); break; - while (symbol >= start + 24) - { - start += 24; - bitStream += 0xFFFFU << bitCount; - if (writeIsSafe == 0 && @out > oend - 2) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - @out[0] = (byte)bitStream; - @out[1] = (byte)(bitStream >> 8); - @out += 2; - bitStream >>= 16; - } + case -1: + case 1: + symbolTT[s].deltaNbBits = (tableLog << 16) - (uint)(1 << (int)tableLog); + assert(total <= 2147483647); + symbolTT[s].deltaFindState = (int)(total - 1); + total++; + break; + default: + assert(normalizedCounter[s] > 1); - while (symbol >= start + 3) { - start += 3; - bitStream += 3U << bitCount; - bitCount += 2; + uint maxBitsOut = + tableLog - 
ZSTD_highbit32((uint)normalizedCounter[s] - 1); + uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut; + symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; + symbolTT[s].deltaFindState = (int)( + total - (uint)normalizedCounter[s] + ); + total += (uint)normalizedCounter[s]; } - bitStream += symbol - start << bitCount; - bitCount += 2; - if (bitCount > 16) - { - if (writeIsSafe == 0 && @out > oend - 2) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - @out[0] = (byte)bitStream; - @out[1] = (byte)(bitStream >> 8); - @out += 2; - bitStream >>= 16; - bitCount -= 16; - } + break; + } + } + } + + return 0; + } + + /*-************************************************************** + * FSE NCount encoding + ****************************************************************/ + private static nuint FSE_NCountWriteBound(uint maxSymbolValue, uint tableLog) + { + nuint maxHeaderSize = ((maxSymbolValue + 1) * tableLog + 4 + 2) / 8 + 1 + 2; + return maxSymbolValue != 0 ? 
maxHeaderSize : 512; + } + + private static nuint FSE_writeNCount_generic( + void* header, + nuint headerBufferSize, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + uint writeIsSafe + ) + { + byte* ostart = (byte*)header; + byte* @out = ostart; + byte* oend = ostart + headerBufferSize; + int nbBits; + int tableSize = 1 << (int)tableLog; + int remaining; + int threshold; + uint bitStream = 0; + int bitCount = 0; + uint symbol = 0; + uint alphabetSize = maxSymbolValue + 1; + int previousIs0 = 0; + bitStream += tableLog - 5 << bitCount; + bitCount += 4; + remaining = tableSize + 1; + threshold = tableSize; + nbBits = (int)tableLog + 1; + while (symbol < alphabetSize && remaining > 1) + { + if (previousIs0 != 0) + { + uint start = symbol; + while (symbol < alphabetSize && normalizedCounter[symbol] == 0) + symbol++; + if (symbol == alphabetSize) + break; + while (symbol >= start + 24) + { + start += 24; + bitStream += 0xFFFFU << bitCount; + if (writeIsSafe == 0 && @out > oend - 2) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + @out[0] = (byte)bitStream; + @out[1] = (byte)(bitStream >> 8); + @out += 2; + bitStream >>= 16; } + while (symbol >= start + 3) { - int count = normalizedCounter[symbol++]; - int max = 2 * threshold - 1 - remaining; - remaining -= count < 0 ? -count : count; - count++; - if (count >= threshold) - count += max; - bitStream += (uint)count << bitCount; - bitCount += nbBits; - bitCount -= count < max ? 1 : 0; - previousIs0 = count == 1 ? 
1 : 0; - if (remaining < 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - while (remaining < threshold) - { - nbBits--; - threshold >>= 1; - } + start += 3; + bitStream += 3U << bitCount; + bitCount += 2; } + bitStream += symbol - start << bitCount; + bitCount += 2; if (bitCount > 16) { if (writeIsSafe == 0 && @out > oend - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); @out[0] = (byte)bitStream; @out[1] = (byte)(bitStream >> 8); @out += 2; @@ -296,223 +264,255 @@ uint writeIsSafe } } - if (remaining != 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - assert(symbol <= alphabetSize); - if (writeIsSafe == 0 && @out > oend - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - @out[0] = (byte)bitStream; - @out[1] = (byte)(bitStream >> 8); - @out += (bitCount + 7) / 8; - assert(@out >= ostart); - return (nuint)(@out - ostart); + { + int count = normalizedCounter[symbol++]; + int max = 2 * threshold - 1 - remaining; + remaining -= count < 0 ? -count : count; + count++; + if (count >= threshold) + count += max; + bitStream += (uint)count << bitCount; + bitCount += nbBits; + bitCount -= count < max ? 1 : 0; + previousIs0 = count == 1 ? 1 : 0; + if (remaining < 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + while (remaining < threshold) + { + nbBits--; + threshold >>= 1; + } + } + + if (bitCount > 16) + { + if (writeIsSafe == 0 && @out > oend - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + @out[0] = (byte)bitStream; + @out[1] = (byte)(bitStream >> 8); + @out += 2; + bitStream >>= 16; + bitCount -= 16; + } } - /*! FSE_writeNCount(): - Compactly save 'normalizedCounter' into 'buffer'. - @return : size of the compressed table, - or an errorCode, which can be tested using FSE_isError(). 
*/ - private static nuint FSE_writeNCount( - void* buffer, - nuint bufferSize, - short* normalizedCounter, - uint maxSymbolValue, - uint tableLog - ) - { - if (tableLog > 14 - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - if (tableLog < 5) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) - return FSE_writeNCount_generic( - buffer, - bufferSize, - normalizedCounter, - maxSymbolValue, - tableLog, - 0 - ); + if (remaining != 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + assert(symbol <= alphabetSize); + if (writeIsSafe == 0 && @out > oend - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + @out[0] = (byte)bitStream; + @out[1] = (byte)(bitStream >> 8); + @out += (bitCount + 7) / 8; + assert(@out >= ostart); + return (nuint)(@out - ostart); + } + + /*! FSE_writeNCount(): + Compactly save 'normalizedCounter' into 'buffer'. + @return : size of the compressed table, + or an errorCode, which can be tested using FSE_isError(). 
*/ + private static nuint FSE_writeNCount( + void* buffer, + nuint bufferSize, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog + ) + { + if (tableLog > 14 - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (tableLog < 5) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) return FSE_writeNCount_generic( buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, - 1 + 0 ); - } + return FSE_writeNCount_generic( + buffer, + bufferSize, + normalizedCounter, + maxSymbolValue, + tableLog, + 1 + ); + } - /* provides the minimum logSize to safely represent a distribution */ - private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue) - { - uint minBitsSrc = ZSTD_highbit32((uint)srcSize) + 1; - uint minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2; - uint minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; - assert(srcSize > 1); - return minBits; - } + /* provides the minimum logSize to safely represent a distribution */ + private static uint FSE_minTableLog(nuint srcSize, uint maxSymbolValue) + { + uint minBitsSrc = ZSTD_highbit32((uint)srcSize) + 1; + uint minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2; + uint minBits = minBitsSrc < minBitsSymbols ? 
minBitsSrc : minBitsSymbols; + assert(srcSize > 1); + return minBits; + } - /* ***************************************** - * FSE advanced API - ***************************************** */ - private static uint FSE_optimalTableLog_internal( - uint maxTableLog, - nuint srcSize, - uint maxSymbolValue, - uint minus - ) - { - uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus; - uint tableLog = maxTableLog; - uint minBits = FSE_minTableLog(srcSize, maxSymbolValue); - assert(srcSize > 1); - if (tableLog == 0) - tableLog = 13 - 2; - if (maxBitsSrc < tableLog) - tableLog = maxBitsSrc; - if (minBits > tableLog) - tableLog = minBits; - if (tableLog < 5) - tableLog = 5; - if (tableLog > 14 - 2) - tableLog = 14 - 2; - return tableLog; - } + /* ***************************************** + * FSE advanced API + ***************************************** */ + private static uint FSE_optimalTableLog_internal( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue, + uint minus + ) + { + uint maxBitsSrc = ZSTD_highbit32((uint)(srcSize - 1)) - minus; + uint tableLog = maxTableLog; + uint minBits = FSE_minTableLog(srcSize, maxSymbolValue); + assert(srcSize > 1); + if (tableLog == 0) + tableLog = 13 - 2; + if (maxBitsSrc < tableLog) + tableLog = maxBitsSrc; + if (minBits > tableLog) + tableLog = minBits; + if (tableLog < 5) + tableLog = 5; + if (tableLog > 14 - 2) + tableLog = 14 - 2; + return tableLog; + } - /*! FSE_optimalTableLog(): - dynamically downsize 'tableLog' when conditions are met. - It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. - @return : recommended tableLog (necessarily <= 'maxTableLog') */ - private static uint FSE_optimalTableLog( - uint maxTableLog, - nuint srcSize, - uint maxSymbolValue - ) + /*! FSE_optimalTableLog(): + dynamically downsize 'tableLog' when conditions are met. + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. 
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */ + private static uint FSE_optimalTableLog( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue + ) + { + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); + } + + /* Secondary normalization method. + To be used when primary method fails. */ + private static nuint FSE_normalizeM2( + short* norm, + uint tableLog, + uint* count, + nuint total, + uint maxSymbolValue, + short lowProbCount + ) + { + const short NOT_YET_ASSIGNED = -2; + uint s; + uint distributed = 0; + uint ToDistribute; + /* Init */ + uint lowThreshold = (uint)(total >> (int)tableLog); + uint lowOne = (uint)(total * 3 >> (int)(tableLog + 1)); + for (s = 0; s <= maxSymbolValue; s++) { - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); + if (count[s] == 0) + { + norm[s] = 0; + continue; + } + + if (count[s] <= lowThreshold) + { + norm[s] = lowProbCount; + distributed++; + total -= count[s]; + continue; + } + + if (count[s] <= lowOne) + { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } + + norm[s] = NOT_YET_ASSIGNED; } - /* Secondary normalization method. - To be used when primary method fails. 
*/ - private static nuint FSE_normalizeM2( - short* norm, - uint tableLog, - uint* count, - nuint total, - uint maxSymbolValue, - short lowProbCount - ) + ToDistribute = (uint)(1 << (int)tableLog) - distributed; + if (ToDistribute == 0) + return 0; + if (total / ToDistribute > lowOne) { - const short NOT_YET_ASSIGNED = -2; - uint s; - uint distributed = 0; - uint ToDistribute; - /* Init */ - uint lowThreshold = (uint)(total >> (int)tableLog); - uint lowOne = (uint)(total * 3 >> (int)(tableLog + 1)); + lowOne = (uint)(total * 3 / (ToDistribute * 2)); for (s = 0; s <= maxSymbolValue; s++) { - if (count[s] == 0) - { - norm[s] = 0; - continue; - } - - if (count[s] <= lowThreshold) - { - norm[s] = lowProbCount; - distributed++; - total -= count[s]; - continue; - } - - if (count[s] <= lowOne) + if (norm[s] == NOT_YET_ASSIGNED && count[s] <= lowOne) { norm[s] = 1; distributed++; total -= count[s]; continue; } - - norm[s] = NOT_YET_ASSIGNED; } ToDistribute = (uint)(1 << (int)tableLog) - distributed; - if (ToDistribute == 0) - return 0; - if (total / ToDistribute > lowOne) - { - lowOne = (uint)(total * 3 / (ToDistribute * 2)); - for (s = 0; s <= maxSymbolValue; s++) + } + + if (distributed == maxSymbolValue + 1) + { + /* all values are pretty poor; + probably incompressible data (should have already been detected); + find max, then give all remaining points to max */ + uint maxV = 0, + maxC = 0; + for (s = 0; s <= maxSymbolValue; s++) + if (count[s] > maxC) { - if (norm[s] == NOT_YET_ASSIGNED && count[s] <= lowOne) - { - norm[s] = 1; - distributed++; - total -= count[s]; - continue; - } + maxV = s; + maxC = count[s]; } - ToDistribute = (uint)(1 << (int)tableLog) - distributed; - } - - if (distributed == maxSymbolValue + 1) - { - /* all values are pretty poor; - probably incompressible data (should have already been detected); - find max, then give all remaining points to max */ - uint maxV = 0, - maxC = 0; - for (s = 0; s <= maxSymbolValue; s++) - if (count[s] > maxC) - { - 
maxV = s; - maxC = count[s]; - } - - norm[maxV] += (short)ToDistribute; - return 0; - } + norm[maxV] += (short)ToDistribute; + return 0; + } - if (total == 0) - { - for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1)) - if (norm[s] > 0) - { - ToDistribute--; - norm[s]++; - } + if (total == 0) + { + for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1)) + if (norm[s] > 0) + { + ToDistribute--; + norm[s]++; + } - return 0; - } + return 0; + } + { + ulong vStepLog = 62 - tableLog; + ulong mid = (1UL << (int)(vStepLog - 1)) - 1; + /* scale on remaining */ + ulong rStep = (((ulong)1 << (int)vStepLog) * ToDistribute + mid) / (uint)total; + ulong tmpTotal = mid; + for (s = 0; s <= maxSymbolValue; s++) { - ulong vStepLog = 62 - tableLog; - ulong mid = (1UL << (int)(vStepLog - 1)) - 1; - /* scale on remaining */ - ulong rStep = (((ulong)1 << (int)vStepLog) * ToDistribute + mid) / (uint)total; - ulong tmpTotal = mid; - for (s = 0; s <= maxSymbolValue; s++) + if (norm[s] == NOT_YET_ASSIGNED) { - if (norm[s] == NOT_YET_ASSIGNED) - { - ulong end = tmpTotal + count[s] * rStep; - uint sStart = (uint)(tmpTotal >> (int)vStepLog); - uint sEnd = (uint)(end >> (int)vStepLog); - uint weight = sEnd - sStart; - if (weight < 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - norm[s] = (short)weight; - tmpTotal = end; - } + ulong end = tmpTotal + count[s] * rStep; + uint sStart = (uint)(tmpTotal >> (int)vStepLog); + uint sEnd = (uint)(end >> (int)vStepLog); + uint weight = sEnd - sStart; + if (weight < 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + norm[s] = (short)weight; + tmpTotal = end; } } - - return 0; } + return 0; + } + #if NET7_0_OR_GREATER private static ReadOnlySpan Span_rtbTable => new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; @@ -523,184 +523,204 @@ ref MemoryMarshal.GetReference(Span_rtbTable) ); #else - private static readonly uint* rtbTable = GetArrayPointer( - new 
uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 } - ); + private static readonly uint* rtbTable = GetArrayPointer( + new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 } + ); #endif - /*! FSE_normalizeCount(): - normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) - 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). - useLowProbCount is a boolean parameter which trades off compressed size for - faster header decoding. When it is set to 1, the compressed data will be slightly - smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be - faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 - is a good default, since header deserialization makes a big speed difference. - Otherwise, useLowProbCount=1 is a good default, since the speed difference is small. - @return : tableLog, - or an errorCode, which can be tested using FSE_isError() */ - private static nuint FSE_normalizeCount( - short* normalizedCounter, - uint tableLog, - uint* count, - nuint total, - uint maxSymbolValue, - uint useLowProbCount - ) + /*! FSE_normalizeCount(): + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + useLowProbCount is a boolean parameter which trades off compressed size for + faster header decoding. When it is set to 1, the compressed data will be slightly + smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be + faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0 + is a good default, since header deserialization makes a big speed difference. + Otherwise, useLowProbCount=1 is a good default, since the speed difference is small. 
+ @return : tableLog, + or an errorCode, which can be tested using FSE_isError() */ + private static nuint FSE_normalizeCount( + short* normalizedCounter, + uint tableLog, + uint* count, + nuint total, + uint maxSymbolValue, + uint useLowProbCount + ) + { + if (tableLog == 0) + tableLog = 13 - 2; + if (tableLog < 5) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (tableLog > 14 - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); { - if (tableLog == 0) - tableLog = 13 - 2; - if (tableLog < 5) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (tableLog > 14 - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - if (tableLog < FSE_minTableLog(total, maxSymbolValue)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1); + ulong scale = 62 - tableLog; + /* <== here, one division ! */ + ulong step = ((ulong)1 << 62) / (uint)total; + ulong vStep = 1UL << (int)(scale - 20); + int stillToDistribute = 1 << (int)tableLog; + uint s; + uint largest = 0; + short largestP = 0; + uint lowThreshold = (uint)(total >> (int)tableLog); + for (s = 0; s <= maxSymbolValue; s++) { - short lowProbCount = (short)(useLowProbCount != 0 ? -1 : 1); - ulong scale = 62 - tableLog; - /* <== here, one division ! 
*/ - ulong step = ((ulong)1 << 62) / (uint)total; - ulong vStep = 1UL << (int)(scale - 20); - int stillToDistribute = 1 << (int)tableLog; - uint s; - uint largest = 0; - short largestP = 0; - uint lowThreshold = (uint)(total >> (int)tableLog); - for (s = 0; s <= maxSymbolValue; s++) + if (count[s] == total) + return 0; + if (count[s] == 0) { - if (count[s] == total) - return 0; - if (count[s] == 0) - { - normalizedCounter[s] = 0; - continue; - } + normalizedCounter[s] = 0; + continue; + } - if (count[s] <= lowThreshold) + if (count[s] <= lowThreshold) + { + normalizedCounter[s] = lowProbCount; + stillToDistribute--; + } + else + { + short proba = (short)(count[s] * step >> (int)scale); + if (proba < 8) { - normalizedCounter[s] = lowProbCount; - stillToDistribute--; + ulong restToBeat = vStep * rtbTable[proba]; + proba += (short)( + count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 1 : 0 + ); } - else + + if (proba > largestP) { - short proba = (short)(count[s] * step >> (int)scale); - if (proba < 8) - { - ulong restToBeat = vStep * rtbTable[proba]; - proba += (short)( - count[s] * step - ((ulong)proba << (int)scale) > restToBeat ? 
1 : 0 - ); - } - - if (proba > largestP) - { - largestP = proba; - largest = s; - } - - normalizedCounter[s] = proba; - stillToDistribute -= proba; + largestP = proba; + largest = s; } - } - if (-stillToDistribute >= normalizedCounter[largest] >> 1) - { - /* corner case, need another normalization method */ - nuint errorCode = FSE_normalizeM2( - normalizedCounter, - tableLog, - count, - total, - maxSymbolValue, - lowProbCount - ); - if (ERR_isError(errorCode)) - return errorCode; + normalizedCounter[s] = proba; + stillToDistribute -= proba; } - else - normalizedCounter[largest] += (short)stillToDistribute; } - return tableLog; + if (-stillToDistribute >= normalizedCounter[largest] >> 1) + { + /* corner case, need another normalization method */ + nuint errorCode = FSE_normalizeM2( + normalizedCounter, + tableLog, + count, + total, + maxSymbolValue, + lowProbCount + ); + if (ERR_isError(errorCode)) + return errorCode; + } + else + normalizedCounter[largest] += (short)stillToDistribute; } - /* fake FSE_CTable, for rle input (always same symbol) */ - private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue) - { - void* ptr = ct; - ushort* tableU16 = (ushort*)ptr + 2; - void* FSCTptr = (uint*)ptr + 2; - FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCTptr; - tableU16[-2] = 0; - tableU16[-1] = symbolValue; - tableU16[0] = 0; - tableU16[1] = 0; - symbolTT[symbolValue].deltaNbBits = 0; - symbolTT[symbolValue].deltaFindState = 0; + return tableLog; + } + + /* fake FSE_CTable, for rle input (always same symbol) */ + private static nuint FSE_buildCTable_rle(uint* ct, byte symbolValue) + { + void* ptr = ct; + ushort* tableU16 = (ushort*)ptr + 2; + void* FSCTptr = (uint*)ptr + 2; + FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*)FSCTptr; + tableU16[-2] = 0; + tableU16[-1] = symbolValue; + tableU16[0] = 0; + tableU16[1] = 0; + symbolTT[symbolValue].deltaNbBits = 0; + symbolTT[symbolValue].deltaFindState = 0; 
+ return 0; + } + + private static nuint FSE_compress_usingCTable_generic( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* ct, + uint fast + ) + { + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + byte* ip = iend; + BIT_CStream_t bitC; + System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); + FSE_CState_t CState1, + CState2; + System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1); + System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2); + if (srcSize <= 2) return 0; + { + nuint initError = BIT_initCStream(ref bitC, dst, dstSize); + if (ERR_isError(initError)) + return 0; } - private static nuint FSE_compress_usingCTable_generic( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint* ct, - uint fast - ) + nuint bitC_bitContainer = bitC.bitContainer; + uint bitC_bitPos = bitC.bitPos; + sbyte* bitC_ptr = bitC.ptr; + sbyte* bitC_endPtr = bitC.endPtr; + if ((srcSize & 1) != 0) { - byte* istart = (byte*)src; - byte* iend = istart + srcSize; - byte* ip = iend; - BIT_CStream_t bitC; - System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); - FSE_CState_t CState1, - CState2; - System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1); - System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2); - if (srcSize <= 2) - return 0; - { - nuint initError = BIT_initCStream(ref bitC, dst, dstSize); - if (ERR_isError(initError)) - return 0; - } + FSE_initCState2(ref CState1, ct, *--ip); + FSE_initCState2(ref CState2, ct, *--ip); + FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); + if (fast != 0) + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); + else + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); + } + else + { + FSE_initCState2(ref CState2, ct, *--ip); + FSE_initCState2(ref CState1, ct, *--ip); + } - nuint bitC_bitContainer = bitC.bitContainer; - uint bitC_bitPos = bitC.bitPos; - 
sbyte* bitC_ptr = bitC.ptr; - sbyte* bitC_endPtr = bitC.endPtr; - if ((srcSize & 1) != 0) - { - FSE_initCState2(ref CState1, ct, *--ip); - FSE_initCState2(ref CState2, ct, *--ip); - FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); - if (fast != 0) - BIT_flushBitsFast( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); - else - BIT_flushBits( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); - } + srcSize -= 2; + if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7 && (srcSize & 2) != 0) + { + FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); + FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); + if (fast != 0) + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - { - FSE_initCState2(ref CState2, ct, *--ip); - FSE_initCState2(ref CState1, ct, *--ip); - } + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); + } - srcSize -= 2; - if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7 && (srcSize & 2) != 0) - { - FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); - FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); + while (ip > istart) + { + FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); + if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7) if (fast != 0) BIT_flushBitsFast( ref bitC_bitContainer, @@ -715,98 +735,77 @@ uint fast ref bitC_ptr, bitC_endPtr ); - } - - while (ip > istart) + FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); + if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7) { FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); - if (sizeof(nuint) * 8 < (14 - 2) * 2 + 7) - if (fast != 0) - BIT_flushBitsFast( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); - else - BIT_flushBits( - ref 
bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); - if (sizeof(nuint) * 8 > (14 - 2) * 4 + 7) - { - FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState2, *--ip); - FSE_encodeSymbol(ref bitC_bitContainer, ref bitC_bitPos, ref CState1, *--ip); - } - - if (fast != 0) - BIT_flushBitsFast( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); - else - BIT_flushBits( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); } - FSE_flushCState( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr, - ref CState2 - ); - FSE_flushCState( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr, - ref CState1 - ); - return BIT_closeCStream( - ref bitC_bitContainer, - ref bitC_bitPos, - bitC_ptr, - bitC_endPtr, - bitC.startPtr - ); - } - - /*! FSE_compress_usingCTable(): - Compress `src` using `ct` into `dst` which must be already allocated. - @return : size of compressed data (<= `dstCapacity`), - or 0 if compressed data could not fit into `dst`, - or an errorCode, which can be tested using FSE_isError() */ - private static nuint FSE_compress_usingCTable( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint* ct - ) - { - uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 
1U : 0U; if (fast != 0) - return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + BIT_flushBitsFast( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); else - return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); + BIT_flushBits( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr + ); } - /*-***************************************** - * Tool functions - ******************************************/ - private static nuint FSE_compressBound(nuint size) - { - return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint)); - } + FSE_flushCState( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr, + ref CState2 + ); + FSE_flushCState( + ref bitC_bitContainer, + ref bitC_bitPos, + ref bitC_ptr, + bitC_endPtr, + ref CState1 + ); + return BIT_closeCStream( + ref bitC_bitContainer, + ref bitC_bitPos, + bitC_ptr, + bitC_endPtr, + bitC.startPtr + ); + } + + /*! FSE_compress_usingCTable(): + Compress `src` using `ct` into `dst` which must be already allocated. + @return : size of compressed data (<= `dstCapacity`), + or 0 if compressed data could not fit into `dst`, + or an errorCode, which can be tested using FSE_isError() */ + private static nuint FSE_compress_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* ct + ) + { + uint fast = dstSize >= srcSize + (srcSize >> 7) + 4 + (nuint)sizeof(nuint) ? 
1U : 0U; + if (fast != 0) + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + else + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); + } + + /*-***************************************** + * Tool functions + ******************************************/ + private static nuint FSE_compressBound(nuint size) + { + return 512 + (size + (size >> 7) + 4 + (nuint)sizeof(nuint)); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs index 9dee183ab..e869c5752 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs @@ -1,196 +1,281 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + private static nuint FSE_buildDTable_internal( + uint* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + void* workSpace, + nuint wkspSize + ) { - private static nuint FSE_buildDTable_internal( - uint* dt, - short* normalizedCounter, - uint maxSymbolValue, - uint tableLog, - void* workSpace, - nuint wkspSize - ) + /* because *dt is unsigned, 32-bits aligned on 32-bits */ + void* tdPtr = dt + 1; + FSE_decode_t* tableDecode = (FSE_decode_t*)tdPtr; + ushort* symbolNext = (ushort*)workSpace; + byte* spread = (byte*)(symbolNext + maxSymbolValue + 1); + uint maxSV1 = maxSymbolValue + 1; + uint tableSize = (uint)(1 << (int)tableLog); + uint highThreshold = tableSize - 1; + if (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 > wkspSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + if (maxSymbolValue > 255) + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + if (tableLog > 14 - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); { - /* because *dt is unsigned, 32-bits aligned on 32-bits */ - void* tdPtr = dt + 1; - FSE_decode_t* tableDecode = (FSE_decode_t*)tdPtr; - ushort* symbolNext = (ushort*)workSpace; - byte* spread = (byte*)(symbolNext + maxSymbolValue + 1); - uint maxSV1 = maxSymbolValue + 1; - uint tableSize = (uint)(1 << (int)tableLog); - uint highThreshold = tableSize - 1; - if (sizeof(short) * (maxSymbolValue + 1) + (1UL << (int)tableLog) + 8 > wkspSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); - if (maxSymbolValue > 255) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); - if (tableLog > 14 - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + FSE_DTableHeader DTableH; + DTableH.tableLog = (ushort)tableLog; + DTableH.fastMode = 1; { - FSE_DTableHeader DTableH; - DTableH.tableLog = (ushort)tableLog; - DTableH.fastMode = 1; + short largeLimit = (short)(1 << (int)(tableLog - 1)); + uint s; + for (s = 0; s < maxSV1; s++) { - short largeLimit = (short)(1 << (int)(tableLog - 1)); - uint s; - for (s = 0; s < maxSV1; s++) + if (normalizedCounter[s] == -1) + { + tableDecode[highThreshold--].symbol = (byte)s; + symbolNext[s] = 1; + } + else { - if (normalizedCounter[s] == -1) - { - tableDecode[highThreshold--].symbol = (byte)s; - symbolNext[s] = 1; - } - else - { - if (normalizedCounter[s] >= largeLimit) - DTableH.fastMode = 0; - symbolNext[s] = (ushort)normalizedCounter[s]; - } + if (normalizedCounter[s] >= largeLimit) + DTableH.fastMode = 0; + symbolNext[s] = (ushort)normalizedCounter[s]; } } - - memcpy(dt, &DTableH, (uint)sizeof(FSE_DTableHeader)); } - if (highThreshold == tableSize - 1) + memcpy(dt, &DTableH, (uint)sizeof(FSE_DTableHeader)); + } + + if (highThreshold == tableSize - 1) + { + 
nuint tableMask = tableSize - 1; + nuint step = (tableSize >> 1) + (tableSize >> 3) + 3; { - nuint tableMask = tableSize - 1; - nuint step = (tableSize >> 1) + (tableSize >> 3) + 3; + const ulong add = 0x0101010101010101UL; + nuint pos = 0; + ulong sv = 0; + uint s; + for (s = 0; s < maxSV1; ++s, sv += add) { - const ulong add = 0x0101010101010101UL; - nuint pos = 0; - ulong sv = 0; - uint s; - for (s = 0; s < maxSV1; ++s, sv += add) + int i; + int n = normalizedCounter[s]; + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) { - int i; - int n = normalizedCounter[s]; - MEM_write64(spread + pos, sv); - for (i = 8; i < n; i += 8) - { - MEM_write64(spread + pos + i, sv); - } - - pos += (nuint)n; + MEM_write64(spread + pos + i, sv); } + + pos += (nuint)n; } + } + { + nuint position = 0; + nuint s; + const nuint unroll = 2; + assert(tableSize % unroll == 0); + for (s = 0; s < tableSize; s += unroll) { - nuint position = 0; - nuint s; - const nuint unroll = 2; - assert(tableSize % unroll == 0); - for (s = 0; s < tableSize; s += unroll) + nuint u; + for (u = 0; u < unroll; ++u) { - nuint u; - for (u = 0; u < unroll; ++u) - { - nuint uPosition = position + u * step & tableMask; - tableDecode[uPosition].symbol = spread[s + u]; - } - - position = position + unroll * step & tableMask; + nuint uPosition = position + u * step & tableMask; + tableDecode[uPosition].symbol = spread[s + u]; } - assert(position == 0); + position = position + unroll * step & tableMask; } + + assert(position == 0); } - else + } + else + { + uint tableMask = tableSize - 1; + uint step = (tableSize >> 1) + (tableSize >> 3) + 3; + uint s, + position = 0; + for (s = 0; s < maxSV1; s++) { - uint tableMask = tableSize - 1; - uint step = (tableSize >> 1) + (tableSize >> 3) + 3; - uint s, - position = 0; - for (s = 0; s < maxSV1; s++) + int i; + for (i = 0; i < normalizedCounter[s]; i++) { - int i; - for (i = 0; i < normalizedCounter[s]; i++) - { - tableDecode[position].symbol = (byte)s; + 
tableDecode[position].symbol = (byte)s; + position = position + step & tableMask; + while (position > highThreshold) position = position + step & tableMask; - while (position > highThreshold) - position = position + step & tableMask; - } } - - if (position != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } + if (position != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + + { + uint u; + for (u = 0; u < tableSize; u++) { - uint u; - for (u = 0; u < tableSize; u++) - { - byte symbol = tableDecode[u].symbol; - uint nextState = symbolNext[symbol]++; - tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); - tableDecode[u].newState = (ushort)( - (nextState << tableDecode[u].nbBits) - tableSize - ); - } + byte symbol = tableDecode[u].symbol; + uint nextState = symbolNext[symbol]++; + tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); + tableDecode[u].newState = (ushort)( + (nextState << tableDecode[u].nbBits) - tableSize + ); } + } + + return 0; + } + + private static nuint FSE_buildDTable_wksp( + uint* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint tableLog, + void* workSpace, + nuint wkspSize + ) + { + return FSE_buildDTable_internal( + dt, + normalizedCounter, + maxSymbolValue, + tableLog, + workSpace, + wkspSize + ); + } - return 0; + /*-******************************************************* + * Decompression (Byte symbols) + *********************************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint FSE_decompress_usingDTable_generic( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* dt, + uint fast + ) + { + byte* ostart = (byte*)dst; + byte* op = ostart; + byte* omax = op + maxDstSize; + byte* olimit = omax - 3; + BIT_DStream_t bitD; + System.Runtime.CompilerServices.Unsafe.SkipInit(out bitD); + FSE_DState_t state1; + System.Runtime.CompilerServices.Unsafe.SkipInit(out state1); 
+ FSE_DState_t state2; + System.Runtime.CompilerServices.Unsafe.SkipInit(out state2); + { + /* Init */ + nuint _var_err__ = BIT_initDStream(ref bitD, cSrc, cSrcSize); + if (ERR_isError(_var_err__)) + return _var_err__; } - private static nuint FSE_buildDTable_wksp( - uint* dt, - short* normalizedCounter, - uint maxSymbolValue, - uint tableLog, - void* workSpace, - nuint wkspSize + FSE_initDState(ref state1, ref bitD, dt); + FSE_initDState(ref state2, ref bitD, dt); + nuint bitD_bitContainer = bitD.bitContainer; + uint bitD_bitsConsumed = bitD.bitsConsumed; + sbyte* bitD_ptr = bitD.ptr; + sbyte* bitD_start = bitD.start; + sbyte* bitD_limitPtr = bitD.limitPtr; + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_overflow ) { - return FSE_buildDTable_internal( - dt, - normalizedCounter, - maxSymbolValue, - tableLog, - workSpace, - wkspSize - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - /*-******************************************************* - * Decompression (Byte symbols) - *********************************************************/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_decompress_usingDTable_generic( - void* dst, - nuint maxDstSize, - void* cSrc, - nuint cSrcSize, - uint* dt, - uint fast + for ( + ; + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) == BIT_DStream_status.BIT_DStream_unfinished + && op < olimit; + op += 4 ) { - byte* ostart = (byte*)dst; - byte* op = ostart; - byte* omax = op + maxDstSize; - byte* olimit = omax - 3; - BIT_DStream_t bitD; - System.Runtime.CompilerServices.Unsafe.SkipInit(out bitD); - FSE_DState_t state1; - System.Runtime.CompilerServices.Unsafe.SkipInit(out state1); - FSE_DState_t state2; - System.Runtime.CompilerServices.Unsafe.SkipInit(out state2); + op[0] = + fast != 0 
+ ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ); + op[1] = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8) { - /* Init */ - nuint _var_err__ = BIT_initDStream(ref bitD, cSrc, cSrcSize); - if (ERR_isError(_var_err__)) - return _var_err__; + if ( + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ) > BIT_DStream_status.BIT_DStream_unfinished + ) + { + op += 2; + break; + } } - FSE_initDState(ref state1, ref bitD, dt); - FSE_initDState(ref state2, ref bitD, dt); - nuint bitD_bitContainer = bitD.bitContainer; - uint bitD_bitsConsumed = bitD.bitsConsumed; - sbyte* bitD_ptr = bitD.ptr; - sbyte* bitD_start = bitD.start; - sbyte* bitD_limitPtr = bitD.limitPtr; + op[2] = + fast != 0 + ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); + if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) + BIT_reloadDStream( + ref bitD_bitContainer, + ref bitD_bitsConsumed, + ref bitD_ptr, + bitD_start, + bitD_limitPtr + ); + op[3] = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + } + + while (true) + { + if (op > omax - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + *op++ = + fast != 0 + ? 
FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); if ( BIT_reloadDStream( ref bitD_bitContainer, @@ -201,288 +286,202 @@ uint fast ) == BIT_DStream_status.BIT_DStream_overflow ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + *op++ = + fast != 0 + ? FSE_decodeSymbolFast( + ref state2, + bitD_bitContainer, + ref bitD_bitsConsumed + ) + : FSE_decodeSymbol( + ref state2, + bitD_bitContainer, + ref bitD_bitsConsumed + ); + break; } - for ( - ; + if (op > omax - 2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + *op++ = + fast != 0 + ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); + if ( BIT_reloadDStream( ref bitD_bitContainer, ref bitD_bitsConsumed, ref bitD_ptr, bitD_start, bitD_limitPtr - ) == BIT_DStream_status.BIT_DStream_unfinished - && op < olimit; - op += 4 + ) == BIT_DStream_status.BIT_DStream_overflow ) { - op[0] = - fast != 0 - ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); - if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) - BIT_reloadDStream( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start, - bitD_limitPtr - ); - op[1] = - fast != 0 - ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); - if ((14 - 2) * 4 + 7 > sizeof(nuint) * 8) - { - if ( - BIT_reloadDStream( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start, - bitD_limitPtr - ) > BIT_DStream_status.BIT_DStream_unfinished - ) - { - op += 2; - break; - } - } - - op[2] = - fast != 0 - ? 
FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); - if ((14 - 2) * 2 + 7 > sizeof(nuint) * 8) - BIT_reloadDStream( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start, - bitD_limitPtr - ); - op[3] = - fast != 0 - ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); - } - - while (true) - { - if (op > omax - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - *op++ = - fast != 0 - ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); - if ( - BIT_reloadDStream( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start, - bitD_limitPtr - ) == BIT_DStream_status.BIT_DStream_overflow - ) - { - *op++ = - fast != 0 - ? FSE_decodeSymbolFast( - ref state2, - bitD_bitContainer, - ref bitD_bitsConsumed - ) - : FSE_decodeSymbol( - ref state2, - bitD_bitContainer, - ref bitD_bitsConsumed - ); - break; - } - - if (op > omax - 2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); *op++ = fast != 0 - ? FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) - : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); - if ( - BIT_reloadDStream( - ref bitD_bitContainer, - ref bitD_bitsConsumed, - ref bitD_ptr, - bitD_start, - bitD_limitPtr - ) == BIT_DStream_status.BIT_DStream_overflow - ) - { - *op++ = - fast != 0 - ? FSE_decodeSymbolFast( - ref state1, - bitD_bitContainer, - ref bitD_bitsConsumed - ) - : FSE_decodeSymbol( - ref state1, - bitD_bitContainer, - ref bitD_bitsConsumed - ); - break; - } + ? 
FSE_decodeSymbolFast( + ref state1, + bitD_bitContainer, + ref bitD_bitsConsumed + ) + : FSE_decodeSymbol( + ref state1, + bitD_bitContainer, + ref bitD_bitsConsumed + ); + break; } + } - assert(op >= ostart); - return (nuint)(op - ostart); + assert(op >= ostart); + return (nuint)(op - ostart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint FSE_decompress_wksp_body( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize, + int bmi2 + ) + { + byte* istart = (byte*)cSrc; + byte* ip = istart; + uint tableLog; + uint maxSymbolValue = 255; + FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)workSpace; + nuint dtablePos = (nuint)(sizeof(FSE_DecompressWksp) / sizeof(uint)); + uint* dtable = (uint*)workSpace + dtablePos; + if (wkspSize < (nuint)sizeof(FSE_DecompressWksp)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + { + nuint NCountLength = FSE_readNCount_bmi2( + wksp->ncount, + &maxSymbolValue, + &tableLog, + istart, + cSrcSize, + bmi2 + ); + if (ERR_isError(NCountLength)) + return NCountLength; + if (tableLog > maxLog) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + assert(NCountLength <= cSrcSize); + ip += NCountLength; + cSrcSize -= NCountLength; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint FSE_decompress_wksp_body( - void* dst, - nuint dstCapacity, - void* cSrc, - nuint cSrcSize, - uint maxLog, - void* workSpace, - nuint wkspSize, - int bmi2 + if ( + ( + (ulong)(1 + (1 << (int)tableLog) + 1) + + ( + sizeof(short) * (maxSymbolValue + 1) + + (1UL << (int)tableLog) + + 8 + + sizeof(uint) + - 1 + ) / sizeof(uint) + + (255 + 1) / 2 + + 1 + ) * sizeof(uint) + > wkspSize ) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + assert( + (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) + <= wkspSize + ); + workSpace = + 
(byte*)workSpace + + sizeof(FSE_DecompressWksp) + + (1 + (1 << (int)tableLog)) * sizeof(uint); + wkspSize -= (nuint)( + sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint) + ); { - byte* istart = (byte*)cSrc; - byte* ip = istart; - uint tableLog; - uint maxSymbolValue = 255; - FSE_DecompressWksp* wksp = (FSE_DecompressWksp*)workSpace; - nuint dtablePos = (nuint)(sizeof(FSE_DecompressWksp) / sizeof(uint)); - uint* dtable = (uint*)workSpace + dtablePos; - if (wkspSize < (nuint)sizeof(FSE_DecompressWksp)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - { - nuint NCountLength = FSE_readNCount_bmi2( - wksp->ncount, - &maxSymbolValue, - &tableLog, - istart, - cSrcSize, - bmi2 - ); - if (ERR_isError(NCountLength)) - return NCountLength; - if (tableLog > maxLog) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - assert(NCountLength <= cSrcSize); - ip += NCountLength; - cSrcSize -= NCountLength; - } - - if ( - ( - (ulong)(1 + (1 << (int)tableLog) + 1) - + ( - sizeof(short) * (maxSymbolValue + 1) - + (1UL << (int)tableLog) - + 8 - + sizeof(uint) - - 1 - ) / sizeof(uint) - + (255 + 1) / 2 - + 1 - ) * sizeof(uint) - > wkspSize - ) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - assert( - (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) - <= wkspSize - ); - workSpace = - (byte*)workSpace - + sizeof(FSE_DecompressWksp) - + (1 + (1 << (int)tableLog)) * sizeof(uint); - wkspSize -= (nuint)( - sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint) + nuint _var_err__ = FSE_buildDTable_internal( + dtable, + wksp->ncount, + maxSymbolValue, + tableLog, + workSpace, + wkspSize ); - { - nuint _var_err__ = FSE_buildDTable_internal( - dtable, - wksp->ncount, - maxSymbolValue, - tableLog, - workSpace, - wkspSize - ); - if (ERR_isError(_var_err__)) - return _var_err__; - } + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - void* 
ptr = dtable; - FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; - uint fastMode = DTableH->fastMode; - if (fastMode != 0) - return FSE_decompress_usingDTable_generic( - dst, - dstCapacity, - ip, - cSrcSize, - dtable, - 1 - ); + { + void* ptr = dtable; + FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; + uint fastMode = DTableH->fastMode; + if (fastMode != 0) return FSE_decompress_usingDTable_generic( dst, dstCapacity, ip, cSrcSize, dtable, - 0 + 1 ); - } - } - - /* Avoids the FORCE_INLINE of the _body() function. */ - private static nuint FSE_decompress_wksp_body_default( - void* dst, - nuint dstCapacity, - void* cSrc, - nuint cSrcSize, - uint maxLog, - void* workSpace, - nuint wkspSize - ) - { - return FSE_decompress_wksp_body( + return FSE_decompress_usingDTable_generic( dst, dstCapacity, - cSrc, + ip, cSrcSize, - maxLog, - workSpace, - wkspSize, + dtable, 0 ); } + } - private static nuint FSE_decompress_wksp_bmi2( - void* dst, - nuint dstCapacity, - void* cSrc, - nuint cSrcSize, - uint maxLog, - void* workSpace, - nuint wkspSize, - int bmi2 - ) - { - return FSE_decompress_wksp_body_default( - dst, - dstCapacity, - cSrc, - cSrcSize, - maxLog, - workSpace, - wkspSize - ); - } + /* Avoids the FORCE_INLINE of the _body() function. 
*/ + private static nuint FSE_decompress_wksp_body_default( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize + ) + { + return FSE_decompress_wksp_body( + dst, + dstCapacity, + cSrc, + cSrcSize, + maxLog, + workSpace, + wkspSize, + 0 + ); + } + + private static nuint FSE_decompress_wksp_bmi2( + void* dst, + nuint dstCapacity, + void* cSrc, + nuint cSrcSize, + uint maxLog, + void* workSpace, + nuint wkspSize, + int bmi2 + ) + { + return FSE_decompress_wksp_body_default( + dst, + dstCapacity, + cSrc, + cSrcSize, + maxLog, + workSpace, + wkspSize + ); } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs index a36d71635..c3a8a82b7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum HIST_checkInput_e { - public enum HIST_checkInput_e - { - trustInput, - checkMaxSymbolValue, - } -} + trustInput, + checkMaxSymbolValue, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs index e00f59c84..b86d99f3d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CStream_t.cs @@ -1,23 +1,22 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_CStream_t { - public unsafe struct HUF_CStream_t - { - public _bitContainer_e__FixedBuffer bitContainer; - public _bitPos_e__FixedBuffer bitPos; - public byte* startPtr; - public byte* ptr; - public byte* endPtr; + public _bitContainer_e__FixedBuffer bitContainer; + public 
_bitPos_e__FixedBuffer bitPos; + public byte* startPtr; + public byte* ptr; + public byte* endPtr; - public unsafe struct _bitContainer_e__FixedBuffer - { - public nuint e0; - public nuint e1; - } + public unsafe struct _bitContainer_e__FixedBuffer + { + public nuint e0; + public nuint e1; + } - public unsafe struct _bitPos_e__FixedBuffer - { - public nuint e0; - public nuint e1; - } + public unsafe struct _bitPos_e__FixedBuffer + { + public nuint e0; + public nuint e1; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs index f89e380cd..cc542493a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CTableHeader.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_CTableHeader { - public unsafe struct HUF_CTableHeader - { - public byte tableLog; - public byte maxSymbolValue; - public fixed byte unused[6]; - } + public byte tableLog; + public byte maxSymbolValue; + public fixed byte unused[6]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs index f3017cfa6..2ee9b03ac 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_CompressWeightsWksp { - public unsafe struct HUF_CompressWeightsWksp - { - public fixed uint CTable[59]; - public fixed uint scratchBuffer[41]; - public fixed uint count[13]; - public fixed short norm[13]; - } -} + public fixed uint CTable[59]; + public fixed uint scratchBuffer[41]; + public fixed uint count[13]; + public fixed short 
norm[13]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs index 1ce3290b3..94d25d9d1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX1.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-***************************/ +/* single-symbol decoding */ +/*-***************************/ +public struct HUF_DEltX1 { - /*-***************************/ - /* single-symbol decoding */ - /*-***************************/ - public struct HUF_DEltX1 - { - /* single-symbol decoding */ - public byte nbBits; - public byte @byte; - } + /* single-symbol decoding */ + public byte nbBits; + public byte @byte; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs index 534b89829..0ab3f1d49 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DEltX2.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* *************************/ +/* double-symbols decoding */ +/* *************************/ +public struct HUF_DEltX2 { - /* *************************/ /* double-symbols decoding */ - /* *************************/ - public struct HUF_DEltX2 - { - /* double-symbols decoding */ - public ushort sequence; - public byte nbBits; - public byte length; - } + public ushort sequence; + public byte nbBits; + public byte length; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs index 4add3e9de..ca170534a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs @@ -1,50 +1,49 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilowest [in] - The beginning of the valid range of the input. Decoders may read + * down to this pointer. It may be below iend[0]. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. ip[i] may cross iend[i], + * as long as it is above ilowest, but that indicates corruption. + */ +public unsafe struct HUF_DecompressFastArgs { - /** - * The input/output arguments to the Huffman fast decoding loop: - * - * ip [in/out] - The input pointers, must be updated to reflect what is consumed. - * op [in/out] - The output pointers, must be updated to reflect what is written. - * bits [in/out] - The bitstream containers, must be updated to reflect the current state. - * dt [in] - The decoding table. - * ilowest [in] - The beginning of the valid range of the input. Decoders may read - * down to this pointer. It may be below iend[0]. - * oend [in] - The end of the output stream. op[3] must not cross oend. - * iend [in] - The end of each input stream. ip[i] may cross iend[i], - * as long as it is above ilowest, but that indicates corruption. 
- */ - public unsafe struct HUF_DecompressFastArgs - { - public _ip_e__FixedBuffer ip; - public _op_e__FixedBuffer op; - public fixed ulong bits[4]; - public void* dt; - public byte* ilowest; - public byte* oend; - public _iend_e__FixedBuffer iend; + public _ip_e__FixedBuffer ip; + public _op_e__FixedBuffer op; + public fixed ulong bits[4]; + public void* dt; + public byte* ilowest; + public byte* oend; + public _iend_e__FixedBuffer iend; - public unsafe struct _ip_e__FixedBuffer - { - public byte* e0; - public byte* e1; - public byte* e2; - public byte* e3; - } + public unsafe struct _ip_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; + } - public unsafe struct _op_e__FixedBuffer - { - public byte* e0; - public byte* e1; - public byte* e2; - public byte* e3; - } + public unsafe struct _op_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; + } - public unsafe struct _iend_e__FixedBuffer - { - public byte* e0; - public byte* e1; - public byte* e2; - public byte* e3; - } + public unsafe struct _iend_e__FixedBuffer + { + public byte* e0; + public byte* e1; + public byte* e2; + public byte* e3; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs index 47e6cca50..5ba94164c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_ReadDTableX1_Workspace { - public unsafe struct HUF_ReadDTableX1_Workspace - { - public fixed uint rankVal[13]; - public fixed uint rankStart[13]; - public fixed uint statsWksp[219]; - public fixed byte symbols[256]; - public fixed byte huffWeight[256]; - } -} + 
public fixed uint rankVal[13]; + public fixed uint rankStart[13]; + public fixed uint statsWksp[219]; + public fixed byte symbols[256]; + public fixed byte huffWeight[256]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs index 03dc19a92..9d188e9b4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs @@ -1,15 +1,15 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_ReadDTableX2_Workspace { - public unsafe struct HUF_ReadDTableX2_Workspace - { - public _rankVal_e__FixedBuffer rankVal; - public fixed uint rankStats[13]; - public fixed uint rankStart0[15]; - public _sortedSymbol_e__FixedBuffer sortedSymbol; - public fixed byte weightList[256]; - public fixed uint calleeWksp[219]; + public _rankVal_e__FixedBuffer rankVal; + public fixed uint rankStats[13]; + public fixed uint rankStart0[15]; + public _sortedSymbol_e__FixedBuffer sortedSymbol; + public fixed byte weightList[256]; + public fixed uint calleeWksp[219]; #if NET8_0_OR_GREATER [InlineArray(12)] @@ -19,21 +19,21 @@ public unsafe struct _rankVal_e__FixedBuffer } #else - public unsafe struct _rankVal_e__FixedBuffer - { - public rankValCol_t e0; - public rankValCol_t e1; - public rankValCol_t e2; - public rankValCol_t e3; - public rankValCol_t e4; - public rankValCol_t e5; - public rankValCol_t e6; - public rankValCol_t e7; - public rankValCol_t e8; - public rankValCol_t e9; - public rankValCol_t e10; - public rankValCol_t e11; - } + public unsafe struct _rankVal_e__FixedBuffer + { + public rankValCol_t e0; + public rankValCol_t e1; + public rankValCol_t e2; + public rankValCol_t e3; + public rankValCol_t e4; + public rankValCol_t e5; + public 
rankValCol_t e6; + public rankValCol_t e7; + public rankValCol_t e8; + public rankValCol_t e9; + public rankValCol_t e10; + public rankValCol_t e11; + } #endif #if NET8_0_OR_GREATER @@ -44,265 +44,264 @@ public unsafe struct _sortedSymbol_e__FixedBuffer } #else - public unsafe struct _sortedSymbol_e__FixedBuffer - { - public sortedSymbol_t e0; - public sortedSymbol_t e1; - public sortedSymbol_t e2; - public sortedSymbol_t e3; - public sortedSymbol_t e4; - public sortedSymbol_t e5; - public sortedSymbol_t e6; - public sortedSymbol_t e7; - public sortedSymbol_t e8; - public sortedSymbol_t e9; - public sortedSymbol_t e10; - public sortedSymbol_t e11; - public sortedSymbol_t e12; - public sortedSymbol_t e13; - public sortedSymbol_t e14; - public sortedSymbol_t e15; - public sortedSymbol_t e16; - public sortedSymbol_t e17; - public sortedSymbol_t e18; - public sortedSymbol_t e19; - public sortedSymbol_t e20; - public sortedSymbol_t e21; - public sortedSymbol_t e22; - public sortedSymbol_t e23; - public sortedSymbol_t e24; - public sortedSymbol_t e25; - public sortedSymbol_t e26; - public sortedSymbol_t e27; - public sortedSymbol_t e28; - public sortedSymbol_t e29; - public sortedSymbol_t e30; - public sortedSymbol_t e31; - public sortedSymbol_t e32; - public sortedSymbol_t e33; - public sortedSymbol_t e34; - public sortedSymbol_t e35; - public sortedSymbol_t e36; - public sortedSymbol_t e37; - public sortedSymbol_t e38; - public sortedSymbol_t e39; - public sortedSymbol_t e40; - public sortedSymbol_t e41; - public sortedSymbol_t e42; - public sortedSymbol_t e43; - public sortedSymbol_t e44; - public sortedSymbol_t e45; - public sortedSymbol_t e46; - public sortedSymbol_t e47; - public sortedSymbol_t e48; - public sortedSymbol_t e49; - public sortedSymbol_t e50; - public sortedSymbol_t e51; - public sortedSymbol_t e52; - public sortedSymbol_t e53; - public sortedSymbol_t e54; - public sortedSymbol_t e55; - public sortedSymbol_t e56; - public sortedSymbol_t e57; - public 
sortedSymbol_t e58; - public sortedSymbol_t e59; - public sortedSymbol_t e60; - public sortedSymbol_t e61; - public sortedSymbol_t e62; - public sortedSymbol_t e63; - public sortedSymbol_t e64; - public sortedSymbol_t e65; - public sortedSymbol_t e66; - public sortedSymbol_t e67; - public sortedSymbol_t e68; - public sortedSymbol_t e69; - public sortedSymbol_t e70; - public sortedSymbol_t e71; - public sortedSymbol_t e72; - public sortedSymbol_t e73; - public sortedSymbol_t e74; - public sortedSymbol_t e75; - public sortedSymbol_t e76; - public sortedSymbol_t e77; - public sortedSymbol_t e78; - public sortedSymbol_t e79; - public sortedSymbol_t e80; - public sortedSymbol_t e81; - public sortedSymbol_t e82; - public sortedSymbol_t e83; - public sortedSymbol_t e84; - public sortedSymbol_t e85; - public sortedSymbol_t e86; - public sortedSymbol_t e87; - public sortedSymbol_t e88; - public sortedSymbol_t e89; - public sortedSymbol_t e90; - public sortedSymbol_t e91; - public sortedSymbol_t e92; - public sortedSymbol_t e93; - public sortedSymbol_t e94; - public sortedSymbol_t e95; - public sortedSymbol_t e96; - public sortedSymbol_t e97; - public sortedSymbol_t e98; - public sortedSymbol_t e99; - public sortedSymbol_t e100; - public sortedSymbol_t e101; - public sortedSymbol_t e102; - public sortedSymbol_t e103; - public sortedSymbol_t e104; - public sortedSymbol_t e105; - public sortedSymbol_t e106; - public sortedSymbol_t e107; - public sortedSymbol_t e108; - public sortedSymbol_t e109; - public sortedSymbol_t e110; - public sortedSymbol_t e111; - public sortedSymbol_t e112; - public sortedSymbol_t e113; - public sortedSymbol_t e114; - public sortedSymbol_t e115; - public sortedSymbol_t e116; - public sortedSymbol_t e117; - public sortedSymbol_t e118; - public sortedSymbol_t e119; - public sortedSymbol_t e120; - public sortedSymbol_t e121; - public sortedSymbol_t e122; - public sortedSymbol_t e123; - public sortedSymbol_t e124; - public sortedSymbol_t e125; - public 
sortedSymbol_t e126; - public sortedSymbol_t e127; - public sortedSymbol_t e128; - public sortedSymbol_t e129; - public sortedSymbol_t e130; - public sortedSymbol_t e131; - public sortedSymbol_t e132; - public sortedSymbol_t e133; - public sortedSymbol_t e134; - public sortedSymbol_t e135; - public sortedSymbol_t e136; - public sortedSymbol_t e137; - public sortedSymbol_t e138; - public sortedSymbol_t e139; - public sortedSymbol_t e140; - public sortedSymbol_t e141; - public sortedSymbol_t e142; - public sortedSymbol_t e143; - public sortedSymbol_t e144; - public sortedSymbol_t e145; - public sortedSymbol_t e146; - public sortedSymbol_t e147; - public sortedSymbol_t e148; - public sortedSymbol_t e149; - public sortedSymbol_t e150; - public sortedSymbol_t e151; - public sortedSymbol_t e152; - public sortedSymbol_t e153; - public sortedSymbol_t e154; - public sortedSymbol_t e155; - public sortedSymbol_t e156; - public sortedSymbol_t e157; - public sortedSymbol_t e158; - public sortedSymbol_t e159; - public sortedSymbol_t e160; - public sortedSymbol_t e161; - public sortedSymbol_t e162; - public sortedSymbol_t e163; - public sortedSymbol_t e164; - public sortedSymbol_t e165; - public sortedSymbol_t e166; - public sortedSymbol_t e167; - public sortedSymbol_t e168; - public sortedSymbol_t e169; - public sortedSymbol_t e170; - public sortedSymbol_t e171; - public sortedSymbol_t e172; - public sortedSymbol_t e173; - public sortedSymbol_t e174; - public sortedSymbol_t e175; - public sortedSymbol_t e176; - public sortedSymbol_t e177; - public sortedSymbol_t e178; - public sortedSymbol_t e179; - public sortedSymbol_t e180; - public sortedSymbol_t e181; - public sortedSymbol_t e182; - public sortedSymbol_t e183; - public sortedSymbol_t e184; - public sortedSymbol_t e185; - public sortedSymbol_t e186; - public sortedSymbol_t e187; - public sortedSymbol_t e188; - public sortedSymbol_t e189; - public sortedSymbol_t e190; - public sortedSymbol_t e191; - public sortedSymbol_t 
e192; - public sortedSymbol_t e193; - public sortedSymbol_t e194; - public sortedSymbol_t e195; - public sortedSymbol_t e196; - public sortedSymbol_t e197; - public sortedSymbol_t e198; - public sortedSymbol_t e199; - public sortedSymbol_t e200; - public sortedSymbol_t e201; - public sortedSymbol_t e202; - public sortedSymbol_t e203; - public sortedSymbol_t e204; - public sortedSymbol_t e205; - public sortedSymbol_t e206; - public sortedSymbol_t e207; - public sortedSymbol_t e208; - public sortedSymbol_t e209; - public sortedSymbol_t e210; - public sortedSymbol_t e211; - public sortedSymbol_t e212; - public sortedSymbol_t e213; - public sortedSymbol_t e214; - public sortedSymbol_t e215; - public sortedSymbol_t e216; - public sortedSymbol_t e217; - public sortedSymbol_t e218; - public sortedSymbol_t e219; - public sortedSymbol_t e220; - public sortedSymbol_t e221; - public sortedSymbol_t e222; - public sortedSymbol_t e223; - public sortedSymbol_t e224; - public sortedSymbol_t e225; - public sortedSymbol_t e226; - public sortedSymbol_t e227; - public sortedSymbol_t e228; - public sortedSymbol_t e229; - public sortedSymbol_t e230; - public sortedSymbol_t e231; - public sortedSymbol_t e232; - public sortedSymbol_t e233; - public sortedSymbol_t e234; - public sortedSymbol_t e235; - public sortedSymbol_t e236; - public sortedSymbol_t e237; - public sortedSymbol_t e238; - public sortedSymbol_t e239; - public sortedSymbol_t e240; - public sortedSymbol_t e241; - public sortedSymbol_t e242; - public sortedSymbol_t e243; - public sortedSymbol_t e244; - public sortedSymbol_t e245; - public sortedSymbol_t e246; - public sortedSymbol_t e247; - public sortedSymbol_t e248; - public sortedSymbol_t e249; - public sortedSymbol_t e250; - public sortedSymbol_t e251; - public sortedSymbol_t e252; - public sortedSymbol_t e253; - public sortedSymbol_t e254; - public sortedSymbol_t e255; - } -#endif + public unsafe struct _sortedSymbol_e__FixedBuffer + { + public sortedSymbol_t e0; + 
public sortedSymbol_t e1; + public sortedSymbol_t e2; + public sortedSymbol_t e3; + public sortedSymbol_t e4; + public sortedSymbol_t e5; + public sortedSymbol_t e6; + public sortedSymbol_t e7; + public sortedSymbol_t e8; + public sortedSymbol_t e9; + public sortedSymbol_t e10; + public sortedSymbol_t e11; + public sortedSymbol_t e12; + public sortedSymbol_t e13; + public sortedSymbol_t e14; + public sortedSymbol_t e15; + public sortedSymbol_t e16; + public sortedSymbol_t e17; + public sortedSymbol_t e18; + public sortedSymbol_t e19; + public sortedSymbol_t e20; + public sortedSymbol_t e21; + public sortedSymbol_t e22; + public sortedSymbol_t e23; + public sortedSymbol_t e24; + public sortedSymbol_t e25; + public sortedSymbol_t e26; + public sortedSymbol_t e27; + public sortedSymbol_t e28; + public sortedSymbol_t e29; + public sortedSymbol_t e30; + public sortedSymbol_t e31; + public sortedSymbol_t e32; + public sortedSymbol_t e33; + public sortedSymbol_t e34; + public sortedSymbol_t e35; + public sortedSymbol_t e36; + public sortedSymbol_t e37; + public sortedSymbol_t e38; + public sortedSymbol_t e39; + public sortedSymbol_t e40; + public sortedSymbol_t e41; + public sortedSymbol_t e42; + public sortedSymbol_t e43; + public sortedSymbol_t e44; + public sortedSymbol_t e45; + public sortedSymbol_t e46; + public sortedSymbol_t e47; + public sortedSymbol_t e48; + public sortedSymbol_t e49; + public sortedSymbol_t e50; + public sortedSymbol_t e51; + public sortedSymbol_t e52; + public sortedSymbol_t e53; + public sortedSymbol_t e54; + public sortedSymbol_t e55; + public sortedSymbol_t e56; + public sortedSymbol_t e57; + public sortedSymbol_t e58; + public sortedSymbol_t e59; + public sortedSymbol_t e60; + public sortedSymbol_t e61; + public sortedSymbol_t e62; + public sortedSymbol_t e63; + public sortedSymbol_t e64; + public sortedSymbol_t e65; + public sortedSymbol_t e66; + public sortedSymbol_t e67; + public sortedSymbol_t e68; + public sortedSymbol_t e69; + public 
sortedSymbol_t e70; + public sortedSymbol_t e71; + public sortedSymbol_t e72; + public sortedSymbol_t e73; + public sortedSymbol_t e74; + public sortedSymbol_t e75; + public sortedSymbol_t e76; + public sortedSymbol_t e77; + public sortedSymbol_t e78; + public sortedSymbol_t e79; + public sortedSymbol_t e80; + public sortedSymbol_t e81; + public sortedSymbol_t e82; + public sortedSymbol_t e83; + public sortedSymbol_t e84; + public sortedSymbol_t e85; + public sortedSymbol_t e86; + public sortedSymbol_t e87; + public sortedSymbol_t e88; + public sortedSymbol_t e89; + public sortedSymbol_t e90; + public sortedSymbol_t e91; + public sortedSymbol_t e92; + public sortedSymbol_t e93; + public sortedSymbol_t e94; + public sortedSymbol_t e95; + public sortedSymbol_t e96; + public sortedSymbol_t e97; + public sortedSymbol_t e98; + public sortedSymbol_t e99; + public sortedSymbol_t e100; + public sortedSymbol_t e101; + public sortedSymbol_t e102; + public sortedSymbol_t e103; + public sortedSymbol_t e104; + public sortedSymbol_t e105; + public sortedSymbol_t e106; + public sortedSymbol_t e107; + public sortedSymbol_t e108; + public sortedSymbol_t e109; + public sortedSymbol_t e110; + public sortedSymbol_t e111; + public sortedSymbol_t e112; + public sortedSymbol_t e113; + public sortedSymbol_t e114; + public sortedSymbol_t e115; + public sortedSymbol_t e116; + public sortedSymbol_t e117; + public sortedSymbol_t e118; + public sortedSymbol_t e119; + public sortedSymbol_t e120; + public sortedSymbol_t e121; + public sortedSymbol_t e122; + public sortedSymbol_t e123; + public sortedSymbol_t e124; + public sortedSymbol_t e125; + public sortedSymbol_t e126; + public sortedSymbol_t e127; + public sortedSymbol_t e128; + public sortedSymbol_t e129; + public sortedSymbol_t e130; + public sortedSymbol_t e131; + public sortedSymbol_t e132; + public sortedSymbol_t e133; + public sortedSymbol_t e134; + public sortedSymbol_t e135; + public sortedSymbol_t e136; + public sortedSymbol_t 
e137; + public sortedSymbol_t e138; + public sortedSymbol_t e139; + public sortedSymbol_t e140; + public sortedSymbol_t e141; + public sortedSymbol_t e142; + public sortedSymbol_t e143; + public sortedSymbol_t e144; + public sortedSymbol_t e145; + public sortedSymbol_t e146; + public sortedSymbol_t e147; + public sortedSymbol_t e148; + public sortedSymbol_t e149; + public sortedSymbol_t e150; + public sortedSymbol_t e151; + public sortedSymbol_t e152; + public sortedSymbol_t e153; + public sortedSymbol_t e154; + public sortedSymbol_t e155; + public sortedSymbol_t e156; + public sortedSymbol_t e157; + public sortedSymbol_t e158; + public sortedSymbol_t e159; + public sortedSymbol_t e160; + public sortedSymbol_t e161; + public sortedSymbol_t e162; + public sortedSymbol_t e163; + public sortedSymbol_t e164; + public sortedSymbol_t e165; + public sortedSymbol_t e166; + public sortedSymbol_t e167; + public sortedSymbol_t e168; + public sortedSymbol_t e169; + public sortedSymbol_t e170; + public sortedSymbol_t e171; + public sortedSymbol_t e172; + public sortedSymbol_t e173; + public sortedSymbol_t e174; + public sortedSymbol_t e175; + public sortedSymbol_t e176; + public sortedSymbol_t e177; + public sortedSymbol_t e178; + public sortedSymbol_t e179; + public sortedSymbol_t e180; + public sortedSymbol_t e181; + public sortedSymbol_t e182; + public sortedSymbol_t e183; + public sortedSymbol_t e184; + public sortedSymbol_t e185; + public sortedSymbol_t e186; + public sortedSymbol_t e187; + public sortedSymbol_t e188; + public sortedSymbol_t e189; + public sortedSymbol_t e190; + public sortedSymbol_t e191; + public sortedSymbol_t e192; + public sortedSymbol_t e193; + public sortedSymbol_t e194; + public sortedSymbol_t e195; + public sortedSymbol_t e196; + public sortedSymbol_t e197; + public sortedSymbol_t e198; + public sortedSymbol_t e199; + public sortedSymbol_t e200; + public sortedSymbol_t e201; + public sortedSymbol_t e202; + public sortedSymbol_t e203; + public 
sortedSymbol_t e204; + public sortedSymbol_t e205; + public sortedSymbol_t e206; + public sortedSymbol_t e207; + public sortedSymbol_t e208; + public sortedSymbol_t e209; + public sortedSymbol_t e210; + public sortedSymbol_t e211; + public sortedSymbol_t e212; + public sortedSymbol_t e213; + public sortedSymbol_t e214; + public sortedSymbol_t e215; + public sortedSymbol_t e216; + public sortedSymbol_t e217; + public sortedSymbol_t e218; + public sortedSymbol_t e219; + public sortedSymbol_t e220; + public sortedSymbol_t e221; + public sortedSymbol_t e222; + public sortedSymbol_t e223; + public sortedSymbol_t e224; + public sortedSymbol_t e225; + public sortedSymbol_t e226; + public sortedSymbol_t e227; + public sortedSymbol_t e228; + public sortedSymbol_t e229; + public sortedSymbol_t e230; + public sortedSymbol_t e231; + public sortedSymbol_t e232; + public sortedSymbol_t e233; + public sortedSymbol_t e234; + public sortedSymbol_t e235; + public sortedSymbol_t e236; + public sortedSymbol_t e237; + public sortedSymbol_t e238; + public sortedSymbol_t e239; + public sortedSymbol_t e240; + public sortedSymbol_t e241; + public sortedSymbol_t e242; + public sortedSymbol_t e243; + public sortedSymbol_t e244; + public sortedSymbol_t e245; + public sortedSymbol_t e246; + public sortedSymbol_t e247; + public sortedSymbol_t e248; + public sortedSymbol_t e249; + public sortedSymbol_t e250; + public sortedSymbol_t e251; + public sortedSymbol_t e252; + public sortedSymbol_t e253; + public sortedSymbol_t e254; + public sortedSymbol_t e255; } +#endif } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs index 2c6f9c87e..2400dba76 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_WriteCTableWksp.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace 
SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_WriteCTableWksp { - public unsafe struct HUF_WriteCTableWksp - { - public HUF_CompressWeightsWksp wksp; + public HUF_CompressWeightsWksp wksp; - /* precomputed conversion table */ - public fixed byte bitsToWeight[13]; - public fixed byte huffWeight[255]; - } + /* precomputed conversion table */ + public fixed byte bitsToWeight[13]; + public fixed byte huffWeight[255]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs index f259c5b0d..0fb8400a0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs @@ -1,11 +1,11 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct HUF_buildCTable_wksp_tables { - public struct HUF_buildCTable_wksp_tables - { - public _huffNodeTbl_e__FixedBuffer huffNodeTbl; - public _rankPosition_e__FixedBuffer rankPosition; + public _huffNodeTbl_e__FixedBuffer huffNodeTbl; + public _rankPosition_e__FixedBuffer rankPosition; #if NET8_0_OR_GREATER [InlineArray(512)] @@ -15,521 +15,521 @@ public unsafe struct _huffNodeTbl_e__FixedBuffer } #else - public unsafe struct _huffNodeTbl_e__FixedBuffer - { - public nodeElt_s e0; - public nodeElt_s e1; - public nodeElt_s e2; - public nodeElt_s e3; - public nodeElt_s e4; - public nodeElt_s e5; - public nodeElt_s e6; - public nodeElt_s e7; - public nodeElt_s e8; - public nodeElt_s e9; - public nodeElt_s e10; - public nodeElt_s e11; - public nodeElt_s e12; - public nodeElt_s e13; - public nodeElt_s e14; - public nodeElt_s e15; - public nodeElt_s e16; - public nodeElt_s e17; - public nodeElt_s e18; - public nodeElt_s e19; - public nodeElt_s e20; - public nodeElt_s e21; - public nodeElt_s e22; - public nodeElt_s e23; - 
public nodeElt_s e24; - public nodeElt_s e25; - public nodeElt_s e26; - public nodeElt_s e27; - public nodeElt_s e28; - public nodeElt_s e29; - public nodeElt_s e30; - public nodeElt_s e31; - public nodeElt_s e32; - public nodeElt_s e33; - public nodeElt_s e34; - public nodeElt_s e35; - public nodeElt_s e36; - public nodeElt_s e37; - public nodeElt_s e38; - public nodeElt_s e39; - public nodeElt_s e40; - public nodeElt_s e41; - public nodeElt_s e42; - public nodeElt_s e43; - public nodeElt_s e44; - public nodeElt_s e45; - public nodeElt_s e46; - public nodeElt_s e47; - public nodeElt_s e48; - public nodeElt_s e49; - public nodeElt_s e50; - public nodeElt_s e51; - public nodeElt_s e52; - public nodeElt_s e53; - public nodeElt_s e54; - public nodeElt_s e55; - public nodeElt_s e56; - public nodeElt_s e57; - public nodeElt_s e58; - public nodeElt_s e59; - public nodeElt_s e60; - public nodeElt_s e61; - public nodeElt_s e62; - public nodeElt_s e63; - public nodeElt_s e64; - public nodeElt_s e65; - public nodeElt_s e66; - public nodeElt_s e67; - public nodeElt_s e68; - public nodeElt_s e69; - public nodeElt_s e70; - public nodeElt_s e71; - public nodeElt_s e72; - public nodeElt_s e73; - public nodeElt_s e74; - public nodeElt_s e75; - public nodeElt_s e76; - public nodeElt_s e77; - public nodeElt_s e78; - public nodeElt_s e79; - public nodeElt_s e80; - public nodeElt_s e81; - public nodeElt_s e82; - public nodeElt_s e83; - public nodeElt_s e84; - public nodeElt_s e85; - public nodeElt_s e86; - public nodeElt_s e87; - public nodeElt_s e88; - public nodeElt_s e89; - public nodeElt_s e90; - public nodeElt_s e91; - public nodeElt_s e92; - public nodeElt_s e93; - public nodeElt_s e94; - public nodeElt_s e95; - public nodeElt_s e96; - public nodeElt_s e97; - public nodeElt_s e98; - public nodeElt_s e99; - public nodeElt_s e100; - public nodeElt_s e101; - public nodeElt_s e102; - public nodeElt_s e103; - public nodeElt_s e104; - public nodeElt_s e105; - public nodeElt_s e106; - 
public nodeElt_s e107; - public nodeElt_s e108; - public nodeElt_s e109; - public nodeElt_s e110; - public nodeElt_s e111; - public nodeElt_s e112; - public nodeElt_s e113; - public nodeElt_s e114; - public nodeElt_s e115; - public nodeElt_s e116; - public nodeElt_s e117; - public nodeElt_s e118; - public nodeElt_s e119; - public nodeElt_s e120; - public nodeElt_s e121; - public nodeElt_s e122; - public nodeElt_s e123; - public nodeElt_s e124; - public nodeElt_s e125; - public nodeElt_s e126; - public nodeElt_s e127; - public nodeElt_s e128; - public nodeElt_s e129; - public nodeElt_s e130; - public nodeElt_s e131; - public nodeElt_s e132; - public nodeElt_s e133; - public nodeElt_s e134; - public nodeElt_s e135; - public nodeElt_s e136; - public nodeElt_s e137; - public nodeElt_s e138; - public nodeElt_s e139; - public nodeElt_s e140; - public nodeElt_s e141; - public nodeElt_s e142; - public nodeElt_s e143; - public nodeElt_s e144; - public nodeElt_s e145; - public nodeElt_s e146; - public nodeElt_s e147; - public nodeElt_s e148; - public nodeElt_s e149; - public nodeElt_s e150; - public nodeElt_s e151; - public nodeElt_s e152; - public nodeElt_s e153; - public nodeElt_s e154; - public nodeElt_s e155; - public nodeElt_s e156; - public nodeElt_s e157; - public nodeElt_s e158; - public nodeElt_s e159; - public nodeElt_s e160; - public nodeElt_s e161; - public nodeElt_s e162; - public nodeElt_s e163; - public nodeElt_s e164; - public nodeElt_s e165; - public nodeElt_s e166; - public nodeElt_s e167; - public nodeElt_s e168; - public nodeElt_s e169; - public nodeElt_s e170; - public nodeElt_s e171; - public nodeElt_s e172; - public nodeElt_s e173; - public nodeElt_s e174; - public nodeElt_s e175; - public nodeElt_s e176; - public nodeElt_s e177; - public nodeElt_s e178; - public nodeElt_s e179; - public nodeElt_s e180; - public nodeElt_s e181; - public nodeElt_s e182; - public nodeElt_s e183; - public nodeElt_s e184; - public nodeElt_s e185; - public nodeElt_s e186; - 
public nodeElt_s e187; - public nodeElt_s e188; - public nodeElt_s e189; - public nodeElt_s e190; - public nodeElt_s e191; - public nodeElt_s e192; - public nodeElt_s e193; - public nodeElt_s e194; - public nodeElt_s e195; - public nodeElt_s e196; - public nodeElt_s e197; - public nodeElt_s e198; - public nodeElt_s e199; - public nodeElt_s e200; - public nodeElt_s e201; - public nodeElt_s e202; - public nodeElt_s e203; - public nodeElt_s e204; - public nodeElt_s e205; - public nodeElt_s e206; - public nodeElt_s e207; - public nodeElt_s e208; - public nodeElt_s e209; - public nodeElt_s e210; - public nodeElt_s e211; - public nodeElt_s e212; - public nodeElt_s e213; - public nodeElt_s e214; - public nodeElt_s e215; - public nodeElt_s e216; - public nodeElt_s e217; - public nodeElt_s e218; - public nodeElt_s e219; - public nodeElt_s e220; - public nodeElt_s e221; - public nodeElt_s e222; - public nodeElt_s e223; - public nodeElt_s e224; - public nodeElt_s e225; - public nodeElt_s e226; - public nodeElt_s e227; - public nodeElt_s e228; - public nodeElt_s e229; - public nodeElt_s e230; - public nodeElt_s e231; - public nodeElt_s e232; - public nodeElt_s e233; - public nodeElt_s e234; - public nodeElt_s e235; - public nodeElt_s e236; - public nodeElt_s e237; - public nodeElt_s e238; - public nodeElt_s e239; - public nodeElt_s e240; - public nodeElt_s e241; - public nodeElt_s e242; - public nodeElt_s e243; - public nodeElt_s e244; - public nodeElt_s e245; - public nodeElt_s e246; - public nodeElt_s e247; - public nodeElt_s e248; - public nodeElt_s e249; - public nodeElt_s e250; - public nodeElt_s e251; - public nodeElt_s e252; - public nodeElt_s e253; - public nodeElt_s e254; - public nodeElt_s e255; - public nodeElt_s e256; - public nodeElt_s e257; - public nodeElt_s e258; - public nodeElt_s e259; - public nodeElt_s e260; - public nodeElt_s e261; - public nodeElt_s e262; - public nodeElt_s e263; - public nodeElt_s e264; - public nodeElt_s e265; - public nodeElt_s e266; - 
public nodeElt_s e267; - public nodeElt_s e268; - public nodeElt_s e269; - public nodeElt_s e270; - public nodeElt_s e271; - public nodeElt_s e272; - public nodeElt_s e273; - public nodeElt_s e274; - public nodeElt_s e275; - public nodeElt_s e276; - public nodeElt_s e277; - public nodeElt_s e278; - public nodeElt_s e279; - public nodeElt_s e280; - public nodeElt_s e281; - public nodeElt_s e282; - public nodeElt_s e283; - public nodeElt_s e284; - public nodeElt_s e285; - public nodeElt_s e286; - public nodeElt_s e287; - public nodeElt_s e288; - public nodeElt_s e289; - public nodeElt_s e290; - public nodeElt_s e291; - public nodeElt_s e292; - public nodeElt_s e293; - public nodeElt_s e294; - public nodeElt_s e295; - public nodeElt_s e296; - public nodeElt_s e297; - public nodeElt_s e298; - public nodeElt_s e299; - public nodeElt_s e300; - public nodeElt_s e301; - public nodeElt_s e302; - public nodeElt_s e303; - public nodeElt_s e304; - public nodeElt_s e305; - public nodeElt_s e306; - public nodeElt_s e307; - public nodeElt_s e308; - public nodeElt_s e309; - public nodeElt_s e310; - public nodeElt_s e311; - public nodeElt_s e312; - public nodeElt_s e313; - public nodeElt_s e314; - public nodeElt_s e315; - public nodeElt_s e316; - public nodeElt_s e317; - public nodeElt_s e318; - public nodeElt_s e319; - public nodeElt_s e320; - public nodeElt_s e321; - public nodeElt_s e322; - public nodeElt_s e323; - public nodeElt_s e324; - public nodeElt_s e325; - public nodeElt_s e326; - public nodeElt_s e327; - public nodeElt_s e328; - public nodeElt_s e329; - public nodeElt_s e330; - public nodeElt_s e331; - public nodeElt_s e332; - public nodeElt_s e333; - public nodeElt_s e334; - public nodeElt_s e335; - public nodeElt_s e336; - public nodeElt_s e337; - public nodeElt_s e338; - public nodeElt_s e339; - public nodeElt_s e340; - public nodeElt_s e341; - public nodeElt_s e342; - public nodeElt_s e343; - public nodeElt_s e344; - public nodeElt_s e345; - public nodeElt_s e346; - 
public nodeElt_s e347; - public nodeElt_s e348; - public nodeElt_s e349; - public nodeElt_s e350; - public nodeElt_s e351; - public nodeElt_s e352; - public nodeElt_s e353; - public nodeElt_s e354; - public nodeElt_s e355; - public nodeElt_s e356; - public nodeElt_s e357; - public nodeElt_s e358; - public nodeElt_s e359; - public nodeElt_s e360; - public nodeElt_s e361; - public nodeElt_s e362; - public nodeElt_s e363; - public nodeElt_s e364; - public nodeElt_s e365; - public nodeElt_s e366; - public nodeElt_s e367; - public nodeElt_s e368; - public nodeElt_s e369; - public nodeElt_s e370; - public nodeElt_s e371; - public nodeElt_s e372; - public nodeElt_s e373; - public nodeElt_s e374; - public nodeElt_s e375; - public nodeElt_s e376; - public nodeElt_s e377; - public nodeElt_s e378; - public nodeElt_s e379; - public nodeElt_s e380; - public nodeElt_s e381; - public nodeElt_s e382; - public nodeElt_s e383; - public nodeElt_s e384; - public nodeElt_s e385; - public nodeElt_s e386; - public nodeElt_s e387; - public nodeElt_s e388; - public nodeElt_s e389; - public nodeElt_s e390; - public nodeElt_s e391; - public nodeElt_s e392; - public nodeElt_s e393; - public nodeElt_s e394; - public nodeElt_s e395; - public nodeElt_s e396; - public nodeElt_s e397; - public nodeElt_s e398; - public nodeElt_s e399; - public nodeElt_s e400; - public nodeElt_s e401; - public nodeElt_s e402; - public nodeElt_s e403; - public nodeElt_s e404; - public nodeElt_s e405; - public nodeElt_s e406; - public nodeElt_s e407; - public nodeElt_s e408; - public nodeElt_s e409; - public nodeElt_s e410; - public nodeElt_s e411; - public nodeElt_s e412; - public nodeElt_s e413; - public nodeElt_s e414; - public nodeElt_s e415; - public nodeElt_s e416; - public nodeElt_s e417; - public nodeElt_s e418; - public nodeElt_s e419; - public nodeElt_s e420; - public nodeElt_s e421; - public nodeElt_s e422; - public nodeElt_s e423; - public nodeElt_s e424; - public nodeElt_s e425; - public nodeElt_s e426; - 
public nodeElt_s e427; - public nodeElt_s e428; - public nodeElt_s e429; - public nodeElt_s e430; - public nodeElt_s e431; - public nodeElt_s e432; - public nodeElt_s e433; - public nodeElt_s e434; - public nodeElt_s e435; - public nodeElt_s e436; - public nodeElt_s e437; - public nodeElt_s e438; - public nodeElt_s e439; - public nodeElt_s e440; - public nodeElt_s e441; - public nodeElt_s e442; - public nodeElt_s e443; - public nodeElt_s e444; - public nodeElt_s e445; - public nodeElt_s e446; - public nodeElt_s e447; - public nodeElt_s e448; - public nodeElt_s e449; - public nodeElt_s e450; - public nodeElt_s e451; - public nodeElt_s e452; - public nodeElt_s e453; - public nodeElt_s e454; - public nodeElt_s e455; - public nodeElt_s e456; - public nodeElt_s e457; - public nodeElt_s e458; - public nodeElt_s e459; - public nodeElt_s e460; - public nodeElt_s e461; - public nodeElt_s e462; - public nodeElt_s e463; - public nodeElt_s e464; - public nodeElt_s e465; - public nodeElt_s e466; - public nodeElt_s e467; - public nodeElt_s e468; - public nodeElt_s e469; - public nodeElt_s e470; - public nodeElt_s e471; - public nodeElt_s e472; - public nodeElt_s e473; - public nodeElt_s e474; - public nodeElt_s e475; - public nodeElt_s e476; - public nodeElt_s e477; - public nodeElt_s e478; - public nodeElt_s e479; - public nodeElt_s e480; - public nodeElt_s e481; - public nodeElt_s e482; - public nodeElt_s e483; - public nodeElt_s e484; - public nodeElt_s e485; - public nodeElt_s e486; - public nodeElt_s e487; - public nodeElt_s e488; - public nodeElt_s e489; - public nodeElt_s e490; - public nodeElt_s e491; - public nodeElt_s e492; - public nodeElt_s e493; - public nodeElt_s e494; - public nodeElt_s e495; - public nodeElt_s e496; - public nodeElt_s e497; - public nodeElt_s e498; - public nodeElt_s e499; - public nodeElt_s e500; - public nodeElt_s e501; - public nodeElt_s e502; - public nodeElt_s e503; - public nodeElt_s e504; - public nodeElt_s e505; - public nodeElt_s e506; - 
public nodeElt_s e507; - public nodeElt_s e508; - public nodeElt_s e509; - public nodeElt_s e510; - public nodeElt_s e511; - } + public unsafe struct _huffNodeTbl_e__FixedBuffer + { + public nodeElt_s e0; + public nodeElt_s e1; + public nodeElt_s e2; + public nodeElt_s e3; + public nodeElt_s e4; + public nodeElt_s e5; + public nodeElt_s e6; + public nodeElt_s e7; + public nodeElt_s e8; + public nodeElt_s e9; + public nodeElt_s e10; + public nodeElt_s e11; + public nodeElt_s e12; + public nodeElt_s e13; + public nodeElt_s e14; + public nodeElt_s e15; + public nodeElt_s e16; + public nodeElt_s e17; + public nodeElt_s e18; + public nodeElt_s e19; + public nodeElt_s e20; + public nodeElt_s e21; + public nodeElt_s e22; + public nodeElt_s e23; + public nodeElt_s e24; + public nodeElt_s e25; + public nodeElt_s e26; + public nodeElt_s e27; + public nodeElt_s e28; + public nodeElt_s e29; + public nodeElt_s e30; + public nodeElt_s e31; + public nodeElt_s e32; + public nodeElt_s e33; + public nodeElt_s e34; + public nodeElt_s e35; + public nodeElt_s e36; + public nodeElt_s e37; + public nodeElt_s e38; + public nodeElt_s e39; + public nodeElt_s e40; + public nodeElt_s e41; + public nodeElt_s e42; + public nodeElt_s e43; + public nodeElt_s e44; + public nodeElt_s e45; + public nodeElt_s e46; + public nodeElt_s e47; + public nodeElt_s e48; + public nodeElt_s e49; + public nodeElt_s e50; + public nodeElt_s e51; + public nodeElt_s e52; + public nodeElt_s e53; + public nodeElt_s e54; + public nodeElt_s e55; + public nodeElt_s e56; + public nodeElt_s e57; + public nodeElt_s e58; + public nodeElt_s e59; + public nodeElt_s e60; + public nodeElt_s e61; + public nodeElt_s e62; + public nodeElt_s e63; + public nodeElt_s e64; + public nodeElt_s e65; + public nodeElt_s e66; + public nodeElt_s e67; + public nodeElt_s e68; + public nodeElt_s e69; + public nodeElt_s e70; + public nodeElt_s e71; + public nodeElt_s e72; + public nodeElt_s e73; + public nodeElt_s e74; + public nodeElt_s e75; + 
public nodeElt_s e76; + public nodeElt_s e77; + public nodeElt_s e78; + public nodeElt_s e79; + public nodeElt_s e80; + public nodeElt_s e81; + public nodeElt_s e82; + public nodeElt_s e83; + public nodeElt_s e84; + public nodeElt_s e85; + public nodeElt_s e86; + public nodeElt_s e87; + public nodeElt_s e88; + public nodeElt_s e89; + public nodeElt_s e90; + public nodeElt_s e91; + public nodeElt_s e92; + public nodeElt_s e93; + public nodeElt_s e94; + public nodeElt_s e95; + public nodeElt_s e96; + public nodeElt_s e97; + public nodeElt_s e98; + public nodeElt_s e99; + public nodeElt_s e100; + public nodeElt_s e101; + public nodeElt_s e102; + public nodeElt_s e103; + public nodeElt_s e104; + public nodeElt_s e105; + public nodeElt_s e106; + public nodeElt_s e107; + public nodeElt_s e108; + public nodeElt_s e109; + public nodeElt_s e110; + public nodeElt_s e111; + public nodeElt_s e112; + public nodeElt_s e113; + public nodeElt_s e114; + public nodeElt_s e115; + public nodeElt_s e116; + public nodeElt_s e117; + public nodeElt_s e118; + public nodeElt_s e119; + public nodeElt_s e120; + public nodeElt_s e121; + public nodeElt_s e122; + public nodeElt_s e123; + public nodeElt_s e124; + public nodeElt_s e125; + public nodeElt_s e126; + public nodeElt_s e127; + public nodeElt_s e128; + public nodeElt_s e129; + public nodeElt_s e130; + public nodeElt_s e131; + public nodeElt_s e132; + public nodeElt_s e133; + public nodeElt_s e134; + public nodeElt_s e135; + public nodeElt_s e136; + public nodeElt_s e137; + public nodeElt_s e138; + public nodeElt_s e139; + public nodeElt_s e140; + public nodeElt_s e141; + public nodeElt_s e142; + public nodeElt_s e143; + public nodeElt_s e144; + public nodeElt_s e145; + public nodeElt_s e146; + public nodeElt_s e147; + public nodeElt_s e148; + public nodeElt_s e149; + public nodeElt_s e150; + public nodeElt_s e151; + public nodeElt_s e152; + public nodeElt_s e153; + public nodeElt_s e154; + public nodeElt_s e155; + public nodeElt_s e156; 
+ public nodeElt_s e157; + public nodeElt_s e158; + public nodeElt_s e159; + public nodeElt_s e160; + public nodeElt_s e161; + public nodeElt_s e162; + public nodeElt_s e163; + public nodeElt_s e164; + public nodeElt_s e165; + public nodeElt_s e166; + public nodeElt_s e167; + public nodeElt_s e168; + public nodeElt_s e169; + public nodeElt_s e170; + public nodeElt_s e171; + public nodeElt_s e172; + public nodeElt_s e173; + public nodeElt_s e174; + public nodeElt_s e175; + public nodeElt_s e176; + public nodeElt_s e177; + public nodeElt_s e178; + public nodeElt_s e179; + public nodeElt_s e180; + public nodeElt_s e181; + public nodeElt_s e182; + public nodeElt_s e183; + public nodeElt_s e184; + public nodeElt_s e185; + public nodeElt_s e186; + public nodeElt_s e187; + public nodeElt_s e188; + public nodeElt_s e189; + public nodeElt_s e190; + public nodeElt_s e191; + public nodeElt_s e192; + public nodeElt_s e193; + public nodeElt_s e194; + public nodeElt_s e195; + public nodeElt_s e196; + public nodeElt_s e197; + public nodeElt_s e198; + public nodeElt_s e199; + public nodeElt_s e200; + public nodeElt_s e201; + public nodeElt_s e202; + public nodeElt_s e203; + public nodeElt_s e204; + public nodeElt_s e205; + public nodeElt_s e206; + public nodeElt_s e207; + public nodeElt_s e208; + public nodeElt_s e209; + public nodeElt_s e210; + public nodeElt_s e211; + public nodeElt_s e212; + public nodeElt_s e213; + public nodeElt_s e214; + public nodeElt_s e215; + public nodeElt_s e216; + public nodeElt_s e217; + public nodeElt_s e218; + public nodeElt_s e219; + public nodeElt_s e220; + public nodeElt_s e221; + public nodeElt_s e222; + public nodeElt_s e223; + public nodeElt_s e224; + public nodeElt_s e225; + public nodeElt_s e226; + public nodeElt_s e227; + public nodeElt_s e228; + public nodeElt_s e229; + public nodeElt_s e230; + public nodeElt_s e231; + public nodeElt_s e232; + public nodeElt_s e233; + public nodeElt_s e234; + public nodeElt_s e235; + public nodeElt_s e236; 
+ public nodeElt_s e237; + public nodeElt_s e238; + public nodeElt_s e239; + public nodeElt_s e240; + public nodeElt_s e241; + public nodeElt_s e242; + public nodeElt_s e243; + public nodeElt_s e244; + public nodeElt_s e245; + public nodeElt_s e246; + public nodeElt_s e247; + public nodeElt_s e248; + public nodeElt_s e249; + public nodeElt_s e250; + public nodeElt_s e251; + public nodeElt_s e252; + public nodeElt_s e253; + public nodeElt_s e254; + public nodeElt_s e255; + public nodeElt_s e256; + public nodeElt_s e257; + public nodeElt_s e258; + public nodeElt_s e259; + public nodeElt_s e260; + public nodeElt_s e261; + public nodeElt_s e262; + public nodeElt_s e263; + public nodeElt_s e264; + public nodeElt_s e265; + public nodeElt_s e266; + public nodeElt_s e267; + public nodeElt_s e268; + public nodeElt_s e269; + public nodeElt_s e270; + public nodeElt_s e271; + public nodeElt_s e272; + public nodeElt_s e273; + public nodeElt_s e274; + public nodeElt_s e275; + public nodeElt_s e276; + public nodeElt_s e277; + public nodeElt_s e278; + public nodeElt_s e279; + public nodeElt_s e280; + public nodeElt_s e281; + public nodeElt_s e282; + public nodeElt_s e283; + public nodeElt_s e284; + public nodeElt_s e285; + public nodeElt_s e286; + public nodeElt_s e287; + public nodeElt_s e288; + public nodeElt_s e289; + public nodeElt_s e290; + public nodeElt_s e291; + public nodeElt_s e292; + public nodeElt_s e293; + public nodeElt_s e294; + public nodeElt_s e295; + public nodeElt_s e296; + public nodeElt_s e297; + public nodeElt_s e298; + public nodeElt_s e299; + public nodeElt_s e300; + public nodeElt_s e301; + public nodeElt_s e302; + public nodeElt_s e303; + public nodeElt_s e304; + public nodeElt_s e305; + public nodeElt_s e306; + public nodeElt_s e307; + public nodeElt_s e308; + public nodeElt_s e309; + public nodeElt_s e310; + public nodeElt_s e311; + public nodeElt_s e312; + public nodeElt_s e313; + public nodeElt_s e314; + public nodeElt_s e315; + public nodeElt_s e316; 
+ public nodeElt_s e317; + public nodeElt_s e318; + public nodeElt_s e319; + public nodeElt_s e320; + public nodeElt_s e321; + public nodeElt_s e322; + public nodeElt_s e323; + public nodeElt_s e324; + public nodeElt_s e325; + public nodeElt_s e326; + public nodeElt_s e327; + public nodeElt_s e328; + public nodeElt_s e329; + public nodeElt_s e330; + public nodeElt_s e331; + public nodeElt_s e332; + public nodeElt_s e333; + public nodeElt_s e334; + public nodeElt_s e335; + public nodeElt_s e336; + public nodeElt_s e337; + public nodeElt_s e338; + public nodeElt_s e339; + public nodeElt_s e340; + public nodeElt_s e341; + public nodeElt_s e342; + public nodeElt_s e343; + public nodeElt_s e344; + public nodeElt_s e345; + public nodeElt_s e346; + public nodeElt_s e347; + public nodeElt_s e348; + public nodeElt_s e349; + public nodeElt_s e350; + public nodeElt_s e351; + public nodeElt_s e352; + public nodeElt_s e353; + public nodeElt_s e354; + public nodeElt_s e355; + public nodeElt_s e356; + public nodeElt_s e357; + public nodeElt_s e358; + public nodeElt_s e359; + public nodeElt_s e360; + public nodeElt_s e361; + public nodeElt_s e362; + public nodeElt_s e363; + public nodeElt_s e364; + public nodeElt_s e365; + public nodeElt_s e366; + public nodeElt_s e367; + public nodeElt_s e368; + public nodeElt_s e369; + public nodeElt_s e370; + public nodeElt_s e371; + public nodeElt_s e372; + public nodeElt_s e373; + public nodeElt_s e374; + public nodeElt_s e375; + public nodeElt_s e376; + public nodeElt_s e377; + public nodeElt_s e378; + public nodeElt_s e379; + public nodeElt_s e380; + public nodeElt_s e381; + public nodeElt_s e382; + public nodeElt_s e383; + public nodeElt_s e384; + public nodeElt_s e385; + public nodeElt_s e386; + public nodeElt_s e387; + public nodeElt_s e388; + public nodeElt_s e389; + public nodeElt_s e390; + public nodeElt_s e391; + public nodeElt_s e392; + public nodeElt_s e393; + public nodeElt_s e394; + public nodeElt_s e395; + public nodeElt_s e396; 
+ public nodeElt_s e397; + public nodeElt_s e398; + public nodeElt_s e399; + public nodeElt_s e400; + public nodeElt_s e401; + public nodeElt_s e402; + public nodeElt_s e403; + public nodeElt_s e404; + public nodeElt_s e405; + public nodeElt_s e406; + public nodeElt_s e407; + public nodeElt_s e408; + public nodeElt_s e409; + public nodeElt_s e410; + public nodeElt_s e411; + public nodeElt_s e412; + public nodeElt_s e413; + public nodeElt_s e414; + public nodeElt_s e415; + public nodeElt_s e416; + public nodeElt_s e417; + public nodeElt_s e418; + public nodeElt_s e419; + public nodeElt_s e420; + public nodeElt_s e421; + public nodeElt_s e422; + public nodeElt_s e423; + public nodeElt_s e424; + public nodeElt_s e425; + public nodeElt_s e426; + public nodeElt_s e427; + public nodeElt_s e428; + public nodeElt_s e429; + public nodeElt_s e430; + public nodeElt_s e431; + public nodeElt_s e432; + public nodeElt_s e433; + public nodeElt_s e434; + public nodeElt_s e435; + public nodeElt_s e436; + public nodeElt_s e437; + public nodeElt_s e438; + public nodeElt_s e439; + public nodeElt_s e440; + public nodeElt_s e441; + public nodeElt_s e442; + public nodeElt_s e443; + public nodeElt_s e444; + public nodeElt_s e445; + public nodeElt_s e446; + public nodeElt_s e447; + public nodeElt_s e448; + public nodeElt_s e449; + public nodeElt_s e450; + public nodeElt_s e451; + public nodeElt_s e452; + public nodeElt_s e453; + public nodeElt_s e454; + public nodeElt_s e455; + public nodeElt_s e456; + public nodeElt_s e457; + public nodeElt_s e458; + public nodeElt_s e459; + public nodeElt_s e460; + public nodeElt_s e461; + public nodeElt_s e462; + public nodeElt_s e463; + public nodeElt_s e464; + public nodeElt_s e465; + public nodeElt_s e466; + public nodeElt_s e467; + public nodeElt_s e468; + public nodeElt_s e469; + public nodeElt_s e470; + public nodeElt_s e471; + public nodeElt_s e472; + public nodeElt_s e473; + public nodeElt_s e474; + public nodeElt_s e475; + public nodeElt_s e476; 
+ public nodeElt_s e477; + public nodeElt_s e478; + public nodeElt_s e479; + public nodeElt_s e480; + public nodeElt_s e481; + public nodeElt_s e482; + public nodeElt_s e483; + public nodeElt_s e484; + public nodeElt_s e485; + public nodeElt_s e486; + public nodeElt_s e487; + public nodeElt_s e488; + public nodeElt_s e489; + public nodeElt_s e490; + public nodeElt_s e491; + public nodeElt_s e492; + public nodeElt_s e493; + public nodeElt_s e494; + public nodeElt_s e495; + public nodeElt_s e496; + public nodeElt_s e497; + public nodeElt_s e498; + public nodeElt_s e499; + public nodeElt_s e500; + public nodeElt_s e501; + public nodeElt_s e502; + public nodeElt_s e503; + public nodeElt_s e504; + public nodeElt_s e505; + public nodeElt_s e506; + public nodeElt_s e507; + public nodeElt_s e508; + public nodeElt_s e509; + public nodeElt_s e510; + public nodeElt_s e511; + } #endif #if NET8_0_OR_GREATER @@ -540,201 +540,200 @@ public unsafe struct _rankPosition_e__FixedBuffer } #else - public unsafe struct _rankPosition_e__FixedBuffer - { - public rankPos e0; - public rankPos e1; - public rankPos e2; - public rankPos e3; - public rankPos e4; - public rankPos e5; - public rankPos e6; - public rankPos e7; - public rankPos e8; - public rankPos e9; - public rankPos e10; - public rankPos e11; - public rankPos e12; - public rankPos e13; - public rankPos e14; - public rankPos e15; - public rankPos e16; - public rankPos e17; - public rankPos e18; - public rankPos e19; - public rankPos e20; - public rankPos e21; - public rankPos e22; - public rankPos e23; - public rankPos e24; - public rankPos e25; - public rankPos e26; - public rankPos e27; - public rankPos e28; - public rankPos e29; - public rankPos e30; - public rankPos e31; - public rankPos e32; - public rankPos e33; - public rankPos e34; - public rankPos e35; - public rankPos e36; - public rankPos e37; - public rankPos e38; - public rankPos e39; - public rankPos e40; - public rankPos e41; - public rankPos e42; - public rankPos 
e43; - public rankPos e44; - public rankPos e45; - public rankPos e46; - public rankPos e47; - public rankPos e48; - public rankPos e49; - public rankPos e50; - public rankPos e51; - public rankPos e52; - public rankPos e53; - public rankPos e54; - public rankPos e55; - public rankPos e56; - public rankPos e57; - public rankPos e58; - public rankPos e59; - public rankPos e60; - public rankPos e61; - public rankPos e62; - public rankPos e63; - public rankPos e64; - public rankPos e65; - public rankPos e66; - public rankPos e67; - public rankPos e68; - public rankPos e69; - public rankPos e70; - public rankPos e71; - public rankPos e72; - public rankPos e73; - public rankPos e74; - public rankPos e75; - public rankPos e76; - public rankPos e77; - public rankPos e78; - public rankPos e79; - public rankPos e80; - public rankPos e81; - public rankPos e82; - public rankPos e83; - public rankPos e84; - public rankPos e85; - public rankPos e86; - public rankPos e87; - public rankPos e88; - public rankPos e89; - public rankPos e90; - public rankPos e91; - public rankPos e92; - public rankPos e93; - public rankPos e94; - public rankPos e95; - public rankPos e96; - public rankPos e97; - public rankPos e98; - public rankPos e99; - public rankPos e100; - public rankPos e101; - public rankPos e102; - public rankPos e103; - public rankPos e104; - public rankPos e105; - public rankPos e106; - public rankPos e107; - public rankPos e108; - public rankPos e109; - public rankPos e110; - public rankPos e111; - public rankPos e112; - public rankPos e113; - public rankPos e114; - public rankPos e115; - public rankPos e116; - public rankPos e117; - public rankPos e118; - public rankPos e119; - public rankPos e120; - public rankPos e121; - public rankPos e122; - public rankPos e123; - public rankPos e124; - public rankPos e125; - public rankPos e126; - public rankPos e127; - public rankPos e128; - public rankPos e129; - public rankPos e130; - public rankPos e131; - public rankPos e132; - 
public rankPos e133; - public rankPos e134; - public rankPos e135; - public rankPos e136; - public rankPos e137; - public rankPos e138; - public rankPos e139; - public rankPos e140; - public rankPos e141; - public rankPos e142; - public rankPos e143; - public rankPos e144; - public rankPos e145; - public rankPos e146; - public rankPos e147; - public rankPos e148; - public rankPos e149; - public rankPos e150; - public rankPos e151; - public rankPos e152; - public rankPos e153; - public rankPos e154; - public rankPos e155; - public rankPos e156; - public rankPos e157; - public rankPos e158; - public rankPos e159; - public rankPos e160; - public rankPos e161; - public rankPos e162; - public rankPos e163; - public rankPos e164; - public rankPos e165; - public rankPos e166; - public rankPos e167; - public rankPos e168; - public rankPos e169; - public rankPos e170; - public rankPos e171; - public rankPos e172; - public rankPos e173; - public rankPos e174; - public rankPos e175; - public rankPos e176; - public rankPos e177; - public rankPos e178; - public rankPos e179; - public rankPos e180; - public rankPos e181; - public rankPos e182; - public rankPos e183; - public rankPos e184; - public rankPos e185; - public rankPos e186; - public rankPos e187; - public rankPos e188; - public rankPos e189; - public rankPos e190; - public rankPos e191; - } -#endif + public unsafe struct _rankPosition_e__FixedBuffer + { + public rankPos e0; + public rankPos e1; + public rankPos e2; + public rankPos e3; + public rankPos e4; + public rankPos e5; + public rankPos e6; + public rankPos e7; + public rankPos e8; + public rankPos e9; + public rankPos e10; + public rankPos e11; + public rankPos e12; + public rankPos e13; + public rankPos e14; + public rankPos e15; + public rankPos e16; + public rankPos e17; + public rankPos e18; + public rankPos e19; + public rankPos e20; + public rankPos e21; + public rankPos e22; + public rankPos e23; + public rankPos e24; + public rankPos e25; + public 
rankPos e26; + public rankPos e27; + public rankPos e28; + public rankPos e29; + public rankPos e30; + public rankPos e31; + public rankPos e32; + public rankPos e33; + public rankPos e34; + public rankPos e35; + public rankPos e36; + public rankPos e37; + public rankPos e38; + public rankPos e39; + public rankPos e40; + public rankPos e41; + public rankPos e42; + public rankPos e43; + public rankPos e44; + public rankPos e45; + public rankPos e46; + public rankPos e47; + public rankPos e48; + public rankPos e49; + public rankPos e50; + public rankPos e51; + public rankPos e52; + public rankPos e53; + public rankPos e54; + public rankPos e55; + public rankPos e56; + public rankPos e57; + public rankPos e58; + public rankPos e59; + public rankPos e60; + public rankPos e61; + public rankPos e62; + public rankPos e63; + public rankPos e64; + public rankPos e65; + public rankPos e66; + public rankPos e67; + public rankPos e68; + public rankPos e69; + public rankPos e70; + public rankPos e71; + public rankPos e72; + public rankPos e73; + public rankPos e74; + public rankPos e75; + public rankPos e76; + public rankPos e77; + public rankPos e78; + public rankPos e79; + public rankPos e80; + public rankPos e81; + public rankPos e82; + public rankPos e83; + public rankPos e84; + public rankPos e85; + public rankPos e86; + public rankPos e87; + public rankPos e88; + public rankPos e89; + public rankPos e90; + public rankPos e91; + public rankPos e92; + public rankPos e93; + public rankPos e94; + public rankPos e95; + public rankPos e96; + public rankPos e97; + public rankPos e98; + public rankPos e99; + public rankPos e100; + public rankPos e101; + public rankPos e102; + public rankPos e103; + public rankPos e104; + public rankPos e105; + public rankPos e106; + public rankPos e107; + public rankPos e108; + public rankPos e109; + public rankPos e110; + public rankPos e111; + public rankPos e112; + public rankPos e113; + public rankPos e114; + public rankPos e115; + public 
rankPos e116; + public rankPos e117; + public rankPos e118; + public rankPos e119; + public rankPos e120; + public rankPos e121; + public rankPos e122; + public rankPos e123; + public rankPos e124; + public rankPos e125; + public rankPos e126; + public rankPos e127; + public rankPos e128; + public rankPos e129; + public rankPos e130; + public rankPos e131; + public rankPos e132; + public rankPos e133; + public rankPos e134; + public rankPos e135; + public rankPos e136; + public rankPos e137; + public rankPos e138; + public rankPos e139; + public rankPos e140; + public rankPos e141; + public rankPos e142; + public rankPos e143; + public rankPos e144; + public rankPos e145; + public rankPos e146; + public rankPos e147; + public rankPos e148; + public rankPos e149; + public rankPos e150; + public rankPos e151; + public rankPos e152; + public rankPos e153; + public rankPos e154; + public rankPos e155; + public rankPos e156; + public rankPos e157; + public rankPos e158; + public rankPos e159; + public rankPos e160; + public rankPos e161; + public rankPos e162; + public rankPos e163; + public rankPos e164; + public rankPos e165; + public rankPos e166; + public rankPos e167; + public rankPos e168; + public rankPos e169; + public rankPos e170; + public rankPos e171; + public rankPos e172; + public rankPos e173; + public rankPos e174; + public rankPos e175; + public rankPos e176; + public rankPos e177; + public rankPos e178; + public rankPos e179; + public rankPos e180; + public rankPos e181; + public rankPos e182; + public rankPos e183; + public rankPos e184; + public rankPos e185; + public rankPos e186; + public rankPos e187; + public rankPos e188; + public rankPos e189; + public rankPos e190; + public rankPos e191; } -} +#endif +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs index 14bf2714c..4d8fde232 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs @@ -1,12 +1,12 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct HUF_compress_tables_t { - public unsafe struct HUF_compress_tables_t - { - public fixed uint count[256]; - public _CTable_e__FixedBuffer CTable; - public _wksps_e__Union wksps; + public fixed uint count[256]; + public _CTable_e__FixedBuffer CTable; + public _wksps_e__Union wksps; #if NET8_0_OR_GREATER [InlineArray(257)] @@ -16,266 +16,265 @@ public unsafe struct _CTable_e__FixedBuffer } #else - public unsafe struct _CTable_e__FixedBuffer - { - public nuint e0; - public nuint e1; - public nuint e2; - public nuint e3; - public nuint e4; - public nuint e5; - public nuint e6; - public nuint e7; - public nuint e8; - public nuint e9; - public nuint e10; - public nuint e11; - public nuint e12; - public nuint e13; - public nuint e14; - public nuint e15; - public nuint e16; - public nuint e17; - public nuint e18; - public nuint e19; - public nuint e20; - public nuint e21; - public nuint e22; - public nuint e23; - public nuint e24; - public nuint e25; - public nuint e26; - public nuint e27; - public nuint e28; - public nuint e29; - public nuint e30; - public nuint e31; - public nuint e32; - public nuint e33; - public nuint e34; - public nuint e35; - public nuint e36; - public nuint e37; - public nuint e38; - public nuint e39; - public nuint e40; - public nuint e41; - public nuint e42; - public nuint e43; - public nuint e44; - public nuint e45; - public nuint e46; - public nuint e47; - public nuint e48; - public nuint e49; - public nuint e50; - public nuint e51; - public nuint e52; - public nuint e53; - public nuint e54; - public nuint e55; - public nuint e56; - public nuint e57; - public nuint e58; - public nuint e59; - public nuint e60; - public nuint e61; - 
public nuint e62; - public nuint e63; - public nuint e64; - public nuint e65; - public nuint e66; - public nuint e67; - public nuint e68; - public nuint e69; - public nuint e70; - public nuint e71; - public nuint e72; - public nuint e73; - public nuint e74; - public nuint e75; - public nuint e76; - public nuint e77; - public nuint e78; - public nuint e79; - public nuint e80; - public nuint e81; - public nuint e82; - public nuint e83; - public nuint e84; - public nuint e85; - public nuint e86; - public nuint e87; - public nuint e88; - public nuint e89; - public nuint e90; - public nuint e91; - public nuint e92; - public nuint e93; - public nuint e94; - public nuint e95; - public nuint e96; - public nuint e97; - public nuint e98; - public nuint e99; - public nuint e100; - public nuint e101; - public nuint e102; - public nuint e103; - public nuint e104; - public nuint e105; - public nuint e106; - public nuint e107; - public nuint e108; - public nuint e109; - public nuint e110; - public nuint e111; - public nuint e112; - public nuint e113; - public nuint e114; - public nuint e115; - public nuint e116; - public nuint e117; - public nuint e118; - public nuint e119; - public nuint e120; - public nuint e121; - public nuint e122; - public nuint e123; - public nuint e124; - public nuint e125; - public nuint e126; - public nuint e127; - public nuint e128; - public nuint e129; - public nuint e130; - public nuint e131; - public nuint e132; - public nuint e133; - public nuint e134; - public nuint e135; - public nuint e136; - public nuint e137; - public nuint e138; - public nuint e139; - public nuint e140; - public nuint e141; - public nuint e142; - public nuint e143; - public nuint e144; - public nuint e145; - public nuint e146; - public nuint e147; - public nuint e148; - public nuint e149; - public nuint e150; - public nuint e151; - public nuint e152; - public nuint e153; - public nuint e154; - public nuint e155; - public nuint e156; - public nuint e157; - public nuint e158; - 
public nuint e159; - public nuint e160; - public nuint e161; - public nuint e162; - public nuint e163; - public nuint e164; - public nuint e165; - public nuint e166; - public nuint e167; - public nuint e168; - public nuint e169; - public nuint e170; - public nuint e171; - public nuint e172; - public nuint e173; - public nuint e174; - public nuint e175; - public nuint e176; - public nuint e177; - public nuint e178; - public nuint e179; - public nuint e180; - public nuint e181; - public nuint e182; - public nuint e183; - public nuint e184; - public nuint e185; - public nuint e186; - public nuint e187; - public nuint e188; - public nuint e189; - public nuint e190; - public nuint e191; - public nuint e192; - public nuint e193; - public nuint e194; - public nuint e195; - public nuint e196; - public nuint e197; - public nuint e198; - public nuint e199; - public nuint e200; - public nuint e201; - public nuint e202; - public nuint e203; - public nuint e204; - public nuint e205; - public nuint e206; - public nuint e207; - public nuint e208; - public nuint e209; - public nuint e210; - public nuint e211; - public nuint e212; - public nuint e213; - public nuint e214; - public nuint e215; - public nuint e216; - public nuint e217; - public nuint e218; - public nuint e219; - public nuint e220; - public nuint e221; - public nuint e222; - public nuint e223; - public nuint e224; - public nuint e225; - public nuint e226; - public nuint e227; - public nuint e228; - public nuint e229; - public nuint e230; - public nuint e231; - public nuint e232; - public nuint e233; - public nuint e234; - public nuint e235; - public nuint e236; - public nuint e237; - public nuint e238; - public nuint e239; - public nuint e240; - public nuint e241; - public nuint e242; - public nuint e243; - public nuint e244; - public nuint e245; - public nuint e246; - public nuint e247; - public nuint e248; - public nuint e249; - public nuint e250; - public nuint e251; - public nuint e252; - public nuint e253; - 
public nuint e254; - public nuint e255; - public nuint e256; - } -#endif + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; + public nuint e10; + public nuint e11; + public nuint e12; + public nuint e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public nuint e62; + public nuint e63; + public nuint e64; + public nuint e65; + public nuint e66; + public nuint e67; + public nuint e68; + public nuint e69; + public nuint e70; + public nuint e71; + public nuint e72; + public nuint e73; + public nuint e74; + public nuint e75; + public nuint e76; + public nuint e77; + public nuint e78; + public nuint e79; + public nuint e80; + public nuint e81; + public nuint e82; + public nuint e83; + public nuint e84; + public nuint e85; + public nuint e86; + public nuint e87; + public nuint e88; + public nuint e89; + public nuint e90; + public nuint e91; + public nuint e92; + public nuint e93; + 
public nuint e94; + public nuint e95; + public nuint e96; + public nuint e97; + public nuint e98; + public nuint e99; + public nuint e100; + public nuint e101; + public nuint e102; + public nuint e103; + public nuint e104; + public nuint e105; + public nuint e106; + public nuint e107; + public nuint e108; + public nuint e109; + public nuint e110; + public nuint e111; + public nuint e112; + public nuint e113; + public nuint e114; + public nuint e115; + public nuint e116; + public nuint e117; + public nuint e118; + public nuint e119; + public nuint e120; + public nuint e121; + public nuint e122; + public nuint e123; + public nuint e124; + public nuint e125; + public nuint e126; + public nuint e127; + public nuint e128; + public nuint e129; + public nuint e130; + public nuint e131; + public nuint e132; + public nuint e133; + public nuint e134; + public nuint e135; + public nuint e136; + public nuint e137; + public nuint e138; + public nuint e139; + public nuint e140; + public nuint e141; + public nuint e142; + public nuint e143; + public nuint e144; + public nuint e145; + public nuint e146; + public nuint e147; + public nuint e148; + public nuint e149; + public nuint e150; + public nuint e151; + public nuint e152; + public nuint e153; + public nuint e154; + public nuint e155; + public nuint e156; + public nuint e157; + public nuint e158; + public nuint e159; + public nuint e160; + public nuint e161; + public nuint e162; + public nuint e163; + public nuint e164; + public nuint e165; + public nuint e166; + public nuint e167; + public nuint e168; + public nuint e169; + public nuint e170; + public nuint e171; + public nuint e172; + public nuint e173; + public nuint e174; + public nuint e175; + public nuint e176; + public nuint e177; + public nuint e178; + public nuint e179; + public nuint e180; + public nuint e181; + public nuint e182; + public nuint e183; + public nuint e184; + public nuint e185; + public nuint e186; + public nuint e187; + public nuint e188; + public 
nuint e189; + public nuint e190; + public nuint e191; + public nuint e192; + public nuint e193; + public nuint e194; + public nuint e195; + public nuint e196; + public nuint e197; + public nuint e198; + public nuint e199; + public nuint e200; + public nuint e201; + public nuint e202; + public nuint e203; + public nuint e204; + public nuint e205; + public nuint e206; + public nuint e207; + public nuint e208; + public nuint e209; + public nuint e210; + public nuint e211; + public nuint e212; + public nuint e213; + public nuint e214; + public nuint e215; + public nuint e216; + public nuint e217; + public nuint e218; + public nuint e219; + public nuint e220; + public nuint e221; + public nuint e222; + public nuint e223; + public nuint e224; + public nuint e225; + public nuint e226; + public nuint e227; + public nuint e228; + public nuint e229; + public nuint e230; + public nuint e231; + public nuint e232; + public nuint e233; + public nuint e234; + public nuint e235; + public nuint e236; + public nuint e237; + public nuint e238; + public nuint e239; + public nuint e240; + public nuint e241; + public nuint e242; + public nuint e243; + public nuint e244; + public nuint e245; + public nuint e246; + public nuint e247; + public nuint e248; + public nuint e249; + public nuint e250; + public nuint e251; + public nuint e252; + public nuint e253; + public nuint e254; + public nuint e255; + public nuint e256; } +#endif } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs index fe365c454..a38bc711a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_flags_e.cs @@ -1,45 +1,44 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Huffman flags bitset. + * For all flags, 0 is the default value. + */ +public enum HUF_flags_e { /** - * Huffman flags bitset. 
- * For all flags, 0 is the default value. + * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. + * Otherwise: Ignored. */ - public enum HUF_flags_e - { - /** - * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. - * Otherwise: Ignored. - */ - HUF_flags_bmi2 = 1 << 0, + HUF_flags_bmi2 = 1 << 0, - /** - * If set: Test possible table depths to find the one that produces the smallest header + encoded size. - * If unset: Use heuristic to find the table depth. - */ - HUF_flags_optimalDepth = 1 << 1, + /** + * If set: Test possible table depths to find the one that produces the smallest header + encoded size. + * If unset: Use heuristic to find the table depth. + */ + HUF_flags_optimalDepth = 1 << 1, - /** - * If set: If the previous table can encode the input, always reuse the previous table. - * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. - */ - HUF_flags_preferRepeat = 1 << 2, + /** + * If set: If the previous table can encode the input, always reuse the previous table. + * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. + */ + HUF_flags_preferRepeat = 1 << 2, - /** - * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. - * If unset: Always histogram the entire input. - */ - HUF_flags_suspectUncompressible = 1 << 3, + /** + * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. + * If unset: Always histogram the entire input. 
+ */ + HUF_flags_suspectUncompressible = 1 << 3, - /** - * If set: Don't use assembly implementations - * If unset: Allow using assembly implementations - */ - HUF_flags_disableAsm = 1 << 4, + /** + * If set: Don't use assembly implementations + * If unset: Allow using assembly implementations + */ + HUF_flags_disableAsm = 1 << 4, - /** - * If set: Don't use the fast decoding loop, always use the fallback decoding loop. - * If unset: Use the fast decoding loop when possible. - */ - HUF_flags_disableFast = 1 << 5, - } + /** + * If set: Don't use the fast decoding loop, always use the fallback decoding loop. + * If unset: Use the fast decoding loop when possible. + */ + HUF_flags_disableFast = 1 << 5, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs index 55a931ac1..ff7e26e7b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_nbStreams_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum HUF_nbStreams_e { - public enum HUF_nbStreams_e - { - HUF_singleStream, - HUF_fourStreams, - } + HUF_singleStream, + HUF_fourStreams, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs index 2bae2cae0..5ab8b1da1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_repeat.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum HUF_repeat { - public enum HUF_repeat - { - /**< Cannot use the previous table */ - HUF_repeat_none, + /**< Cannot use the previous table */ + HUF_repeat_none, - /**< Can use the previous table but it must be checked. 
Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ - HUF_repeat_check, + /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ + HUF_repeat_check, - /**< Can use the previous table and it is assumed to be valid */ - HUF_repeat_valid, - } + /**< Can use the previous table and it is assumed to be valid */ + HUF_repeat_valid, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs index 586b280d9..7f6a6bb1a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs @@ -1,281 +1,280 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* --- Error management --- */ + private static bool HIST_isError(nuint code) { - /* --- Error management --- */ - private static bool HIST_isError(nuint code) + return ERR_isError(code); + } + + /*-************************************************************** + * Histogram functions + ****************************************************************/ + private static void HIST_add(uint* count, void* src, nuint srcSize) + { + byte* ip = (byte*)src; + byte* end = ip + srcSize; + while (ip < end) { - return ERR_isError(code); + count[*ip++]++; } + } - /*-************************************************************** - * Histogram functions - ****************************************************************/ - private static void HIST_add(uint* count, void* src, nuint srcSize) + /*! HIST_count_simple() : + * Same as HIST_countFast(), this function is unsafe, + * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. 
+ * It is also a bit slower for large inputs. + * However, it does not need any additional memory (not even on stack). + * @return : count of the most frequent symbol. + * Note this function doesn't produce any error (i.e. it must succeed). + */ + private static uint HIST_count_simple( + uint* count, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize + ) + { + byte* ip = (byte*)src; + byte* end = ip + srcSize; + uint maxSymbolValue = *maxSymbolValuePtr; + uint largestCount = 0; + memset(count, 0, (maxSymbolValue + 1) * sizeof(uint)); + if (srcSize == 0) { - byte* ip = (byte*)src; - byte* end = ip + srcSize; - while (ip < end) - { - count[*ip++]++; - } + *maxSymbolValuePtr = 0; + return 0; } - /*! HIST_count_simple() : - * Same as HIST_countFast(), this function is unsafe, - * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. - * It is also a bit slower for large inputs. - * However, it does not need any additional memory (not even on stack). - * @return : count of the most frequent symbol. - * Note this function doesn't produce any error (i.e. it must succeed). 
- */ - private static uint HIST_count_simple( - uint* count, - uint* maxSymbolValuePtr, - void* src, - nuint srcSize - ) + while (ip < end) { - byte* ip = (byte*)src; - byte* end = ip + srcSize; - uint maxSymbolValue = *maxSymbolValuePtr; - uint largestCount = 0; - memset(count, 0, (maxSymbolValue + 1) * sizeof(uint)); - if (srcSize == 0) - { - *maxSymbolValuePtr = 0; - return 0; - } + assert(*ip <= maxSymbolValue); + count[*ip++]++; + } - while (ip < end) - { - assert(*ip <= maxSymbolValue); - count[*ip++]++; - } + while (count[maxSymbolValue] == 0) + maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + { + uint s; + for (s = 0; s <= maxSymbolValue; s++) + if (count[s] > largestCount) + largestCount = count[s]; + } - while (count[maxSymbolValue] == 0) - maxSymbolValue--; - *maxSymbolValuePtr = maxSymbolValue; - { - uint s; - for (s = 0; s <= maxSymbolValue; s++) - if (count[s] > largestCount) - largestCount = count[s]; - } + return largestCount; + } - return largestCount; + /* HIST_count_parallel_wksp() : + * store histogram into 4 intermediate tables, recombined at the end. + * this design makes better use of OoO cpus, + * and is noticeably faster when some values are heavily repeated. + * But it needs some additional workspace for intermediate tables. + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32. 
+ * @return : largest histogram frequency, + * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */ + private static nuint HIST_count_parallel_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + HIST_checkInput_e check, + uint* workSpace + ) + { + byte* ip = (byte*)source; + byte* iend = ip + sourceSize; + nuint countSize = (*maxSymbolValuePtr + 1) * sizeof(uint); + uint max = 0; + uint* Counting1 = workSpace; + uint* Counting2 = Counting1 + 256; + uint* Counting3 = Counting2 + 256; + uint* Counting4 = Counting3 + 256; + assert(*maxSymbolValuePtr <= 255); + if (sourceSize == 0) + { + memset(count, 0, (uint)countSize); + *maxSymbolValuePtr = 0; + return 0; } - /* HIST_count_parallel_wksp() : - * store histogram into 4 intermediate tables, recombined at the end. - * this design makes better use of OoO cpus, - * and is noticeably faster when some values are heavily repeated. - * But it needs some additional workspace for intermediate tables. - * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32. 
- * @return : largest histogram frequency, - * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */ - private static nuint HIST_count_parallel_wksp( - uint* count, - uint* maxSymbolValuePtr, - void* source, - nuint sourceSize, - HIST_checkInput_e check, - uint* workSpace - ) + memset(workSpace, 0, 4 * 256 * sizeof(uint)); { - byte* ip = (byte*)source; - byte* iend = ip + sourceSize; - nuint countSize = (*maxSymbolValuePtr + 1) * sizeof(uint); - uint max = 0; - uint* Counting1 = workSpace; - uint* Counting2 = Counting1 + 256; - uint* Counting3 = Counting2 + 256; - uint* Counting4 = Counting3 + 256; - assert(*maxSymbolValuePtr <= 255); - if (sourceSize == 0) + uint cached = MEM_read32(ip); + ip += 4; + while (ip < iend - 15) { - memset(count, 0, (uint)countSize); - *maxSymbolValuePtr = 0; - return 0; - } - - memset(workSpace, 0, 4 * 256 * sizeof(uint)); - { - uint cached = MEM_read32(ip); + uint c = cached; + cached = MEM_read32(ip); ip += 4; - while (ip < iend - 15) - { - uint c = cached; - cached = MEM_read32(ip); - ip += 4; - Counting1[(byte)c]++; - Counting2[(byte)(c >> 8)]++; - Counting3[(byte)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = MEM_read32(ip); - ip += 4; - Counting1[(byte)c]++; - Counting2[(byte)(c >> 8)]++; - Counting3[(byte)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = MEM_read32(ip); - ip += 4; - Counting1[(byte)c]++; - Counting2[(byte)(c >> 8)]++; - Counting3[(byte)(c >> 16)]++; - Counting4[c >> 24]++; - c = cached; - cached = MEM_read32(ip); - ip += 4; - Counting1[(byte)c]++; - Counting2[(byte)(c >> 8)]++; - Counting3[(byte)(c >> 16)]++; - Counting4[c >> 24]++; - } - - ip -= 4; - } - - while (ip < iend) - Counting1[*ip++]++; - { - uint s; - for (s = 0; s < 256; s++) - { - Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; - if (Counting1[s] > max) - max = Counting1[s]; - } - } - - { - uint maxSymbolValue = 255; - while (Counting1[maxSymbolValue] == 0) - 
maxSymbolValue--; - if (check != default && maxSymbolValue > *maxSymbolValuePtr) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall) - ); - *maxSymbolValuePtr = maxSymbolValue; - memmove(count, Counting1, countSize); + Counting1[(byte)c]++; + Counting2[(byte)(c >> 8)]++; + Counting3[(byte)(c >> 16)]++; + Counting4[c >> 24]++; + c = cached; + cached = MEM_read32(ip); + ip += 4; + Counting1[(byte)c]++; + Counting2[(byte)(c >> 8)]++; + Counting3[(byte)(c >> 16)]++; + Counting4[c >> 24]++; + c = cached; + cached = MEM_read32(ip); + ip += 4; + Counting1[(byte)c]++; + Counting2[(byte)(c >> 8)]++; + Counting3[(byte)(c >> 16)]++; + Counting4[c >> 24]++; + c = cached; + cached = MEM_read32(ip); + ip += 4; + Counting1[(byte)c]++; + Counting2[(byte)(c >> 8)]++; + Counting3[(byte)(c >> 16)]++; + Counting4[c >> 24]++; } - return max; + ip -= 4; } - /* HIST_countFast_wksp() : - * Same as HIST_countFast(), but using an externally provided scratch buffer. - * `workSpace` is a writable buffer which must be 4-bytes aligned, - * `workSpaceSize` must be >= HIST_WKSP_SIZE - */ - private static nuint HIST_countFast_wksp( - uint* count, - uint* maxSymbolValuePtr, - void* source, - nuint sourceSize, - void* workSpace, - nuint workSpaceSize - ) + while (ip < iend) + Counting1[*ip++]++; { - if (sourceSize < 1500) - return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); - if (((nuint)workSpace & 3) != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (workSpaceSize < 1024 * sizeof(uint)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); - return HIST_count_parallel_wksp( - count, - maxSymbolValuePtr, - source, - sourceSize, - HIST_checkInput_e.trustInput, - (uint*)workSpace - ); + uint s; + for (s = 0; s < 256; s++) + { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s] > max) + max = Counting1[s]; + } } - /* HIST_count_wksp() : - * Same as 
HIST_count(), but using an externally provided scratch buffer. - * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ - private static nuint HIST_count_wksp( - uint* count, - uint* maxSymbolValuePtr, - void* source, - nuint sourceSize, - void* workSpace, - nuint workSpaceSize - ) { - if (((nuint)workSpace & 3) != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (workSpaceSize < 1024 * sizeof(uint)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); - if (*maxSymbolValuePtr < 255) - return HIST_count_parallel_wksp( - count, - maxSymbolValuePtr, - source, - sourceSize, - HIST_checkInput_e.checkMaxSymbolValue, - (uint*)workSpace + uint maxSymbolValue = 255; + while (Counting1[maxSymbolValue] == 0) + maxSymbolValue--; + if (check != default && maxSymbolValue > *maxSymbolValuePtr) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall) ); - *maxSymbolValuePtr = 255; - return HIST_countFast_wksp( - count, - maxSymbolValuePtr, - source, - sourceSize, - workSpace, - workSpaceSize - ); + *maxSymbolValuePtr = maxSymbolValue; + memmove(count, Counting1, countSize); } - /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ - private static nuint HIST_countFast( - uint* count, - uint* maxSymbolValuePtr, - void* source, - nuint sourceSize - ) - { - uint* tmpCounters = stackalloc uint[1024]; - return HIST_countFast_wksp( + return max; + } + + /* HIST_countFast_wksp() : + * Same as HIST_countFast(), but using an externally provided scratch buffer. 
+ * `workSpace` is a writable buffer which must be 4-bytes aligned, + * `workSpaceSize` must be >= HIST_WKSP_SIZE + */ + private static nuint HIST_countFast_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + void* workSpace, + nuint workSpaceSize + ) + { + if (sourceSize < 1500) + return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); + if (((nuint)workSpace & 3) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (workSpaceSize < 1024 * sizeof(uint)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + return HIST_count_parallel_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + HIST_checkInput_e.trustInput, + (uint*)workSpace + ); + } + + /* HIST_count_wksp() : + * Same as HIST_count(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ + private static nuint HIST_count_wksp( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize, + void* workSpace, + nuint workSpaceSize + ) + { + if (((nuint)workSpace & 3) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (workSpaceSize < 1024 * sizeof(uint)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + if (*maxSymbolValuePtr < 255) + return HIST_count_parallel_wksp( count, maxSymbolValuePtr, source, sourceSize, - tmpCounters, - sizeof(uint) * 1024 + HIST_checkInput_e.checkMaxSymbolValue, + (uint*)workSpace ); - } + *maxSymbolValuePtr = 255; + return HIST_countFast_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + workSpace, + workSpaceSize + ); + } - /*! HIST_count(): - * Provides the precise count of each byte within a table 'count'. - * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). - * Updates *maxSymbolValuePtr with actual largest symbol value detected. 
- * @return : count of the most frequent symbol (which isn't identified). - * or an error code, which can be tested using HIST_isError(). - * note : if return == srcSize, there is only one symbol. - */ - private static nuint HIST_count( - uint* count, - uint* maxSymbolValuePtr, - void* src, - nuint srcSize - ) - { - uint* tmpCounters = stackalloc uint[1024]; - return HIST_count_wksp( - count, - maxSymbolValuePtr, - src, - srcSize, - tmpCounters, - sizeof(uint) * 1024 - ); - } + /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ + private static nuint HIST_countFast( + uint* count, + uint* maxSymbolValuePtr, + void* source, + nuint sourceSize + ) + { + uint* tmpCounters = stackalloc uint[1024]; + return HIST_countFast_wksp( + count, + maxSymbolValuePtr, + source, + sourceSize, + tmpCounters, + sizeof(uint) * 1024 + ); + } + + /*! HIST_count(): + * Provides the precise count of each byte within a table 'count'. + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). + * Updates *maxSymbolValuePtr with actual largest symbol value detected. + * @return : count of the most frequent symbol (which isn't identified). + * or an error code, which can be tested using HIST_isError(). + * note : if return == srcSize, there is only one symbol. 
+ */ + private static nuint HIST_count( + uint* count, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize + ) + { + uint* tmpCounters = stackalloc uint[1024]; + return HIST_count_wksp( + count, + maxSymbolValuePtr, + src, + srcSize, + tmpCounters, + sizeof(uint) * 1024 + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs index 15bfe2608..9d00fb3e2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs @@ -1,1794 +1,1700 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + private static void* HUF_alignUpWorkspace( + void* workspace, + nuint* workspaceSizePtr, + nuint align + ) { - private static void* HUF_alignUpWorkspace( - void* workspace, - nuint* workspaceSizePtr, - nuint align - ) + nuint mask = align - 1; + nuint rem = (nuint)workspace & mask; + nuint add = align - rem & mask; + byte* aligned = (byte*)workspace + add; + assert((align & align - 1) == 0); + assert(align <= 8); + if (*workspaceSizePtr >= add) { - nuint mask = align - 1; - nuint rem = (nuint)workspace & mask; - nuint add = align - rem & mask; - byte* aligned = (byte*)workspace + add; - assert((align & align - 1) == 0); - assert(align <= 8); - if (*workspaceSizePtr >= add) - { - assert(add < align); - assert(((nuint)aligned & mask) == 0); - *workspaceSizePtr -= add; - return aligned; - } - else - { - *workspaceSizePtr = 0; - return null; - } + assert(add < align); + assert(((nuint)aligned & mask) == 0); + *workspaceSizePtr -= add; + return aligned; + } + else + { + *workspaceSizePtr = 0; + return null; } + } - private static nuint 
HUF_compressWeights( - void* dst, - nuint dstSize, - void* weightTable, - nuint wtSize, - void* workspace, - nuint workspaceSize - ) + private static nuint HUF_compressWeights( + void* dst, + nuint dstSize, + void* weightTable, + nuint wtSize, + void* workspace, + nuint workspaceSize + ) + { + byte* ostart = (byte*)dst; + byte* op = ostart; + byte* oend = ostart + dstSize; + uint maxSymbolValue = 12; + uint tableLog = 6; + HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace( + workspace, + &workspaceSize, + sizeof(uint) + ); + if (workspaceSize < (nuint)sizeof(HUF_CompressWeightsWksp)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (wtSize <= 1) + return 0; { - byte* ostart = (byte*)dst; - byte* op = ostart; - byte* oend = ostart + dstSize; - uint maxSymbolValue = 12; - uint tableLog = 6; - HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace( - workspace, - &workspaceSize, - sizeof(uint) + /* never fails */ + uint maxCount = HIST_count_simple( + wksp->count, + &maxSymbolValue, + weightTable, + wtSize ); - if (workspaceSize < (nuint)sizeof(HUF_CompressWeightsWksp)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (wtSize <= 1) + if (maxCount == wtSize) + return 1; + if (maxCount == 1) return 0; - { - /* never fails */ - uint maxCount = HIST_count_simple( - wksp->count, - &maxSymbolValue, - weightTable, - wtSize - ); - if (maxCount == wtSize) - return 1; - if (maxCount == 1) - return 0; - } - - tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); - { - /* useLowProbCount */ - nuint _var_err__ = FSE_normalizeCount( - wksp->norm, - tableLog, - wksp->count, - wtSize, - maxSymbolValue, - 0 - ); - if (ERR_isError(_var_err__)) - return _var_err__; - } - - { - nuint hSize = FSE_writeNCount( - op, - (nuint)(oend - op), - wksp->norm, - maxSymbolValue, - tableLog - ); - if (ERR_isError(hSize)) - return hSize; - op += hSize; - } - - { - /* Compress */ - nuint 
_var_err__ = FSE_buildCTable_wksp( - wksp->CTable, - wksp->norm, - maxSymbolValue, - tableLog, - wksp->scratchBuffer, - sizeof(uint) * 41 - ); - if (ERR_isError(_var_err__)) - return _var_err__; - } - - { - nuint cSize = FSE_compress_usingCTable( - op, - (nuint)(oend - op), - weightTable, - wtSize, - wksp->CTable - ); - if (ERR_isError(cSize)) - return cSize; - if (cSize == 0) - return 0; - op += cSize; - } - - return (nuint)(op - ostart); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_getNbBits(nuint elt) + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); { - return elt & 0xFF; + /* useLowProbCount */ + nuint _var_err__ = FSE_normalizeCount( + wksp->norm, + tableLog, + wksp->count, + wtSize, + maxSymbolValue, + 0 + ); + if (ERR_isError(_var_err__)) + return _var_err__; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_getNbBitsFast(nuint elt) { - return elt; + nuint hSize = FSE_writeNCount( + op, + (nuint)(oend - op), + wksp->norm, + maxSymbolValue, + tableLog + ); + if (ERR_isError(hSize)) + return hSize; + op += hSize; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_getValue(nuint elt) { - return elt & ~(nuint)0xFF; + /* Compress */ + nuint _var_err__ = FSE_buildCTable_wksp( + wksp->CTable, + wksp->norm, + maxSymbolValue, + tableLog, + wksp->scratchBuffer, + sizeof(uint) * 41 + ); + if (ERR_isError(_var_err__)) + return _var_err__; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_getValueFast(nuint elt) { - return elt; + nuint cSize = FSE_compress_usingCTable( + op, + (nuint)(oend - op), + weightTable, + wtSize, + wksp->CTable + ); + if (ERR_isError(cSize)) + return cSize; + if (cSize == 0) + return 0; + op += cSize; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_setNbBits(nuint* elt, nuint nbBits) + return (nuint)(op - ostart); + } + + 
[MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_getNbBits(nuint elt) + { + return elt & 0xFF; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_getNbBitsFast(nuint elt) + { + return elt; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_getValue(nuint elt) + { + return elt & ~(nuint)0xFF; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_getValueFast(nuint elt) + { + return elt; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_setNbBits(nuint* elt, nuint nbBits) + { + assert(nbBits <= 12); + *elt = nbBits; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_setValue(nuint* elt, nuint value) + { + nuint nbBits = HUF_getNbBits(*elt); + if (nbBits > 0) { - assert(nbBits <= 12); - *elt = nbBits; + assert(value >> (int)nbBits == 0); + *elt |= value << (int)((nuint)(sizeof(nuint) * 8) - nbBits); } + } + + /** HUF_readCTableHeader() : + * @returns The header from the CTable specifying the tableLog and the maxSymbolValue. 
+ */ + private static HUF_CTableHeader HUF_readCTableHeader(nuint* ctable) + { + HUF_CTableHeader header; + memcpy(&header, ctable, (uint)sizeof(nuint)); + return header; + } + + private static void HUF_writeCTableHeader(nuint* ctable, uint tableLog, uint maxSymbolValue) + { + HUF_CTableHeader header; + memset(&header, 0, (uint)sizeof(nuint)); + assert(tableLog < 256); + header.tableLog = (byte)tableLog; + assert(maxSymbolValue < 256); + header.maxSymbolValue = (byte)maxSymbolValue; + memcpy(ctable, &header, (uint)sizeof(nuint)); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_setValue(nuint* elt, nuint value) + private static nuint HUF_writeCTable_wksp( + void* dst, + nuint maxDstSize, + nuint* CTable, + uint maxSymbolValue, + uint huffLog, + void* workspace, + nuint workspaceSize + ) + { + nuint* ct = CTable + 1; + byte* op = (byte*)dst; + uint n; + HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace( + workspace, + &workspaceSize, + sizeof(uint) + ); + assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); + assert(HUF_readCTableHeader(CTable).tableLog == huffLog); + if (workspaceSize < (nuint)sizeof(HUF_WriteCTableWksp)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if (maxSymbolValue > 255) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + wksp->bitsToWeight[0] = 0; + for (n = 1; n < huffLog + 1; n++) + wksp->bitsToWeight[n] = (byte)(huffLog + 1 - n); + for (n = 0; n < maxSymbolValue; n++) + wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; + if (maxDstSize < 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); { - nuint nbBits = HUF_getNbBits(*elt); - if (nbBits > 0) + nuint hSize = HUF_compressWeights( + op + 1, + maxDstSize - 1, + wksp->huffWeight, + maxSymbolValue, + &wksp->wksp, + (nuint)sizeof(HUF_CompressWeightsWksp) + ); + if (ERR_isError(hSize)) + return hSize; + if (hSize > 1 
&& hSize < maxSymbolValue / 2) { - assert(value >> (int)nbBits == 0); - *elt |= value << (int)((nuint)(sizeof(nuint) * 8) - nbBits); + op[0] = (byte)hSize; + return hSize + 1; } } - /** HUF_readCTableHeader() : - * @returns The header from the CTable specifying the tableLog and the maxSymbolValue. - */ - private static HUF_CTableHeader HUF_readCTableHeader(nuint* ctable) - { - HUF_CTableHeader header; - memcpy(&header, ctable, (uint)sizeof(nuint)); - return header; - } + if (maxSymbolValue > 256 - 128) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + if ((maxSymbolValue + 1) / 2 + 1 > maxDstSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + op[0] = (byte)(128 + (maxSymbolValue - 1)); + wksp->huffWeight[maxSymbolValue] = 0; + for (n = 0; n < maxSymbolValue; n += 2) + op[n / 2 + 1] = (byte)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n + 1]); + return (maxSymbolValue + 1) / 2 + 1; + } - private static void HUF_writeCTableHeader(nuint* ctable, uint tableLog, uint maxSymbolValue) + /** HUF_readCTable() : + * Loading a CTable saved with HUF_writeCTable() */ + private static nuint HUF_readCTable( + nuint* CTable, + uint* maxSymbolValuePtr, + void* src, + nuint srcSize, + uint* hasZeroWeights + ) + { + /* init not required, even though some static analyzer may complain */ + byte* huffWeight = stackalloc byte[256]; + /* large enough for values from 0 to 16 */ + uint* rankVal = stackalloc uint[13]; + uint tableLog = 0; + uint nbSymbols = 0; + nuint* ct = CTable + 1; + /* get symbol weights */ + nuint readSize = HUF_readStats( + huffWeight, + 255 + 1, + rankVal, + &nbSymbols, + &tableLog, + src, + srcSize + ); + if (ERR_isError(readSize)) + return readSize; + *hasZeroWeights = rankVal[0] > 0 ? 
1U : 0U; + if (tableLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (nbSymbols > *maxSymbolValuePtr + 1) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); + *maxSymbolValuePtr = nbSymbols - 1; + HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); { - HUF_CTableHeader header; - memset(&header, 0, (uint)sizeof(nuint)); - assert(tableLog < 256); - header.tableLog = (byte)tableLog; - assert(maxSymbolValue < 256); - header.maxSymbolValue = (byte)maxSymbolValue; - memcpy(ctable, &header, (uint)sizeof(nuint)); + uint n, + nextRankStart = 0; + for (n = 1; n <= tableLog; n++) + { + uint curr = nextRankStart; + nextRankStart += rankVal[n] << (int)(n - 1); + rankVal[n] = curr; + } } - private static nuint HUF_writeCTable_wksp( - void* dst, - nuint maxDstSize, - nuint* CTable, - uint maxSymbolValue, - uint huffLog, - void* workspace, - nuint workspaceSize - ) { - nuint* ct = CTable + 1; - byte* op = (byte*)dst; uint n; - HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace( - workspace, - &workspaceSize, - sizeof(uint) - ); - assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); - assert(HUF_readCTableHeader(CTable).tableLog == huffLog); - if (workspaceSize < (nuint)sizeof(HUF_WriteCTableWksp)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if (maxSymbolValue > 255) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); - wksp->bitsToWeight[0] = 0; - for (n = 1; n < huffLog + 1; n++) - wksp->bitsToWeight[n] = (byte)(huffLog + 1 - n); - for (n = 0; n < maxSymbolValue; n++) - wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; - if (maxDstSize < 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + for (n = 0; n < nbSymbols; n++) { - nuint hSize = HUF_compressWeights( - op + 1, - maxDstSize - 1, - wksp->huffWeight, - maxSymbolValue, - &wksp->wksp, - 
(nuint)sizeof(HUF_CompressWeightsWksp) - ); - if (ERR_isError(hSize)) - return hSize; - if (hSize > 1 && hSize < maxSymbolValue / 2) - { - op[0] = (byte)hSize; - return hSize + 1; - } + uint w = huffWeight[n]; + HUF_setNbBits(ct + n, (nuint)((byte)(tableLog + 1 - w) & -(w != 0 ? 1 : 0))); } - - if (maxSymbolValue > 256 - 128) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - if ((maxSymbolValue + 1) / 2 + 1 > maxDstSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - op[0] = (byte)(128 + (maxSymbolValue - 1)); - wksp->huffWeight[maxSymbolValue] = 0; - for (n = 0; n < maxSymbolValue; n += 2) - op[n / 2 + 1] = (byte)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n + 1]); - return (maxSymbolValue + 1) / 2 + 1; } - /** HUF_readCTable() : - * Loading a CTable saved with HUF_writeCTable() */ - private static nuint HUF_readCTable( - nuint* CTable, - uint* maxSymbolValuePtr, - void* src, - nuint srcSize, - uint* hasZeroWeights - ) { - /* init not required, even though some static analyzer may complain */ - byte* huffWeight = stackalloc byte[256]; - /* large enough for values from 0 to 16 */ - uint* rankVal = stackalloc uint[13]; - uint tableLog = 0; - uint nbSymbols = 0; - nuint* ct = CTable + 1; - /* get symbol weights */ - nuint readSize = HUF_readStats( - huffWeight, - 255 + 1, - rankVal, - &nbSymbols, - &tableLog, - src, - srcSize - ); - if (ERR_isError(readSize)) - return readSize; - *hasZeroWeights = rankVal[0] > 0 ? 
1U : 0U; - if (tableLog > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - if (nbSymbols > *maxSymbolValuePtr + 1) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); - *maxSymbolValuePtr = nbSymbols - 1; - HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); + ushort* nbPerRank = stackalloc ushort[14]; + /* support w=0=>n=tableLog+1 */ + memset(nbPerRank, 0, sizeof(ushort) * 14); + ushort* valPerRank = stackalloc ushort[14]; + memset(valPerRank, 0, sizeof(ushort) * 14); { - uint n, - nextRankStart = 0; - for (n = 1; n <= tableLog; n++) - { - uint curr = nextRankStart; - nextRankStart += rankVal[n] << (int)(n - 1); - rankVal[n] = curr; - } + uint n; + for (n = 0; n < nbSymbols; n++) + nbPerRank[HUF_getNbBits(ct[n])]++; } + valPerRank[tableLog + 1] = 0; { + ushort min = 0; + /* start at n=tablelog <-> w=1 */ uint n; - for (n = 0; n < nbSymbols; n++) + for (n = tableLog; n > 0; n--) { - uint w = huffWeight[n]; - HUF_setNbBits(ct + n, (nuint)((byte)(tableLog + 1 - w) & -(w != 0 ? 
1 : 0))); + valPerRank[n] = min; + min += nbPerRank[n]; + min >>= 1; } } { - ushort* nbPerRank = stackalloc ushort[14]; - /* support w=0=>n=tableLog+1 */ - memset(nbPerRank, 0, sizeof(ushort) * 14); - ushort* valPerRank = stackalloc ushort[14]; - memset(valPerRank, 0, sizeof(ushort) * 14); - { - uint n; - for (n = 0; n < nbSymbols; n++) - nbPerRank[HUF_getNbBits(ct[n])]++; - } - - valPerRank[tableLog + 1] = 0; - { - ushort min = 0; - /* start at n=tablelog <-> w=1 */ - uint n; - for (n = tableLog; n > 0; n--) - { - valPerRank[n] = min; - min += nbPerRank[n]; - min >>= 1; - } - } - - { - uint n; - for (n = 0; n < nbSymbols; n++) - HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); - } + uint n; + for (n = 0; n < nbSymbols; n++) + HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } - - return readSize; } - /** HUF_getNbBitsFromCTable() : - * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0 - * Note 2 : is not inlined, as HUF_CElt definition is private - */ - private static uint HUF_getNbBitsFromCTable(nuint* CTable, uint symbolValue) - { - nuint* ct = CTable + 1; - assert(symbolValue <= 255); - if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue) - return 0; - return (uint)HUF_getNbBits(ct[symbolValue]); - } + return readSize; + } - /** - * HUF_setMaxHeight(): - * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. - * - * It attempts to convert all nodes with nbBits > @targetNbBits - * to employ @targetNbBits instead. Then it adjusts the tree - * so that it remains a valid canonical Huffman tree. - * - * @pre The sum of the ranks of each symbol == 2^largestBits, - * where largestBits == huffNode[lastNonNull].nbBits. - * @post The sum of the ranks of each symbol == 2^largestBits, - * where largestBits is the return value (expected <= targetNbBits). 
- * - * @param huffNode The Huffman tree modified in place to enforce targetNbBits. - * It's presumed sorted, from most frequent to rarest symbol. - * @param lastNonNull The symbol with the lowest count in the Huffman tree. - * @param targetNbBits The allowed number of bits, which the Huffman tree - * may not respect. After this function the Huffman tree will - * respect targetNbBits. - * @return The maximum number of bits of the Huffman tree after adjustment. - */ - private static uint HUF_setMaxHeight( - nodeElt_s* huffNode, - uint lastNonNull, - uint targetNbBits - ) + /** HUF_getNbBitsFromCTable() : + * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX + * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0 + * Note 2 : is not inlined, as HUF_CElt definition is private + */ + private static uint HUF_getNbBitsFromCTable(nuint* CTable, uint symbolValue) + { + nuint* ct = CTable + 1; + assert(symbolValue <= 255); + if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue) + return 0; + return (uint)HUF_getNbBits(ct[symbolValue]); + } + + /** + * HUF_setMaxHeight(): + * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. + * + * It attempts to convert all nodes with nbBits > @targetNbBits + * to employ @targetNbBits instead. Then it adjusts the tree + * so that it remains a valid canonical Huffman tree. + * + * @pre The sum of the ranks of each symbol == 2^largestBits, + * where largestBits == huffNode[lastNonNull].nbBits. + * @post The sum of the ranks of each symbol == 2^largestBits, + * where largestBits is the return value (expected <= targetNbBits). + * + * @param huffNode The Huffman tree modified in place to enforce targetNbBits. + * It's presumed sorted, from most frequent to rarest symbol. + * @param lastNonNull The symbol with the lowest count in the Huffman tree. 
+ * @param targetNbBits The allowed number of bits, which the Huffman tree + * may not respect. After this function the Huffman tree will + * respect targetNbBits. + * @return The maximum number of bits of the Huffman tree after adjustment. + */ + private static uint HUF_setMaxHeight( + nodeElt_s* huffNode, + uint lastNonNull, + uint targetNbBits + ) + { + uint largestBits = huffNode[lastNonNull].nbBits; + if (largestBits <= targetNbBits) + return largestBits; { - uint largestBits = huffNode[lastNonNull].nbBits; - if (largestBits <= targetNbBits) - return largestBits; + int totalCost = 0; + uint baseCost = (uint)(1 << (int)(largestBits - targetNbBits)); + int n = (int)lastNonNull; + while (huffNode[n].nbBits > targetNbBits) { - int totalCost = 0; - uint baseCost = (uint)(1 << (int)(largestBits - targetNbBits)); - int n = (int)lastNonNull; - while (huffNode[n].nbBits > targetNbBits) - { - totalCost += (int)( - baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits)) - ); - huffNode[n].nbBits = (byte)targetNbBits; - n--; - } + totalCost += (int)( + baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits)) + ); + huffNode[n].nbBits = (byte)targetNbBits; + n--; + } - assert(huffNode[n].nbBits <= targetNbBits); - while (huffNode[n].nbBits == targetNbBits) - --n; - assert(((uint)totalCost & baseCost - 1) == 0); - totalCost >>= (int)(largestBits - targetNbBits); - assert(totalCost > 0); + assert(huffNode[n].nbBits <= targetNbBits); + while (huffNode[n].nbBits == targetNbBits) + --n; + assert(((uint)totalCost & baseCost - 1) == 0); + totalCost >>= (int)(largestBits - targetNbBits); + assert(totalCost > 0); + { + const uint noSymbol = 0xF0F0F0F0; + uint* rankLast = stackalloc uint[14]; + memset(rankLast, 0xF0, sizeof(uint) * 14); { - const uint noSymbol = 0xF0F0F0F0; - uint* rankLast = stackalloc uint[14]; - memset(rankLast, 0xF0, sizeof(uint) * 14); + uint currentNbBits = targetNbBits; + int pos; + for (pos = n; pos >= 0; pos--) { - uint currentNbBits = 
targetNbBits; - int pos; - for (pos = n; pos >= 0; pos--) - { - if (huffNode[pos].nbBits >= currentNbBits) - continue; - currentNbBits = huffNode[pos].nbBits; - rankLast[targetNbBits - currentNbBits] = (uint)pos; - } + if (huffNode[pos].nbBits >= currentNbBits) + continue; + currentNbBits = huffNode[pos].nbBits; + rankLast[targetNbBits - currentNbBits] = (uint)pos; } + } - while (totalCost > 0) + while (totalCost > 0) + { + /* Try to reduce the next power of 2 above totalCost because we + * gain back half the rank. + */ + uint nBitsToDecrease = ZSTD_highbit32((uint)totalCost) + 1; + for (; nBitsToDecrease > 1; nBitsToDecrease--) { - /* Try to reduce the next power of 2 above totalCost because we - * gain back half the rank. - */ - uint nBitsToDecrease = ZSTD_highbit32((uint)totalCost) + 1; - for (; nBitsToDecrease > 1; nBitsToDecrease--) + uint highPos = rankLast[nBitsToDecrease]; + uint lowPos = rankLast[nBitsToDecrease - 1]; + if (highPos == noSymbol) + continue; + if (lowPos == noSymbol) + break; { - uint highPos = rankLast[nBitsToDecrease]; - uint lowPos = rankLast[nBitsToDecrease - 1]; - if (highPos == noSymbol) - continue; - if (lowPos == noSymbol) + uint highTotal = huffNode[highPos].count; + uint lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; - { - uint highTotal = huffNode[highPos].count; - uint lowTotal = 2 * huffNode[lowPos].count; - if (highTotal <= lowTotal) - break; - } } + } - assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); - while (nBitsToDecrease <= 12 && rankLast[nBitsToDecrease] == noSymbol) - nBitsToDecrease++; - assert(rankLast[nBitsToDecrease] != noSymbol); - totalCost -= 1 << (int)(nBitsToDecrease - 1); - huffNode[rankLast[nBitsToDecrease]].nbBits++; - if (rankLast[nBitsToDecrease - 1] == noSymbol) - rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; - if (rankLast[nBitsToDecrease] == 0) + assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); + while (nBitsToDecrease 
<= 12 && rankLast[nBitsToDecrease] == noSymbol) + nBitsToDecrease++; + assert(rankLast[nBitsToDecrease] != noSymbol); + totalCost -= 1 << (int)(nBitsToDecrease - 1); + huffNode[rankLast[nBitsToDecrease]].nbBits++; + if (rankLast[nBitsToDecrease - 1] == noSymbol) + rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; + if (rankLast[nBitsToDecrease] == 0) + rankLast[nBitsToDecrease] = noSymbol; + else + { + rankLast[nBitsToDecrease]--; + if ( + huffNode[rankLast[nBitsToDecrease]].nbBits + != targetNbBits - nBitsToDecrease + ) rankLast[nBitsToDecrease] = noSymbol; - else - { - rankLast[nBitsToDecrease]--; - if ( - huffNode[rankLast[nBitsToDecrease]].nbBits - != targetNbBits - nBitsToDecrease - ) - rankLast[nBitsToDecrease] = noSymbol; - } } + } - while (totalCost < 0) + while (totalCost < 0) + { + if (rankLast[1] == noSymbol) { - if (rankLast[1] == noSymbol) - { - while (huffNode[n].nbBits == targetNbBits) - n--; - huffNode[n + 1].nbBits--; - assert(n >= 0); - rankLast[1] = (uint)(n + 1); - totalCost++; - continue; - } - - huffNode[rankLast[1] + 1].nbBits--; - rankLast[1]++; + while (huffNode[n].nbBits == targetNbBits) + n--; + huffNode[n + 1].nbBits--; + assert(n >= 0); + rankLast[1] = (uint)(n + 1); totalCost++; + continue; } + + huffNode[rankLast[1] + 1].nbBits--; + rankLast[1]++; + totalCost++; } } - - return targetNbBits; - } - - /* Return the appropriate bucket index for a given count. See definition of - * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_getIndex(uint count) - { - return count < 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1) - ? 
count - : ZSTD_highbit32(count) + (192 - 1 - 32 - 1); } - /* Helper swap function for HUF_quickSortPartition() */ - private static void HUF_swapNodes(nodeElt_s* a, nodeElt_s* b) - { - nodeElt_s tmp = *a; - *a = *b; - *b = tmp; - } + return targetNbBits; + } - /* Returns 0 if the huffNode array is not sorted by descending count */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int HUF_isSorted(nodeElt_s* huffNode, uint maxSymbolValue1) - { - uint i; - for (i = 1; i < maxSymbolValue1; ++i) - { - if (huffNode[i].count > huffNode[i - 1].count) - { - return 0; - } - } + /* Return the appropriate bucket index for a given count. See definition of + * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint HUF_getIndex(uint count) + { + return count < 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1) + ? count + : ZSTD_highbit32(count) + (192 - 1 - 32 - 1); + } - return 1; - } + /* Helper swap function for HUF_quickSortPartition() */ + private static void HUF_swapNodes(nodeElt_s* a, nodeElt_s* b) + { + nodeElt_s tmp = *a; + *a = *b; + *b = tmp; + } - /* Insertion sort by descending order */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_insertionSort(nodeElt_s* huffNode, int low, int high) + /* Returns 0 if the huffNode array is not sorted by descending count */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int HUF_isSorted(nodeElt_s* huffNode, uint maxSymbolValue1) + { + uint i; + for (i = 1; i < maxSymbolValue1; ++i) { - int i; - int size = high - low + 1; - huffNode += low; - for (i = 1; i < size; ++i) + if (huffNode[i].count > huffNode[i - 1].count) { - nodeElt_s key = huffNode[i]; - int j = i - 1; - while (j >= 0 && huffNode[j].count < key.count) - { - huffNode[j + 1] = huffNode[j]; - j--; - } - - huffNode[j + 1] = key; + return 0; } } - /* Pivot helper function for quicksort. 
*/ - private static int HUF_quickSortPartition(nodeElt_s* arr, int low, int high) + return 1; + } + + /* Insertion sort by descending order */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_insertionSort(nodeElt_s* huffNode, int low, int high) + { + int i; + int size = high - low + 1; + huffNode += low; + for (i = 1; i < size; ++i) { - /* Simply select rightmost element as pivot. "Better" selectors like - * median-of-three don't experimentally appear to have any benefit. - */ - uint pivot = arr[high].count; - int i = low - 1; - int j = low; - for (; j < high; j++) + nodeElt_s key = huffNode[i]; + int j = i - 1; + while (j >= 0 && huffNode[j].count < key.count) { - if (arr[j].count > pivot) - { - i++; - HUF_swapNodes(&arr[i], &arr[j]); - } + huffNode[j + 1] = huffNode[j]; + j--; } - HUF_swapNodes(&arr[i + 1], &arr[high]); - return i + 1; + huffNode[j + 1] = key; } + } - /* Classic quicksort by descending with partially iterative calls - * to reduce worst case callstack size. + /* Pivot helper function for quicksort. */ + private static int HUF_quickSortPartition(nodeElt_s* arr, int low, int high) + { + /* Simply select rightmost element as pivot. "Better" selectors like + * median-of-three don't experimentally appear to have any benefit. 
*/ - private static void HUF_simpleQuickSort(nodeElt_s* arr, int low, int high) + uint pivot = arr[high].count; + int i = low - 1; + int j = low; + for (; j < high; j++) { - const int kInsertionSortThreshold = 8; - if (high - low < kInsertionSortThreshold) + if (arr[j].count > pivot) { - HUF_insertionSort(arr, low, high); - return; - } - - while (low < high) - { - int idx = HUF_quickSortPartition(arr, low, high); - if (idx - low < high - idx) - { - HUF_simpleQuickSort(arr, low, idx - 1); - low = idx + 1; - } - else - { - HUF_simpleQuickSort(arr, idx + 1, high); - high = idx - 1; - } + i++; + HUF_swapNodes(&arr[i], &arr[j]); } } - /** - * HUF_sort(): - * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. - * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. - * - * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. - * Must have (maxSymbolValue + 1) entries. - * @param[in] count Histogram of the symbols. - * @param[in] maxSymbolValue Maximum symbol value. - * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. - */ - private static void HUF_sort( - nodeElt_s* huffNode, - uint* count, - uint maxSymbolValue, - rankPos* rankPosition - ) - { - uint n; - uint maxSymbolValue1 = maxSymbolValue + 1; - memset(rankPosition, 0, (uint)(sizeof(rankPos) * 192)); - for (n = 0; n < maxSymbolValue1; ++n) - { - uint lowerRank = HUF_getIndex(count[n]); - assert(lowerRank < 192 - 1); - rankPosition[lowerRank].@base++; - } + HUF_swapNodes(&arr[i + 1], &arr[high]); + return i + 1; + } - assert(rankPosition[192 - 1].@base == 0); - for (n = 192 - 1; n > 0; --n) - { - rankPosition[n - 1].@base += rankPosition[n].@base; - rankPosition[n - 1].curr = rankPosition[n - 1].@base; - } + /* Classic quicksort by descending with partially iterative calls + * to reduce worst case callstack size. 
+ */ + private static void HUF_simpleQuickSort(nodeElt_s* arr, int low, int high) + { + const int kInsertionSortThreshold = 8; + if (high - low < kInsertionSortThreshold) + { + HUF_insertionSort(arr, low, high); + return; + } - for (n = 0; n < maxSymbolValue1; ++n) + while (low < high) + { + int idx = HUF_quickSortPartition(arr, low, high); + if (idx - low < high - idx) { - uint c = count[n]; - uint r = HUF_getIndex(c) + 1; - uint pos = rankPosition[r].curr++; - assert(pos < maxSymbolValue1); - huffNode[pos].count = c; - huffNode[pos].@byte = (byte)n; + HUF_simpleQuickSort(arr, low, idx - 1); + low = idx + 1; } - - for (n = 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1); n < 192 - 1; ++n) + else { - int bucketSize = rankPosition[n].curr - rankPosition[n].@base; - uint bucketStartIdx = rankPosition[n].@base; - if (bucketSize > 1) - { - assert(bucketStartIdx < maxSymbolValue1); - HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize - 1); - } + HUF_simpleQuickSort(arr, idx + 1, high); + high = idx - 1; } - - assert(HUF_isSorted(huffNode, maxSymbolValue1) != 0); } + } - /* HUF_buildTree(): - * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. - * - * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. - * @param maxSymbolValue The maximum symbol value. - * @return The smallest node in the Huffman tree (by count). - */ - private static int HUF_buildTree(nodeElt_s* huffNode, uint maxSymbolValue) + /** + * HUF_sort(): + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. + * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. + * + * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. + * Must have (maxSymbolValue + 1) entries. + * @param[in] count Histogram of the symbols. + * @param[in] maxSymbolValue Maximum symbol value. 
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. + */ + private static void HUF_sort( + nodeElt_s* huffNode, + uint* count, + uint maxSymbolValue, + rankPos* rankPosition + ) + { + uint n; + uint maxSymbolValue1 = maxSymbolValue + 1; + memset(rankPosition, 0, (uint)(sizeof(rankPos) * 192)); + for (n = 0; n < maxSymbolValue1; ++n) { - nodeElt_s* huffNode0 = huffNode - 1; - int nonNullRank; - int lowS, - lowN; - int nodeNb = 255 + 1; - int n, - nodeRoot; - nonNullRank = (int)maxSymbolValue; - while (huffNode[nonNullRank].count == 0) - nonNullRank--; - lowS = nonNullRank; - nodeRoot = nodeNb + lowS - 1; - lowN = nodeNb; - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count; - huffNode[lowS].parent = huffNode[lowS - 1].parent = (ushort)nodeNb; - nodeNb++; - lowS -= 2; - for (n = nodeNb; n <= nodeRoot; n++) - huffNode[n].count = 1U << 30; - huffNode0[0].count = 1U << 31; - while (nodeNb <= nodeRoot) - { - int n1 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++; - int n2 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++; - huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; - huffNode[n1].parent = huffNode[n2].parent = (ushort)nodeNb; - nodeNb++; - } - - huffNode[nodeRoot].nbBits = 0; - for (n = nodeRoot - 1; n >= 255 + 1; n--) - huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1); - for (n = 0; n <= nonNullRank; n++) - huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1); - return nonNullRank; + uint lowerRank = HUF_getIndex(count[n]); + assert(lowerRank < 192 - 1); + rankPosition[lowerRank].@base++; } - /** - * HUF_buildCTableFromTree(): - * Build the CTable given the Huffman tree in huffNode. - * - * @param[out] CTable The output Huffman CTable. - * @param huffNode The Huffman tree. - * @param nonNullRank The last and smallest node in the Huffman tree. - * @param maxSymbolValue The maximum symbol value. 
- * @param maxNbBits The exact maximum number of bits used in the Huffman tree. - */ - private static void HUF_buildCTableFromTree( - nuint* CTable, - nodeElt_s* huffNode, - int nonNullRank, - uint maxSymbolValue, - uint maxNbBits - ) + assert(rankPosition[192 - 1].@base == 0); + for (n = 192 - 1; n > 0; --n) { - nuint* ct = CTable + 1; - /* fill result into ctable (val, nbBits) */ - int n; - ushort* nbPerRank = stackalloc ushort[13]; - memset(nbPerRank, 0, sizeof(ushort) * 13); - ushort* valPerRank = stackalloc ushort[13]; - memset(valPerRank, 0, sizeof(ushort) * 13); - int alphabetSize = (int)(maxSymbolValue + 1); - for (n = 0; n <= nonNullRank; n++) - nbPerRank[huffNode[n].nbBits]++; - { - ushort min = 0; - for (n = (int)maxNbBits; n > 0; n--) - { - valPerRank[n] = min; - min += nbPerRank[n]; - min >>= 1; - } - } - - for (n = 0; n < alphabetSize; n++) - HUF_setNbBits(ct + huffNode[n].@byte, huffNode[n].nbBits); - for (n = 0; n < alphabetSize; n++) - HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); - HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue); + rankPosition[n - 1].@base += rankPosition[n].@base; + rankPosition[n - 1].curr = rankPosition[n - 1].@base; } - private static nuint HUF_buildCTable_wksp( - nuint* CTable, - uint* count, - uint maxSymbolValue, - uint maxNbBits, - void* workSpace, - nuint wkspSize - ) + for (n = 0; n < maxSymbolValue1; ++n) { - HUF_buildCTable_wksp_tables* wksp_tables = - (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace( - workSpace, - &wkspSize, - sizeof(uint) - ); - nodeElt_s* huffNode0 = &wksp_tables->huffNodeTbl.e0; - nodeElt_s* huffNode = huffNode0 + 1; - int nonNullRank; - if (wkspSize < (nuint)sizeof(HUF_buildCTable_wksp_tables)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); - if (maxNbBits == 0) - maxNbBits = 11; - if (maxSymbolValue > 255) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); - memset(huffNode0, 0, (uint)(sizeof(nodeElt_s) 
* 512)); - HUF_sort(huffNode, count, maxSymbolValue, &wksp_tables->rankPosition.e0); - nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); - maxNbBits = HUF_setMaxHeight(huffNode, (uint)nonNullRank, maxNbBits); - if (maxNbBits > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); - return maxNbBits; + uint c = count[n]; + uint r = HUF_getIndex(c) + 1; + uint pos = rankPosition[r].curr++; + assert(pos < maxSymbolValue1); + huffNode[pos].count = c; + huffNode[pos].@byte = (byte)n; } - private static nuint HUF_estimateCompressedSize( - nuint* CTable, - uint* count, - uint maxSymbolValue - ) + for (n = 192 - 1 - 32 - 1 + ZSTD_highbit32(192 - 1 - 32 - 1); n < 192 - 1; ++n) { - nuint* ct = CTable + 1; - nuint nbBits = 0; - int s; - for (s = 0; s <= (int)maxSymbolValue; ++s) + int bucketSize = rankPosition[n].curr - rankPosition[n].@base; + uint bucketStartIdx = rankPosition[n].@base; + if (bucketSize > 1) { - nbBits += HUF_getNbBits(ct[s]) * count[s]; + assert(bucketStartIdx < maxSymbolValue1); + HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize - 1); } + } + + assert(HUF_isSorted(huffNode, maxSymbolValue1) != 0); + } - return nbBits >> 3; + /* HUF_buildTree(): + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. + * + * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. + * @param maxSymbolValue The maximum symbol value. + * @return The smallest node in the Huffman tree (by count). 
+ */ + private static int HUF_buildTree(nodeElt_s* huffNode, uint maxSymbolValue) + { + nodeElt_s* huffNode0 = huffNode - 1; + int nonNullRank; + int lowS, + lowN; + int nodeNb = 255 + 1; + int n, + nodeRoot; + nonNullRank = (int)maxSymbolValue; + while (huffNode[nonNullRank].count == 0) + nonNullRank--; + lowS = nonNullRank; + nodeRoot = nodeNb + lowS - 1; + lowN = nodeNb; + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count; + huffNode[lowS].parent = huffNode[lowS - 1].parent = (ushort)nodeNb; + nodeNb++; + lowS -= 2; + for (n = nodeNb; n <= nodeRoot; n++) + huffNode[n].count = 1U << 30; + huffNode0[0].count = 1U << 31; + while (nodeNb <= nodeRoot) + { + int n1 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++; + int n2 = huffNode[lowS].count < huffNode[lowN].count ? lowS-- : lowN++; + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; + huffNode[n1].parent = huffNode[n2].parent = (ushort)nodeNb; + nodeNb++; } - private static int HUF_validateCTable(nuint* CTable, uint* count, uint maxSymbolValue) + huffNode[nodeRoot].nbBits = 0; + for (n = nodeRoot - 1; n >= 255 + 1; n--) + huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1); + for (n = 0; n <= nonNullRank; n++) + huffNode[n].nbBits = (byte)(huffNode[huffNode[n].parent].nbBits + 1); + return nonNullRank; + } + + /** + * HUF_buildCTableFromTree(): + * Build the CTable given the Huffman tree in huffNode. + * + * @param[out] CTable The output Huffman CTable. + * @param huffNode The Huffman tree. + * @param nonNullRank The last and smallest node in the Huffman tree. + * @param maxSymbolValue The maximum symbol value. + * @param maxNbBits The exact maximum number of bits used in the Huffman tree. 
+ */ + private static void HUF_buildCTableFromTree( + nuint* CTable, + nodeElt_s* huffNode, + int nonNullRank, + uint maxSymbolValue, + uint maxNbBits + ) + { + nuint* ct = CTable + 1; + /* fill result into ctable (val, nbBits) */ + int n; + ushort* nbPerRank = stackalloc ushort[13]; + memset(nbPerRank, 0, sizeof(ushort) * 13); + ushort* valPerRank = stackalloc ushort[13]; + memset(valPerRank, 0, sizeof(ushort) * 13); + int alphabetSize = (int)(maxSymbolValue + 1); + for (n = 0; n <= nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; { - HUF_CTableHeader header = HUF_readCTableHeader(CTable); - nuint* ct = CTable + 1; - int bad = 0; - int s; - assert(header.tableLog <= 12); - if (header.maxSymbolValue < maxSymbolValue) - return 0; - for (s = 0; s <= (int)maxSymbolValue; ++s) + ushort min = 0; + for (n = (int)maxNbBits; n > 0; n--) { - bad |= count[s] != 0 && HUF_getNbBits(ct[s]) == 0 ? 1 : 0; + valPerRank[n] = min; + min += nbPerRank[n]; + min >>= 1; } - - return bad == 0 ? 1 : 0; } - private static nuint HUF_compressBound(nuint size) - { - return 129 + (size + (size >> 8) + 8); - } + for (n = 0; n < alphabetSize; n++) + HUF_setNbBits(ct + huffNode[n].@byte, huffNode[n].nbBits); + for (n = 0; n < alphabetSize; n++) + HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); + HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue); + } - /**! HUF_initCStream(): - * Initializes the bitstream. - * @returns 0 or an error code. 
- */ - private static nuint HUF_initCStream( - ref HUF_CStream_t bitC, - void* startPtr, - nuint dstCapacity - ) - { - bitC = new HUF_CStream_t - { - startPtr = (byte*)startPtr, - ptr = (byte*)startPtr, - endPtr = (byte*)startPtr + dstCapacity - sizeof(nuint), - }; - if (dstCapacity <= (nuint)sizeof(nuint)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - return 0; - } + private static nuint HUF_buildCTable_wksp( + nuint* CTable, + uint* count, + uint maxSymbolValue, + uint maxNbBits, + void* workSpace, + nuint wkspSize + ) + { + HUF_buildCTable_wksp_tables* wksp_tables = + (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace( + workSpace, + &wkspSize, + sizeof(uint) + ); + nodeElt_s* huffNode0 = &wksp_tables->huffNodeTbl.e0; + nodeElt_s* huffNode = huffNode0 + 1; + int nonNullRank; + if (wkspSize < (nuint)sizeof(HUF_buildCTable_wksp_tables)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + if (maxNbBits == 0) + maxNbBits = 11; + if (maxSymbolValue > 255) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + memset(huffNode0, 0, (uint)(sizeof(nodeElt_s) * 512)); + HUF_sort(huffNode, count, maxSymbolValue, &wksp_tables->rankPosition.e0); + nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); + maxNbBits = HUF_setMaxHeight(huffNode, (uint)nonNullRank, maxNbBits); + if (maxNbBits > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); + return maxNbBits; + } - /*! HUF_addBits(): - * Adds the symbol stored in HUF_CElt elt to the bitstream. - * - * @param elt The element we're adding. This is a (nbBits, value) pair. - * See the HUF_CStream_t docs for the format. - * @param idx Insert into the bitstream at this idx. - * @param kFast This is a template parameter. 
If the bitstream is guaranteed - * to have at least 4 unused bits after this call it may be 1, - * otherwise it must be 0. HUF_addBits() is faster when fast is set. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_addBits( - ref nuint bitC_bitContainer_e0, - ref nuint bitC_bitPos_e0, - nuint elt, - int kFast - ) + private static nuint HUF_estimateCompressedSize( + nuint* CTable, + uint* count, + uint maxSymbolValue + ) + { + nuint* ct = CTable + 1; + nuint nbBits = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { - assert(HUF_getNbBits(elt) <= 12); - bitC_bitContainer_e0 >>= (int)HUF_getNbBits(elt); - bitC_bitContainer_e0 |= kFast != 0 ? HUF_getValueFast(elt) : HUF_getValue(elt); - bitC_bitPos_e0 += HUF_getNbBitsFast(elt); - assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8)); + nbBits += HUF_getNbBits(ct[s]) * count[s]; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_zeroIndex1(ref nuint bitC_bitContainer_e1, ref nuint bitC_bitPos_e1) - { - bitC_bitContainer_e1 = 0; - bitC_bitPos_e1 = 0; - } + return nbBits >> 3; + } - /*! HUF_mergeIndex1() : - * Merges the bit container @ index 1 into the bit container @ index 0 - * and zeros the bit container @ index 1. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_mergeIndex1( - ref nuint bitC_bitContainer_e0, - ref nuint bitC_bitPos_e0, - ref nuint bitC_bitContainer_e1, - ref nuint bitC_bitPos_e1 - ) + private static int HUF_validateCTable(nuint* CTable, uint* count, uint maxSymbolValue) + { + HUF_CTableHeader header = HUF_readCTableHeader(CTable); + nuint* ct = CTable + 1; + int bad = 0; + int s; + assert(header.tableLog <= 12); + if (header.maxSymbolValue < maxSymbolValue) + return 0; + for (s = 0; s <= (int)maxSymbolValue; ++s) { - assert((bitC_bitPos_e1 & 0xFF) < (nuint)(sizeof(nuint) * 8)); - bitC_bitContainer_e0 >>= (int)(bitC_bitPos_e1 & 0xFF); - bitC_bitContainer_e0 |= bitC_bitContainer_e1; - bitC_bitPos_e0 += bitC_bitPos_e1; - assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8)); + bad |= count[s] != 0 && HUF_getNbBits(ct[s]) == 0 ? 1 : 0; } - /*! HUF_flushBits() : - * Flushes the bits in the bit container @ index 0. - * - * @post bitPos will be < 8. - * @param kFast If kFast is set then we must know a-priori that - * the bit container will not overflow. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_flushBits( - ref nuint bitC_bitContainer_e0, - ref nuint bitC_bitPos_e0, - ref byte* bitC_ptr, - byte* bitC_endPtr, - int kFast - ) + return bad == 0 ? 1 : 0; + } + + private static nuint HUF_compressBound(nuint size) + { + return 129 + (size + (size >> 8) + 8); + } + + /**! HUF_initCStream(): + * Initializes the bitstream. + * @returns 0 or an error code. + */ + private static nuint HUF_initCStream( + ref HUF_CStream_t bitC, + void* startPtr, + nuint dstCapacity + ) + { + bitC = new HUF_CStream_t { - /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ - nuint nbBits = bitC_bitPos_e0 & 0xFF; - nuint nbBytes = nbBits >> 3; - /* The top nbBits bits of bitContainer are the ones we need. 
*/ - nuint bitContainer = bitC_bitContainer_e0 >> (int)((nuint)(sizeof(nuint) * 8) - nbBits); - bitC_bitPos_e0 &= 7; - assert(nbBits > 0); - assert(nbBits <= (nuint)(sizeof(nuint) * 8)); - assert(bitC_ptr <= bitC_endPtr); - MEM_writeLEST(bitC_ptr, bitContainer); - bitC_ptr += nbBytes; - assert(kFast == 0 || bitC_ptr <= bitC_endPtr); - if (kFast == 0 && bitC_ptr > bitC_endPtr) - bitC_ptr = bitC_endPtr; - } + startPtr = (byte*)startPtr, + ptr = (byte*)startPtr, + endPtr = (byte*)startPtr + dstCapacity - sizeof(nuint), + }; + if (dstCapacity <= (nuint)sizeof(nuint)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return 0; + } - /*! HUF_endMark() - * @returns The Huffman stream end mark: A 1-bit value = 1. - */ - private static nuint HUF_endMark() + /*! HUF_addBits(): + * Adds the symbol stored in HUF_CElt elt to the bitstream. + * + * @param elt The element we're adding. This is a (nbBits, value) pair. + * See the HUF_CStream_t docs for the format. + * @param idx Insert into the bitstream at this idx. + * @param kFast This is a template parameter. If the bitstream is guaranteed + * to have at least 4 unused bits after this call it may be 1, + * otherwise it must be 0. HUF_addBits() is faster when fast is set. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_addBits( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + nuint elt, + int kFast + ) + { + assert(HUF_getNbBits(elt) <= 12); + bitC_bitContainer_e0 >>= (int)HUF_getNbBits(elt); + bitC_bitContainer_e0 |= kFast != 0 ? HUF_getValueFast(elt) : HUF_getValue(elt); + bitC_bitPos_e0 += HUF_getNbBitsFast(elt); + assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_zeroIndex1(ref nuint bitC_bitContainer_e1, ref nuint bitC_bitPos_e1) + { + bitC_bitContainer_e1 = 0; + bitC_bitPos_e1 = 0; + } + + /*! 
HUF_mergeIndex1() : + * Merges the bit container @ index 1 into the bit container @ index 0 + * and zeros the bit container @ index 1. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_mergeIndex1( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + ref nuint bitC_bitContainer_e1, + ref nuint bitC_bitPos_e1 + ) + { + assert((bitC_bitPos_e1 & 0xFF) < (nuint)(sizeof(nuint) * 8)); + bitC_bitContainer_e0 >>= (int)(bitC_bitPos_e1 & 0xFF); + bitC_bitContainer_e0 |= bitC_bitContainer_e1; + bitC_bitPos_e0 += bitC_bitPos_e1; + assert((bitC_bitPos_e0 & 0xFF) <= (nuint)(sizeof(nuint) * 8)); + } + + /*! HUF_flushBits() : + * Flushes the bits in the bit container @ index 0. + * + * @post bitPos will be < 8. + * @param kFast If kFast is set then we must know a-priori that + * the bit container will not overflow. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_flushBits( + ref nuint bitC_bitContainer_e0, + ref nuint bitC_bitPos_e0, + ref byte* bitC_ptr, + byte* bitC_endPtr, + int kFast + ) + { + /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ + nuint nbBits = bitC_bitPos_e0 & 0xFF; + nuint nbBytes = nbBits >> 3; + /* The top nbBits bits of bitContainer are the ones we need. */ + nuint bitContainer = bitC_bitContainer_e0 >> (int)((nuint)(sizeof(nuint) * 8) - nbBits); + bitC_bitPos_e0 &= 7; + assert(nbBits > 0); + assert(nbBits <= (nuint)(sizeof(nuint) * 8)); + assert(bitC_ptr <= bitC_endPtr); + MEM_writeLEST(bitC_ptr, bitContainer); + bitC_ptr += nbBytes; + assert(kFast == 0 || bitC_ptr <= bitC_endPtr); + if (kFast == 0 && bitC_ptr > bitC_endPtr) + bitC_ptr = bitC_endPtr; + } + + /*! HUF_endMark() + * @returns The Huffman stream end mark: A 1-bit value = 1. + */ + private static nuint HUF_endMark() + { + nuint endMark; + HUF_setNbBits(&endMark, 1); + HUF_setValue(&endMark, 1); + return endMark; + } + + /*! 
HUF_closeCStream() : + * @return Size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ + private static nuint HUF_closeCStream(ref HUF_CStream_t bitC) + { + HUF_addBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, HUF_endMark(), 0); + HUF_flushBits( + ref bitC.bitContainer.e0, + ref bitC.bitPos.e0, + ref bitC.ptr, + bitC.endPtr, + 0 + ); { - nuint endMark; - HUF_setNbBits(&endMark, 1); - HUF_setValue(&endMark, 1); - return endMark; + nuint nbBits = bitC.bitPos.e0 & 0xFF; + if (bitC.ptr >= bitC.endPtr) + return 0; + return (nuint)(bitC.ptr - bitC.startPtr) + (nuint)(nbBits > 0 ? 1 : 0); } + } - /*! HUF_closeCStream() : - * @return Size of CStream, in bytes, - * or 0 if it could not fit into dstBuffer */ - private static nuint HUF_closeCStream(ref HUF_CStream_t bitC) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_encodeSymbol( + ref nuint bitCPtr_bitContainer_e0, + ref nuint bitCPtr_bitPos_e0, + uint symbol, + nuint* CTable, + int fast + ) + { + HUF_addBits(ref bitCPtr_bitContainer_e0, ref bitCPtr_bitPos_e0, CTable[symbol], fast); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_compress1X_usingCTable_internal_body_loop( + ref HUF_CStream_t bitC, + byte* ip, + nuint srcSize, + nuint* ct, + int kUnroll, + int kFastFlush, + int kLastFast + ) + { + byte* bitC_ptr = bitC.ptr; + byte* bitC_endPtr = bitC.endPtr; + nuint bitC_bitContainer_e0 = bitC.bitContainer.e0; + nuint bitC_bitPos_e0 = bitC.bitPos.e0; + nuint bitC_bitContainer_e1 = bitC.bitContainer.e1; + nuint bitC_bitPos_e1 = bitC.bitPos.e1; + /* Join to kUnroll */ + int n = (int)srcSize; + int rem = n % kUnroll; + if (rem > 0) { - HUF_addBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, HUF_endMark(), 0); - HUF_flushBits( - ref bitC.bitContainer.e0, - ref bitC.bitPos.e0, - ref bitC.ptr, - bitC.endPtr, - 0 - ); + for (; rem > 0; --rem) { - nuint nbBits = bitC.bitPos.e0 & 0xFF; - if (bitC.ptr >= bitC.endPtr) - return 0; - return 
(nuint)(bitC.ptr - bitC.startPtr) + (nuint)(nbBits > 0 ? 1 : 0); + HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[--n], ct, 0); } - } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_encodeSymbol( - ref nuint bitCPtr_bitContainer_e0, - ref nuint bitCPtr_bitPos_e0, - uint symbol, - nuint* CTable, - int fast - ) - { - HUF_addBits(ref bitCPtr_bitContainer_e0, ref bitCPtr_bitPos_e0, CTable[symbol], fast); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_compress1X_usingCTable_internal_body_loop( - ref HUF_CStream_t bitC, - byte* ip, - nuint srcSize, - nuint* ct, - int kUnroll, - int kFastFlush, - int kLastFast - ) + assert(n % kUnroll == 0); + if (n % (2 * kUnroll) != 0) { - byte* bitC_ptr = bitC.ptr; - byte* bitC_endPtr = bitC.endPtr; - nuint bitC_bitContainer_e0 = bitC.bitContainer.e0; - nuint bitC_bitPos_e0 = bitC.bitPos.e0; - nuint bitC_bitContainer_e1 = bitC.bitContainer.e1; - nuint bitC_bitPos_e1 = bitC.bitPos.e1; - /* Join to kUnroll */ - int n = (int)srcSize; - int rem = n % kUnroll; - if (rem > 0) - { - for (; rem > 0; --rem) - { - HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[--n], ct, 0); - } - - HUF_flushBits( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ref bitC_ptr, - bitC_endPtr, - kFastFlush - ); - } - - assert(n % kUnroll == 0); - if (n % (2 * kUnroll) != 0) + int u; + for (u = 1; u < kUnroll; ++u) { - int u; - for (u = 1; u < kUnroll; ++u) - { - HUF_encodeSymbol( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ip[n - u], - ct, - 1 - ); - } - HUF_encodeSymbol( ref bitC_bitContainer_e0, ref bitC_bitPos_e0, - ip[n - kUnroll], + ip[n - u], ct, - kLastFast - ); - HUF_flushBits( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ref bitC_ptr, - bitC_endPtr, - kFastFlush + 1 ); - n -= kUnroll; } - assert(n % (2 * kUnroll) == 0); - 
for (; n > 0; n -= 2 * kUnroll) - { - /* Encode kUnroll symbols into the bitstream @ index 0. */ - int u; - for (u = 1; u < kUnroll; ++u) - { - HUF_encodeSymbol( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ip[n - u], - ct, - 1 - ); - } + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - kUnroll], + ct, + kLastFast + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); + n -= kUnroll; + } + assert(n % (2 * kUnroll) == 0); + for (; n > 0; n -= 2 * kUnroll) + { + /* Encode kUnroll symbols into the bitstream @ index 0. */ + int u; + for (u = 1; u < kUnroll; ++u) + { HUF_encodeSymbol( ref bitC_bitContainer_e0, ref bitC_bitPos_e0, - ip[n - kUnroll], + ip[n - u], ct, - kLastFast - ); - HUF_flushBits( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ref bitC_ptr, - bitC_endPtr, - kFastFlush + 1 ); - HUF_zeroIndex1(ref bitC_bitContainer_e1, ref bitC_bitPos_e1); - for (u = 1; u < kUnroll; ++u) - { - HUF_encodeSymbol( - ref bitC_bitContainer_e1, - ref bitC_bitPos_e1, - ip[n - kUnroll - u], - ct, - 1 - ); - } + } + HUF_encodeSymbol( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ip[n - kUnroll], + ct, + kLastFast + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); + HUF_zeroIndex1(ref bitC_bitContainer_e1, ref bitC_bitPos_e1); + for (u = 1; u < kUnroll; ++u) + { HUF_encodeSymbol( ref bitC_bitContainer_e1, ref bitC_bitPos_e1, - ip[n - kUnroll - kUnroll], + ip[n - kUnroll - u], ct, - kLastFast - ); - HUF_mergeIndex1( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ref bitC_bitContainer_e1, - ref bitC_bitPos_e1 - ); - HUF_flushBits( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ref bitC_ptr, - bitC_endPtr, - kFastFlush + 1 ); } - assert(n == 0); - bitC.ptr = bitC_ptr; - bitC.endPtr = bitC_endPtr; - bitC.bitContainer.e0 = bitC_bitContainer_e0; - bitC.bitPos.e0 = bitC_bitPos_e0; - 
bitC.bitContainer.e1 = bitC_bitContainer_e1; - bitC.bitPos.e1 = bitC_bitPos_e1; + HUF_encodeSymbol( + ref bitC_bitContainer_e1, + ref bitC_bitPos_e1, + ip[n - kUnroll - kUnroll], + ct, + kLastFast + ); + HUF_mergeIndex1( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_bitContainer_e1, + ref bitC_bitPos_e1 + ); + HUF_flushBits( + ref bitC_bitContainer_e0, + ref bitC_bitPos_e0, + ref bitC_ptr, + bitC_endPtr, + kFastFlush + ); } - /** - * Returns a tight upper bound on the output space needed by Huffman - * with 8 bytes buffer to handle over-writes. If the output is at least - * this large we don't need to do bounds checks during Huffman encoding. - */ - private static nuint HUF_tightCompressBound(nuint srcSize, nuint tableLog) + assert(n == 0); + bitC.ptr = bitC_ptr; + bitC.endPtr = bitC_endPtr; + bitC.bitContainer.e0 = bitC_bitContainer_e0; + bitC.bitPos.e0 = bitC_bitPos_e0; + bitC.bitContainer.e1 = bitC_bitContainer_e1; + bitC.bitPos.e1 = bitC_bitPos_e1; + } + + /** + * Returns a tight upper bound on the output space needed by Huffman + * with 8 bytes buffer to handle over-writes. If the output is at least + * this large we don't need to do bounds checks during Huffman encoding. 
+ */ + private static nuint HUF_tightCompressBound(nuint srcSize, nuint tableLog) + { + return (srcSize * tableLog >> 3) + 8; + } + + private static nuint HUF_compress1X_usingCTable_internal_body( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable + ) + { + uint tableLog = HUF_readCTableHeader(CTable).tableLog; + nuint* ct = CTable + 1; + byte* ip = (byte*)src; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + HUF_CStream_t bitC; + System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); + if (dstSize < 8) + return 0; { - return (srcSize * tableLog >> 3) + 8; + byte* op = ostart; + nuint initErr = HUF_initCStream(ref bitC, op, (nuint)(oend - op)); + if (ERR_isError(initErr)) + return 0; } - private static nuint HUF_compress1X_usingCTable_internal_body( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - nuint* CTable - ) + if (dstSize < HUF_tightCompressBound(srcSize, tableLog) || tableLog > 11) + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + MEM_32bits ? 
2 : 4, + 0, + 0 + ); + else { - uint tableLog = HUF_readCTableHeader(CTable).tableLog; - nuint* ct = CTable + 1; - byte* ip = (byte*)src; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - HUF_CStream_t bitC; - System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); - if (dstSize < 8) - return 0; + if (MEM_32bits) { - byte* op = ostart; - nuint initErr = HUF_initCStream(ref bitC, op, (nuint)(oend - op)); - if (ERR_isError(initErr)) - return 0; + switch (tableLog) + { + case 11: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 2, + 1, + 0 + ); + break; + case 10: + case 9: + case 8: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 2, + 1, + 1 + ); + break; + case 7: + default: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 3, + 1, + 1 + ); + break; + } } - - if (dstSize < HUF_tightCompressBound(srcSize, tableLog) || tableLog > 11) - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - MEM_32bits ? 
2 : 4, - 0, - 0 - ); else { - if (MEM_32bits) - { - switch (tableLog) - { - case 11: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 2, - 1, - 0 - ); - break; - case 10: - case 9: - case 8: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 2, - 1, - 1 - ); - break; - case 7: - default: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 3, - 1, - 1 - ); - break; - } - } - else + switch (tableLog) { - switch (tableLog) - { - case 11: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 5, - 1, - 0 - ); - break; - case 10: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 5, - 1, - 1 - ); - break; - case 9: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 6, - 1, - 0 - ); - break; - case 8: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 7, - 1, - 0 - ); - break; - case 7: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 8, - 1, - 0 - ); - break; - case 6: - default: - HUF_compress1X_usingCTable_internal_body_loop( - ref bitC, - ip, - srcSize, - ct, - 9, - 1, - 1 - ); - break; - } + case 11: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 5, + 1, + 0 + ); + break; + case 10: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 5, + 1, + 1 + ); + break; + case 9: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 6, + 1, + 0 + ); + break; + case 8: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 7, + 1, + 0 + ); + break; + case 7: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, + 8, + 1, + 0 + ); + break; + case 6: + default: + HUF_compress1X_usingCTable_internal_body_loop( + ref bitC, + ip, + srcSize, + ct, 
+ 9, + 1, + 1 + ); + break; } } - - assert(bitC.ptr <= bitC.endPtr); - return HUF_closeCStream(ref bitC); } - private static nuint HUF_compress1X_usingCTable_internal( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - nuint* CTable, - int flags - ) + assert(bitC.ptr <= bitC.endPtr); + return HUF_closeCStream(ref bitC); + } + + private static nuint HUF_compress1X_usingCTable_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) + { + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); + } + + /* ====================== */ + /* single stream variants */ + /* ====================== */ + private static nuint HUF_compress1X_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) + { + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); + } + + private static nuint HUF_compress4X_usingCTable_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) + { + /* first 3 segments */ + nuint segmentSize = (srcSize + 3) / 4; + byte* ip = (byte*)src; + byte* iend = ip + srcSize; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* op = ostart; + if (dstSize < 6 + 1 + 1 + 1 + 8) + return 0; + if (srcSize < 12) + return 0; + op += 6; + assert(op <= oend); { - return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); + if (ERR_isError(cSize)) + return cSize; + if (cSize == 0 || cSize > 65535) + return 0; + MEM_writeLE16(ostart, (ushort)cSize); + op += cSize; } - /* ====================== */ - /* single stream variants */ - /* ====================== */ - private static nuint HUF_compress1X_usingCTable( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - nuint* CTable, - int flags - ) + ip 
+= segmentSize; + assert(op <= oend); { - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); + if (ERR_isError(cSize)) + return cSize; + if (cSize == 0 || cSize > 65535) + return 0; + MEM_writeLE16(ostart + 2, (ushort)cSize); + op += cSize; } - private static nuint HUF_compress4X_usingCTable_internal( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - nuint* CTable, - int flags - ) + ip += segmentSize; + assert(op <= oend); { - /* first 3 segments */ - nuint segmentSize = (srcSize + 3) / 4; - byte* ip = (byte*)src; - byte* iend = ip + srcSize; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - byte* op = ostart; - if (dstSize < 6 + 1 + 1 + 1 + 8) + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + segmentSize, + CTable, + flags + ); + if (ERR_isError(cSize)) + return cSize; + if (cSize == 0 || cSize > 65535) return 0; - if (srcSize < 12) + MEM_writeLE16(ostart + 4, (ushort)cSize); + op += cSize; + } + + ip += segmentSize; + assert(op <= oend); + assert(ip <= iend); + { + nuint cSize = HUF_compress1X_usingCTable_internal( + op, + (nuint)(oend - op), + ip, + (nuint)(iend - ip), + CTable, + flags + ); + if (ERR_isError(cSize)) + return cSize; + if (cSize == 0 || cSize > 65535) return 0; - op += 6; - assert(op <= oend); - { - nuint cSize = HUF_compress1X_usingCTable_internal( - op, - (nuint)(oend - op), - ip, - segmentSize, - CTable, - flags - ); - if (ERR_isError(cSize)) - return cSize; - if (cSize == 0 || cSize > 65535) - return 0; - MEM_writeLE16(ostart, (ushort)cSize); - op += cSize; - } + op += cSize; + } - ip += segmentSize; - assert(op <= oend); - { - nuint cSize = HUF_compress1X_usingCTable_internal( - op, - (nuint)(oend - op), - ip, - segmentSize, - CTable, - flags - ); - if (ERR_isError(cSize)) - return cSize; - if (cSize == 0 || cSize > 
65535) - return 0; - MEM_writeLE16(ostart + 2, (ushort)cSize); - op += cSize; - } + return (nuint)(op - ostart); + } - ip += segmentSize; - assert(op <= oend); - { - nuint cSize = HUF_compress1X_usingCTable_internal( + private static nuint HUF_compress4X_usingCTable( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + nuint* CTable, + int flags + ) + { + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); + } + + private static nuint HUF_compressCTable_internal( + byte* ostart, + byte* op, + byte* oend, + void* src, + nuint srcSize, + HUF_nbStreams_e nbStreams, + nuint* CTable, + int flags + ) + { + nuint cSize = + nbStreams == HUF_nbStreams_e.HUF_singleStream + ? HUF_compress1X_usingCTable_internal( op, (nuint)(oend - op), - ip, - segmentSize, + src, + srcSize, CTable, flags - ); - if (ERR_isError(cSize)) - return cSize; - if (cSize == 0 || cSize > 65535) - return 0; - MEM_writeLE16(ostart + 4, (ushort)cSize); - op += cSize; - } - - ip += segmentSize; - assert(op <= oend); - assert(ip <= iend); - { - nuint cSize = HUF_compress1X_usingCTable_internal( + ) + : HUF_compress4X_usingCTable_internal( op, (nuint)(oend - op), - ip, - (nuint)(iend - ip), + src, + srcSize, CTable, flags ); - if (ERR_isError(cSize)) - return cSize; - if (cSize == 0 || cSize > 65535) - return 0; - op += cSize; - } - - return (nuint)(op - ostart); + if (ERR_isError(cSize)) + { + return cSize; } - private static nuint HUF_compress4X_usingCTable( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - nuint* CTable, - int flags - ) + if (cSize == 0) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); + return 0; } - private static nuint HUF_compressCTable_internal( - byte* ostart, - byte* op, - byte* oend, - void* src, - nuint srcSize, - HUF_nbStreams_e nbStreams, - nuint* CTable, - int flags - ) + op += cSize; + assert(op >= ostart); + if ((nuint)(op - ostart) >= srcSize - 1) { - nuint cSize = - nbStreams 
== HUF_nbStreams_e.HUF_singleStream - ? HUF_compress1X_usingCTable_internal( - op, - (nuint)(oend - op), - src, - srcSize, - CTable, - flags - ) - : HUF_compress4X_usingCTable_internal( - op, - (nuint)(oend - op), - src, - srcSize, - CTable, - flags - ); - if (ERR_isError(cSize)) - { - return cSize; - } - - if (cSize == 0) - { - return 0; - } + return 0; + } - op += cSize; - assert(op >= ostart); - if ((nuint)(op - ostart) >= srcSize - 1) - { - return 0; - } + return (nuint)(op - ostart); + } - return (nuint)(op - ostart); + private static uint HUF_cardinality(uint* count, uint maxSymbolValue) + { + uint cardinality = 0; + uint i; + for (i = 0; i < maxSymbolValue + 1; i++) + { + if (count[i] != 0) + cardinality += 1; } - private static uint HUF_cardinality(uint* count, uint maxSymbolValue) - { - uint cardinality = 0; - uint i; - for (i = 0; i < maxSymbolValue + 1; i++) - { - if (count[i] != 0) - cardinality += 1; - } + return cardinality; + } - return cardinality; - } + /*! HUF_compress() does the following: + * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") + * 2. (optional) refine tableLog using HUF_optimalTableLog() + * 3. build Huffman table from count using HUF_buildCTable() + * 4. save Huffman table to memory buffer using HUF_writeCTable() + * 5. encode the data stream using HUF_compress4X_usingCTable() + * + * The following API allows targeting specific sub-functions for advanced tasks. + * For example, it's possible to compress several blocks using the same 'CTable', + * or to save and regenerate 'CTable' using external methods. + */ + private static uint HUF_minTableLog(uint symbolCardinality) + { + uint minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; + return minBitsSymbols; + } - /*! HUF_compress() does the following: - * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") - * 2. (optional) refine tableLog using HUF_optimalTableLog() - * 3. 
build Huffman table from count using HUF_buildCTable() - * 4. save Huffman table to memory buffer using HUF_writeCTable() - * 5. encode the data stream using HUF_compress4X_usingCTable() - * - * The following API allows targeting specific sub-functions for advanced tasks. - * For example, it's possible to compress several blocks using the same 'CTable', - * or to save and regenerate 'CTable' using external methods. - */ - private static uint HUF_minTableLog(uint symbolCardinality) + private static uint HUF_optimalTableLog( + uint maxTableLog, + nuint srcSize, + uint maxSymbolValue, + void* workSpace, + nuint wkspSize, + nuint* table, + uint* count, + int flags + ) + { + assert(srcSize > 1); + assert(wkspSize >= (nuint)sizeof(HUF_buildCTable_wksp_tables)); + if ((flags & (int)HUF_flags_e.HUF_flags_optimalDepth) == 0) { - uint minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; - return minBitsSymbols; + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); } - private static uint HUF_optimalTableLog( - uint maxTableLog, - nuint srcSize, - uint maxSymbolValue, - void* workSpace, - nuint wkspSize, - nuint* table, - uint* count, - int flags - ) { - assert(srcSize > 1); - assert(wkspSize >= (nuint)sizeof(HUF_buildCTable_wksp_tables)); - if ((flags & (int)HUF_flags_e.HUF_flags_optimalDepth) == 0) - { - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); - } - + byte* dst = (byte*)workSpace + sizeof(HUF_WriteCTableWksp); + nuint dstSize = wkspSize - (nuint)sizeof(HUF_WriteCTableWksp); + nuint hSize, + newSize; + uint symbolCardinality = HUF_cardinality(count, maxSymbolValue); + uint minTableLog = HUF_minTableLog(symbolCardinality); + nuint optSize = unchecked((nuint)~0) - 1; + uint optLog = maxTableLog, + optLogGuess; + for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { - byte* dst = (byte*)workSpace + sizeof(HUF_WriteCTableWksp); - nuint dstSize = wkspSize - 
(nuint)sizeof(HUF_WriteCTableWksp); - nuint hSize, - newSize; - uint symbolCardinality = HUF_cardinality(count, maxSymbolValue); - uint minTableLog = HUF_minTableLog(symbolCardinality); - nuint optSize = unchecked((nuint)~0) - 1; - uint optLog = maxTableLog, - optLogGuess; - for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { - { - nuint maxBits = HUF_buildCTable_wksp( - table, - count, - maxSymbolValue, - optLogGuess, - workSpace, - wkspSize - ); - if (ERR_isError(maxBits)) - continue; - if (maxBits < optLogGuess && optLogGuess > minTableLog) - break; - hSize = HUF_writeCTable_wksp( - dst, - dstSize, - table, - maxSymbolValue, - (uint)maxBits, - workSpace, - wkspSize - ); - } - - if (ERR_isError(hSize)) + nuint maxBits = HUF_buildCTable_wksp( + table, + count, + maxSymbolValue, + optLogGuess, + workSpace, + wkspSize + ); + if (ERR_isError(maxBits)) continue; - newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; - if (newSize > optSize + 1) - { + if (maxBits < optLogGuess && optLogGuess > minTableLog) break; - } + hSize = HUF_writeCTable_wksp( + dst, + dstSize, + table, + maxSymbolValue, + (uint)maxBits, + workSpace, + wkspSize + ); + } - if (newSize < optSize) - { - optSize = newSize; - optLog = optLogGuess; - } + if (ERR_isError(hSize)) + continue; + newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; + if (newSize > optSize + 1) + { + break; } - assert(optLog <= 12); - return optLog; + if (newSize < optSize) + { + optSize = newSize; + optLog = optLogGuess; + } } + + assert(optLog <= 12); + return optLog; } + } - /* HUF_compress_internal() : - * `workSpace_align4` must be aligned on 4-bytes boundaries, - * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ - private static nuint HUF_compress_internal( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint maxSymbolValue, - uint huffLog, - HUF_nbStreams_e nbStreams, - void* workSpace, - nuint wkspSize, - 
nuint* oldHufTable, - HUF_repeat* repeat, - int flags + /* HUF_compress_internal() : + * `workSpace_align4` must be aligned on 4-bytes boundaries, + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ + private static nuint HUF_compress_internal( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + HUF_nbStreams_e nbStreams, + void* workSpace, + nuint wkspSize, + nuint* oldHufTable, + HUF_repeat* repeat, + int flags + ) + { + HUF_compress_tables_t* table = (HUF_compress_tables_t*)HUF_alignUpWorkspace( + workSpace, + &wkspSize, + sizeof(ulong) + ); + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* op = ostart; + if (wkspSize < (nuint)sizeof(HUF_compress_tables_t)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + if (srcSize == 0) + return 0; + if (dstSize == 0) + return 0; + if (srcSize > 128 * 1024) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + if (huffLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (maxSymbolValue > 255) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); + if (maxSymbolValue == 0) + maxSymbolValue = 255; + if (huffLog == 0) + huffLog = 11; + if ( + (flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 + && repeat != null + && *repeat == HUF_repeat.HUF_repeat_valid ) { - HUF_compress_tables_t* table = (HUF_compress_tables_t*)HUF_alignUpWorkspace( - workSpace, - &wkspSize, - sizeof(ulong) + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + oldHufTable, + flags ); - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - byte* op = ostart; - if (wkspSize < (nuint)sizeof(HUF_compress_tables_t)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); - if (srcSize == 0) - return 0; - if (dstSize == 0) - return 0; - if (srcSize > 128 * 1024) 
- return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - if (huffLog > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - if (maxSymbolValue > 255) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)); - if (maxSymbolValue == 0) - maxSymbolValue = 255; - if (huffLog == 0) - huffLog = 11; - if ( - (flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 - && repeat != null - && *repeat == HUF_repeat.HUF_repeat_valid - ) - { - return HUF_compressCTable_internal( - ostart, - op, - oend, - src, - srcSize, - nbStreams, - oldHufTable, - flags - ); - } - - if ( - (flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 - && srcSize >= 4096 * 10 - ) - { - nuint largestTotal = 0; - { - uint maxSymbolValueBegin = maxSymbolValue; - nuint largestBegin = HIST_count_simple( - table->count, - &maxSymbolValueBegin, - (byte*)src, - 4096 - ); - if (ERR_isError(largestBegin)) - return largestBegin; - largestTotal += largestBegin; - } - - { - uint maxSymbolValueEnd = maxSymbolValue; - nuint largestEnd = HIST_count_simple( - table->count, - &maxSymbolValueEnd, - (byte*)src + srcSize - 4096, - 4096 - ); - if (ERR_isError(largestEnd)) - return largestEnd; - largestTotal += largestEnd; - } - - if (largestTotal <= (2 * 4096 >> 7) + 4) - return 0; - } + } + if ( + (flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 + && srcSize >= 4096 * 10 + ) + { + nuint largestTotal = 0; { - nuint largest = HIST_count_wksp( + uint maxSymbolValueBegin = maxSymbolValue; + nuint largestBegin = HIST_count_simple( table->count, - &maxSymbolValue, + &maxSymbolValueBegin, (byte*)src, - srcSize, - table->wksps.hist_wksp, - sizeof(uint) * 1024 + 4096 ); - if (ERR_isError(largest)) - return largest; - if (largest == srcSize) - { - *ostart = ((byte*)src)[0]; - return 1; - } - - if (largest <= (srcSize >> 7) + 4) - return 0; + if (ERR_isError(largestBegin)) + return largestBegin; + largestTotal += largestBegin; } 
- if ( - repeat != null - && *repeat == HUF_repeat.HUF_repeat_check - && HUF_validateCTable(oldHufTable, table->count, maxSymbolValue) == 0 - ) { - *repeat = HUF_repeat.HUF_repeat_none; - } - - if ( - (flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 - && repeat != null - && *repeat != HUF_repeat.HUF_repeat_none - ) - { - return HUF_compressCTable_internal( - ostart, - op, - oend, - src, - srcSize, - nbStreams, - oldHufTable, - flags + uint maxSymbolValueEnd = maxSymbolValue; + nuint largestEnd = HIST_count_simple( + table->count, + &maxSymbolValueEnd, + (byte*)src + srcSize - 4096, + 4096 ); + if (ERR_isError(largestEnd)) + return largestEnd; + largestTotal += largestEnd; } - huffLog = HUF_optimalTableLog( - huffLog, - srcSize, - maxSymbolValue, - &table->wksps, - (nuint)sizeof(_wksps_e__Union), - &table->CTable.e0, + if (largestTotal <= (2 * 4096 >> 7) + 4) + return 0; + } + + { + nuint largest = HIST_count_wksp( table->count, - flags + &maxSymbolValue, + (byte*)src, + srcSize, + table->wksps.hist_wksp, + sizeof(uint) * 1024 ); + if (ERR_isError(largest)) + return largest; + if (largest == srcSize) { - nuint maxBits = HUF_buildCTable_wksp( - &table->CTable.e0, - table->count, - maxSymbolValue, - huffLog, - &table->wksps.buildCTable_wksp, - (nuint)sizeof(HUF_buildCTable_wksp_tables) - ); - { - nuint _var_err__ = maxBits; - if (ERR_isError(_var_err__)) - return _var_err__; - } - - huffLog = (uint)maxBits; + *ostart = ((byte*)src)[0]; + return 1; } - { - nuint hSize = HUF_writeCTable_wksp( - op, - dstSize, - &table->CTable.e0, - maxSymbolValue, - huffLog, - &table->wksps.writeCTable_wksp, - (nuint)sizeof(HUF_WriteCTableWksp) - ); - if (ERR_isError(hSize)) - return hSize; - if (repeat != null && *repeat != HUF_repeat.HUF_repeat_none) - { - nuint oldSize = HUF_estimateCompressedSize( - oldHufTable, - table->count, - maxSymbolValue - ); - nuint newSize = HUF_estimateCompressedSize( - &table->CTable.e0, - table->count, - maxSymbolValue - ); - if (oldSize <= hSize + 
newSize || hSize + 12 >= srcSize) - { - return HUF_compressCTable_internal( - ostart, - op, - oend, - src, - srcSize, - nbStreams, - oldHufTable, - flags - ); - } - } - - if (hSize + 12U >= srcSize) - { - return 0; - } - - op += hSize; - if (repeat != null) - { - *repeat = HUF_repeat.HUF_repeat_none; - } + if (largest <= (srcSize >> 7) + 4) + return 0; + } - if (oldHufTable != null) - memcpy(oldHufTable, &table->CTable.e0, sizeof(ulong) * 257); - } + if ( + repeat != null + && *repeat == HUF_repeat.HUF_repeat_check + && HUF_validateCTable(oldHufTable, table->count, maxSymbolValue) == 0 + ) + { + *repeat = HUF_repeat.HUF_repeat_none; + } + if ( + (flags & (int)HUF_flags_e.HUF_flags_preferRepeat) != 0 + && repeat != null + && *repeat != HUF_repeat.HUF_repeat_none + ) + { return HUF_compressCTable_internal( ostart, op, @@ -1796,79 +1702,172 @@ int flags src, srcSize, nbStreams, - &table->CTable.e0, + oldHufTable, flags ); } - /** HUF_compress1X_repeat() : - * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. - * If it uses hufTable it does not modify hufTable or repeat. - * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. 
- * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ - private static nuint HUF_compress1X_repeat( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint maxSymbolValue, - uint huffLog, - void* workSpace, - nuint wkspSize, - nuint* hufTable, - HUF_repeat* repeat, - int flags - ) + huffLog = HUF_optimalTableLog( + huffLog, + srcSize, + maxSymbolValue, + &table->wksps, + (nuint)sizeof(_wksps_e__Union), + &table->CTable.e0, + table->count, + flags + ); { - return HUF_compress_internal( - dst, - dstSize, - src, - srcSize, + nuint maxBits = HUF_buildCTable_wksp( + &table->CTable.e0, + table->count, maxSymbolValue, huffLog, - HUF_nbStreams_e.HUF_singleStream, - workSpace, - wkspSize, - hufTable, - repeat, - flags + &table->wksps.buildCTable_wksp, + (nuint)sizeof(HUF_buildCTable_wksp_tables) ); + { + nuint _var_err__ = maxBits; + if (ERR_isError(_var_err__)) + return _var_err__; + } + + huffLog = (uint)maxBits; } - /* HUF_compress4X_repeat(): - * compress input using 4 streams. 
- * consider skipping quickly - * reuse an existing huffman compression table */ - private static nuint HUF_compress4X_repeat( - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint maxSymbolValue, - uint huffLog, - void* workSpace, - nuint wkspSize, - nuint* hufTable, - HUF_repeat* repeat, - int flags - ) { - return HUF_compress_internal( - dst, + nuint hSize = HUF_writeCTable_wksp( + op, dstSize, - src, - srcSize, + &table->CTable.e0, maxSymbolValue, huffLog, - HUF_nbStreams_e.HUF_fourStreams, - workSpace, - wkspSize, - hufTable, - repeat, - flags + &table->wksps.writeCTable_wksp, + (nuint)sizeof(HUF_WriteCTableWksp) ); + if (ERR_isError(hSize)) + return hSize; + if (repeat != null && *repeat != HUF_repeat.HUF_repeat_none) + { + nuint oldSize = HUF_estimateCompressedSize( + oldHufTable, + table->count, + maxSymbolValue + ); + nuint newSize = HUF_estimateCompressedSize( + &table->CTable.e0, + table->count, + maxSymbolValue + ); + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) + { + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + oldHufTable, + flags + ); + } + } + + if (hSize + 12U >= srcSize) + { + return 0; + } + + op += hSize; + if (repeat != null) + { + *repeat = HUF_repeat.HUF_repeat_none; + } + + if (oldHufTable != null) + memcpy(oldHufTable, &table->CTable.e0, sizeof(ulong) * 257); } + + return HUF_compressCTable_internal( + ostart, + op, + oend, + src, + srcSize, + nbStreams, + &table->CTable.e0, + flags + ); + } + + /** HUF_compress1X_repeat() : + * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + * If preferRepeat then the old table will always be used if valid. 
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ + private static nuint HUF_compress1X_repeat( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + void* workSpace, + nuint wkspSize, + nuint* hufTable, + HUF_repeat* repeat, + int flags + ) + { + return HUF_compress_internal( + dst, + dstSize, + src, + srcSize, + maxSymbolValue, + huffLog, + HUF_nbStreams_e.HUF_singleStream, + workSpace, + wkspSize, + hufTable, + repeat, + flags + ); + } + + /* HUF_compress4X_repeat(): + * compress input using 4 streams. + * consider skipping quickly + * reuse an existing huffman compression table */ + private static nuint HUF_compress4X_repeat( + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint maxSymbolValue, + uint huffLog, + void* workSpace, + nuint wkspSize, + nuint* hufTable, + HUF_repeat* repeat, + int flags + ) + { + return HUF_compress_internal( + dst, + dstSize, + src, + srcSize, + maxSymbolValue, + huffLog, + HUF_nbStreams_e.HUF_fourStreams, + workSpace, + wkspSize, + hufTable, + repeat, + flags + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs index e3a2048d3..5da4f66d7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs @@ -1,2575 +1,2574 @@ using System; using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + private static DTableDesc HUF_getDTableDesc(uint* table) { - private static DTableDesc HUF_getDTableDesc(uint* table) - { - DTableDesc dtd; - memcpy(&dtd, table, (uint)sizeof(DTableDesc)); - 
return dtd; - } + DTableDesc dtd; + memcpy(&dtd, table, (uint)sizeof(DTableDesc)); + return dtd; + } - private static nuint HUF_initFastDStream(byte* ip) - { - byte lastByte = ip[7]; - nuint bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; - nuint value = MEM_readLEST(ip) | 1; - assert(bitsConsumed <= 8); - assert(sizeof(nuint) == 8); - return value << (int)bitsConsumed; - } + private static nuint HUF_initFastDStream(byte* ip) + { + byte lastByte = ip[7]; + nuint bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; + nuint value = MEM_readLEST(ip) | 1; + assert(bitsConsumed <= 8); + assert(sizeof(nuint) == 8); + return value << (int)bitsConsumed; + } - /** - * Initializes args for the fast decoding loop. - * @returns 1 on success - * 0 if the fallback implementation should be used. - * Or an error code on failure. - */ - private static nuint HUF_DecompressFastArgs_init( - HUF_DecompressFastArgs* args, - void* dst, - nuint dstSize, - void* src, - nuint srcSize, - uint* DTable - ) + /** + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. + * Or an error code on failure. 
+ */ + private static nuint HUF_DecompressFastArgs_init( + HUF_DecompressFastArgs* args, + void* dst, + nuint dstSize, + void* src, + nuint srcSize, + uint* DTable + ) + { + void* dt = DTable + 1; + uint dtLog = HUF_getDTableDesc(DTable).tableLog; + byte* istart = (byte*)src; + byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); + if (!BitConverter.IsLittleEndian || MEM_32bits) + return 0; + if (dstSize == 0) + return 0; + assert(dst != null); + if (srcSize < 10) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (dtLog != 11) + return 0; { - void* dt = DTable + 1; - uint dtLog = HUF_getDTableDesc(DTable).tableLog; - byte* istart = (byte*)src; - byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); - if (!BitConverter.IsLittleEndian || MEM_32bits) + nuint length1 = MEM_readLE16(istart); + nuint length2 = MEM_readLE16(istart + 2); + nuint length3 = MEM_readLE16(istart + 4); + nuint length4 = srcSize - (length1 + length2 + length3 + 6); + args->iend.e0 = istart + 6; + args->iend.e1 = args->iend.e0 + length1; + args->iend.e2 = args->iend.e1 + length2; + args->iend.e3 = args->iend.e2 + length3; + if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) return 0; - if (dstSize == 0) - return 0; - assert(dst != null); - if (srcSize < 10) + if (length4 > srcSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (dtLog != 11) - return 0; - { - nuint length1 = MEM_readLE16(istart); - nuint length2 = MEM_readLE16(istart + 2); - nuint length3 = MEM_readLE16(istart + 4); - nuint length4 = srcSize - (length1 + length2 + length3 + 6); - args->iend.e0 = istart + 6; - args->iend.e1 = args->iend.e0 + length1; - args->iend.e2 = args->iend.e1 + length2; - args->iend.e3 = args->iend.e2 + length3; - if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8) - return 0; - if (length4 > srcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - 
args->ip.e0 = args->iend.e1 - sizeof(ulong); - args->ip.e1 = args->iend.e2 - sizeof(ulong); - args->ip.e2 = args->iend.e3 - sizeof(ulong); - args->ip.e3 = (byte*)src + srcSize - sizeof(ulong); - args->op.e0 = (byte*)dst; - args->op.e1 = args->op.e0 + (dstSize + 3) / 4; - args->op.e2 = args->op.e1 + (dstSize + 3) / 4; - args->op.e3 = args->op.e2 + (dstSize + 3) / 4; - if (args->op.e3 >= oend) - return 0; - args->bits[0] = HUF_initFastDStream(args->ip.e0); - args->bits[1] = HUF_initFastDStream(args->ip.e1); - args->bits[2] = HUF_initFastDStream(args->ip.e2); - args->bits[3] = HUF_initFastDStream(args->ip.e3); - args->ilowest = istart; - args->oend = oend; - args->dt = dt; - return 1; } - private static nuint HUF_initRemainingDStream( - BIT_DStream_t* bit, - HUF_DecompressFastArgs* args, - int stream, - byte* segmentEnd - ) - { - if ((&args->op.e0)[stream] > segmentEnd) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if ((&args->ip.e0)[stream] < (&args->iend.e0)[stream] - 8) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - assert(sizeof(nuint) == 8); - bit->bitContainer = MEM_readLEST((&args->ip.e0)[stream]); - bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); - bit->start = (sbyte*)args->ilowest; - bit->limitPtr = bit->start + sizeof(nuint); - bit->ptr = (sbyte*)(&args->ip.e0)[stream]; + args->ip.e0 = args->iend.e1 - sizeof(ulong); + args->ip.e1 = args->iend.e2 - sizeof(ulong); + args->ip.e2 = args->iend.e3 - sizeof(ulong); + args->ip.e3 = (byte*)src + srcSize - sizeof(ulong); + args->op.e0 = (byte*)dst; + args->op.e1 = args->op.e0 + (dstSize + 3) / 4; + args->op.e2 = args->op.e1 + (dstSize + 3) / 4; + args->op.e3 = args->op.e2 + (dstSize + 3) / 4; + if (args->op.e3 >= oend) return 0; + args->bits[0] = HUF_initFastDStream(args->ip.e0); + args->bits[1] = HUF_initFastDStream(args->ip.e1); + args->bits[2] = HUF_initFastDStream(args->ip.e2); + args->bits[3] = 
HUF_initFastDStream(args->ip.e3); + args->ilowest = istart; + args->oend = oend; + args->dt = dt; + return 1; + } + + private static nuint HUF_initRemainingDStream( + BIT_DStream_t* bit, + HUF_DecompressFastArgs* args, + int stream, + byte* segmentEnd + ) + { + if ((&args->op.e0)[stream] > segmentEnd) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if ((&args->ip.e0)[stream] < (&args->iend.e0)[stream] - 8) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + assert(sizeof(nuint) == 8); + bit->bitContainer = MEM_readLEST((&args->ip.e0)[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); + bit->start = (sbyte*)args->ilowest; + bit->limitPtr = bit->start + sizeof(nuint); + bit->ptr = (sbyte*)(&args->ip.e0)[stream]; + return 0; + } + + /** + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at + * a time. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong HUF_DEltX1_set4(byte symbol, byte nbBits) + { + ulong D4; + if (BitConverter.IsLittleEndian) + { + D4 = (ulong)((symbol << 8) + nbBits); } + else + { + D4 = (ulong)(symbol + (nbBits << 8)); + } + + assert(D4 < 1U << 16); + D4 *= 0x0001000100010001UL; + return D4; + } - /** - * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at - * a time. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong HUF_DEltX1_set4(byte symbol, byte nbBits) + /** + * Increase the tableLog to targetTableLog and rescales the stats. + * If tableLog > targetTableLog this is a no-op. 
+ * @returns New tableLog + */ + private static uint HUF_rescaleStats( + byte* huffWeight, + uint* rankVal, + uint nbSymbols, + uint tableLog, + uint targetTableLog + ) + { + if (tableLog > targetTableLog) + return tableLog; + if (tableLog < targetTableLog) { - ulong D4; - if (BitConverter.IsLittleEndian) + uint scale = targetTableLog - tableLog; + uint s; + for (s = 0; s < nbSymbols; ++s) { - D4 = (ulong)((symbol << 8) + nbBits); + huffWeight[s] += (byte)(huffWeight[s] == 0 ? 0 : scale); } - else + + for (s = targetTableLog; s > scale; --s) { - D4 = (ulong)(symbol + (nbBits << 8)); + rankVal[s] = rankVal[s - scale]; } - assert(D4 < 1U << 16); - D4 *= 0x0001000100010001UL; - return D4; - } - - /** - * Increase the tableLog to targetTableLog and rescales the stats. - * If tableLog > targetTableLog this is a no-op. - * @returns New tableLog - */ - private static uint HUF_rescaleStats( - byte* huffWeight, - uint* rankVal, - uint nbSymbols, - uint tableLog, - uint targetTableLog - ) - { - if (tableLog > targetTableLog) - return tableLog; - if (tableLog < targetTableLog) + for (s = scale; s > 0; --s) { - uint scale = targetTableLog - tableLog; - uint s; - for (s = 0; s < nbSymbols; ++s) - { - huffWeight[s] += (byte)(huffWeight[s] == 0 ? 
0 : scale); - } - - for (s = targetTableLog; s > scale; --s) - { - rankVal[s] = rankVal[s - scale]; - } - - for (s = scale; s > 0; --s) - { - rankVal[s] = 0; - } + rankVal[s] = 0; } - - return targetTableLog; } - private static nuint HUF_readDTableX1_wksp( - uint* DTable, - void* src, - nuint srcSize, - void* workSpace, - nuint wkspSize, - int flags - ) + return targetTableLog; + } + + private static nuint HUF_readDTableX1_wksp( + uint* DTable, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + uint tableLog = 0; + uint nbSymbols = 0; + nuint iSize; + void* dtPtr = DTable + 1; + HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; + HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; + if ((nuint)sizeof(HUF_ReadDTableX1_Workspace) > wkspSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + iSize = HUF_readStats_wksp( + wksp->huffWeight, + 255 + 1, + wksp->rankVal, + &nbSymbols, + &tableLog, + src, + srcSize, + wksp->statsWksp, + sizeof(uint) * 219, + flags + ); + if (ERR_isError(iSize)) + return iSize; { - uint tableLog = 0; - uint nbSymbols = 0; - nuint iSize; - void* dtPtr = DTable + 1; - HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; - HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace; - if ((nuint)sizeof(HUF_ReadDTableX1_Workspace) > wkspSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - iSize = HUF_readStats_wksp( + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint maxTableLog = (uint)(dtd.maxTableLog + 1); + uint targetTableLog = maxTableLog < 11 ? 
maxTableLog : 11; + tableLog = HUF_rescaleStats( wksp->huffWeight, - 255 + 1, wksp->rankVal, - &nbSymbols, - &tableLog, - src, - srcSize, - wksp->statsWksp, - sizeof(uint) * 219, - flags + nbSymbols, + tableLog, + targetTableLog ); - if (ERR_isError(iSize)) - return iSize; + if (tableLog > (uint)(dtd.maxTableLog + 1)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + dtd.tableType = 0; + dtd.tableLog = (byte)tableLog; + memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); + } + + { + int n; + uint nextRankStart = 0; + const int unroll = 4; + int nLimit = (int)nbSymbols - unroll + 1; + for (n = 0; n < (int)tableLog + 1; n++) { - DTableDesc dtd = HUF_getDTableDesc(DTable); - uint maxTableLog = (uint)(dtd.maxTableLog + 1); - uint targetTableLog = maxTableLog < 11 ? maxTableLog : 11; - tableLog = HUF_rescaleStats( - wksp->huffWeight, - wksp->rankVal, - nbSymbols, - tableLog, - targetTableLog - ); - if (tableLog > (uint)(dtd.maxTableLog + 1)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - dtd.tableType = 0; - dtd.tableLog = (byte)tableLog; - memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); + uint curr = nextRankStart; + nextRankStart += wksp->rankVal[n]; + wksp->rankStart[n] = curr; } + for (n = 0; n < nLimit; n += unroll) { - int n; - uint nextRankStart = 0; - const int unroll = 4; - int nLimit = (int)nbSymbols - unroll + 1; - for (n = 0; n < (int)tableLog + 1; n++) - { - uint curr = nextRankStart; - nextRankStart += wksp->rankVal[n]; - wksp->rankStart[n] = curr; - } - - for (n = 0; n < nLimit; n += unroll) + int u; + for (u = 0; u < unroll; ++u) { - int u; - for (u = 0; u < unroll; ++u) - { - nuint w = wksp->huffWeight[n + u]; - wksp->symbols[wksp->rankStart[w]++] = (byte)(n + u); - } + nuint w = wksp->huffWeight[n + u]; + wksp->symbols[wksp->rankStart[w]++] = (byte)(n + u); } + } - for (; n < (int)nbSymbols; ++n) - { - nuint w = wksp->huffWeight[n]; - wksp->symbols[wksp->rankStart[w]++] = (byte)n; - } + for 
(; n < (int)nbSymbols; ++n) + { + nuint w = wksp->huffWeight[n]; + wksp->symbols[wksp->rankStart[w]++] = (byte)n; } + } + { + uint w; + int symbol = (int)wksp->rankVal[0]; + int rankStart = 0; + for (w = 1; w < tableLog + 1; ++w) { - uint w; - int symbol = (int)wksp->rankVal[0]; - int rankStart = 0; - for (w = 1; w < tableLog + 1; ++w) + int symbolCount = (int)wksp->rankVal[w]; + int length = 1 << (int)w >> 1; + int uStart = rankStart; + byte nbBits = (byte)(tableLog + 1 - w); + int s; + int u; + switch (length) { - int symbolCount = (int)wksp->rankVal[w]; - int length = 1 << (int)w >> 1; - int uStart = rankStart; - byte nbBits = (byte)(tableLog + 1 - w); - int s; - int u; - switch (length) - { - case 1: - for (s = 0; s < symbolCount; ++s) - { - HUF_DEltX1 D; - D.@byte = wksp->symbols[symbol + s]; - D.nbBits = nbBits; - dt[uStart] = D; - uStart += 1; - } + case 1: + for (s = 0; s < symbolCount; ++s) + { + HUF_DEltX1 D; + D.@byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart] = D; + uStart += 1; + } - break; - case 2: - for (s = 0; s < symbolCount; ++s) - { - HUF_DEltX1 D; - D.@byte = wksp->symbols[symbol + s]; - D.nbBits = nbBits; - dt[uStart + 0] = D; - dt[uStart + 1] = D; - uStart += 2; - } + break; + case 2: + for (s = 0; s < symbolCount; ++s) + { + HUF_DEltX1 D; + D.@byte = wksp->symbols[symbol + s]; + D.nbBits = nbBits; + dt[uStart + 0] = D; + dt[uStart + 1] = D; + uStart += 2; + } - break; - case 4: - for (s = 0; s < symbolCount; ++s) - { - ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); - MEM_write64(dt + uStart, D4); - uStart += 4; - } + break; + case 4: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + uStart += 4; + } - break; - case 8: - for (s = 0; s < symbolCount; ++s) - { - ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); - MEM_write64(dt + uStart, D4); - MEM_write64(dt + uStart + 4, D4); - uStart += 8; - } + break; + 
case 8: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + MEM_write64(dt + uStart, D4); + MEM_write64(dt + uStart + 4, D4); + uStart += 8; + } - break; - default: - for (s = 0; s < symbolCount; ++s) + break; + default: + for (s = 0; s < symbolCount; ++s) + { + ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); + for (u = 0; u < length; u += 16) { - ulong D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits); - for (u = 0; u < length; u += 16) - { - MEM_write64(dt + uStart + u + 0, D4); - MEM_write64(dt + uStart + u + 4, D4); - MEM_write64(dt + uStart + u + 8, D4); - MEM_write64(dt + uStart + u + 12, D4); - } - - assert(u == length); - uStart += length; + MEM_write64(dt + uStart + u + 0, D4); + MEM_write64(dt + uStart + u + 4, D4); + MEM_write64(dt + uStart + u + 8, D4); + MEM_write64(dt + uStart + u + 12, D4); } - break; - } + assert(u == length); + uStart += length; + } - symbol += symbolCount; - rankStart += symbolCount * length; + break; } - } - return iSize; + symbol += symbolCount; + rankStart += symbolCount * length; + } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte HUF_decodeSymbolX1(BIT_DStream_t* Dstream, HUF_DEltX1* dt, uint dtLog) - { - /* note : dtLog >= 1 */ - nuint val = BIT_lookBitsFast(Dstream, dtLog); - byte c = dt[val].@byte; - BIT_skipBits(Dstream, dt[val].nbBits); - return c; - } + return iSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte HUF_decodeSymbolX1(BIT_DStream_t* Dstream, HUF_DEltX1* dt, uint dtLog) + { + /* note : dtLog >= 1 */ + nuint val = BIT_lookBitsFast(Dstream, dtLog); + byte c = dt[val].@byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decodeStreamX1( - byte* p, - BIT_DStream_t* bitDPtr, - byte* pEnd, - HUF_DEltX1* dt, - uint dtLog - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private 
static nuint HUF_decodeStreamX1( + byte* p, + BIT_DStream_t* bitDPtr, + byte* pEnd, + HUF_DEltX1* dt, + uint dtLog + ) + { + byte* pStart = p; + if (pEnd - p > 3) { - byte* pStart = p; - if (pEnd - p > 3) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd - 3 + ) { - while ( - BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p < pEnd - 3 - ) - { - if (MEM_64bits) - *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + if (MEM_64bits) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); - if (MEM_64bits) - *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + if (MEM_64bits) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); - } - } - else - { - BIT_reloadDStream(bitDPtr); + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); } + } + else + { + BIT_reloadDStream(bitDPtr); + } - if (MEM_32bits) - while ( - BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p < pEnd - ) - *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); - while (p < pEnd) + if (MEM_32bits) + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd + ) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); - return (nuint)(pEnd - pStart); + while (p < pEnd) + *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); + return (nuint)(pEnd - pStart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress1X1_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + byte* op = (byte*)dst; + byte* oend = ZSTD_maybeNullPtrAdd(op, (nint)dstSize); + void* dtPtr = DTable + 1; + HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; + BIT_DStream_t bitD; + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint dtLog = dtd.tableLog; + { + nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (ERR_isError(_var_err__)) + return _var_err__; } - 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress1X1_usingDTable_internal_body( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) + HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); + if (BIT_endOfDStream(&bitD) == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return dstSize; + } + + /* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress4X1_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + if (cSrcSize < 10) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (dstSize < 6) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); { - byte* op = (byte*)dst; - byte* oend = ZSTD_maybeNullPtrAdd(op, (nint)dstSize); + byte* istart = (byte*)cSrc; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* olimit = oend - 3; void* dtPtr = DTable + 1; HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; - BIT_DStream_t bitD; + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + nuint length1 = MEM_readLE16(istart); + nuint length2 = MEM_readLE16(istart + 2); + nuint length3 = MEM_readLE16(istart + 4); + nuint length4 = cSrcSize - (length1 + length2 + length3 + 6); + /* jumpTable */ + byte* istart1 = istart + 6; + byte* istart2 = istart1 + length1; + byte* istart3 = istart2 + length2; + byte* istart4 = istart3 + length3; + nuint segmentSize = (dstSize + 3) / 4; + byte* opStart2 = ostart + segmentSize; + byte* opStart3 = opStart2 + segmentSize; + byte* opStart4 = opStart3 + segmentSize; + byte* op1 = ostart; + byte* op2 = opStart2; + byte* op3 = opStart3; + byte* op4 = opStart4; DTableDesc dtd = HUF_getDTableDesc(DTable); uint dtLog = dtd.tableLog; + uint endSignal = 1; + if 
(length4 > cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (opStart4 > oend) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + assert(dstSize >= 6); { - nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize); + nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1); if (ERR_isError(_var_err__)) return _var_err__; } - HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); - if (BIT_endOfDStream(&bitD) == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - return dstSize; - } - - /* HUF_decompress4X1_usingDTable_internal_body(): - * Conditions : - * @dstSize >= 6 - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress4X1_usingDTable_internal_body( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) - { - if (cSrcSize < 10) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (dstSize < 6) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); { - byte* istart = (byte*)cSrc; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - byte* olimit = oend - 3; - void* dtPtr = DTable + 1; - HUF_DEltX1* dt = (HUF_DEltX1*)dtPtr; - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - nuint length1 = MEM_readLE16(istart); - nuint length2 = MEM_readLE16(istart + 2); - nuint length3 = MEM_readLE16(istart + 4); - nuint length4 = cSrcSize - (length1 + length2 + length3 + 6); - /* jumpTable */ - byte* istart1 = istart + 6; - byte* istart2 = istart1 + length1; - byte* istart3 = istart2 + length2; - byte* istart4 = istart3 + length3; - nuint segmentSize = (dstSize + 3) / 4; - byte* opStart2 = ostart + segmentSize; - byte* opStart3 = opStart2 + segmentSize; - byte* opStart4 = opStart3 + segmentSize; - byte* op1 = ostart; - byte* op2 = opStart2; - byte* op3 = opStart3; - byte* op4 = 
opStart4; - DTableDesc dtd = HUF_getDTableDesc(DTable); - uint dtLog = dtd.tableLog; - uint endSignal = 1; - if (length4 > cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (opStart4 > oend) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - assert(dstSize >= 6); - { - nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1); - if (ERR_isError(_var_err__)) - return _var_err__; - } - - { - nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2); - if (ERR_isError(_var_err__)) - return _var_err__; - } + nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2); + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3); - if (ERR_isError(_var_err__)) - return _var_err__; - } + { + nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3); + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4); - if (ERR_isError(_var_err__)) - return _var_err__; - } + { + nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4); + if (ERR_isError(_var_err__)) + return _var_err__; + } - if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) + if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) + { + for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0; ) { - for (; (endSignal & (uint)(op4 < olimit ? 
1 : 0)) != 0; ) - { - if (MEM_64bits) - *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); - if (MEM_64bits) - *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); - if (MEM_64bits) - *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); - if (MEM_64bits) - *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + if (MEM_64bits) *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + if (MEM_64bits) *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + if (MEM_64bits) *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + if (MEM_64bits) *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); - if (MEM_64bits) - *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); - if (MEM_64bits) - *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); - if (MEM_64bits) - *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); - if (MEM_64bits) - *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + if (MEM_64bits) *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + if (MEM_64bits) *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + if (MEM_64bits) *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + if (MEM_64bits) *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); - endSignal &= - BIT_reloadDStreamFast(&bitD1) - == BIT_DStream_status.BIT_DStream_unfinished - ? 1U - : 0U; - endSignal &= - BIT_reloadDStreamFast(&bitD2) - == BIT_DStream_status.BIT_DStream_unfinished - ? 1U - : 0U; - endSignal &= - BIT_reloadDStreamFast(&bitD3) - == BIT_DStream_status.BIT_DStream_unfinished - ? 1U - : 0U; - endSignal &= - BIT_reloadDStreamFast(&bitD4) - == BIT_DStream_status.BIT_DStream_unfinished - ? 
1U - : 0U; - } - } - - if (op1 > opStart2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (op2 > opStart3) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (op3 > opStart4) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); - { - uint endCheck = - BIT_endOfDStream(&bitD1) - & BIT_endOfDStream(&bitD2) - & BIT_endOfDStream(&bitD3) - & BIT_endOfDStream(&bitD4); - if (endCheck == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + *op1++ = HUF_decodeSymbolX1(&bitD1, dt, dtLog); + *op2++ = HUF_decodeSymbolX1(&bitD2, dt, dtLog); + *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); + *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); + endSignal &= + BIT_reloadDStreamFast(&bitD1) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD2) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD3) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD4) + == BIT_DStream_status.BIT_DStream_unfinished + ? 
1U + : 0U; } + } - return dstSize; + if (op1 > opStart2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op2 > opStart3) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op3 > opStart4) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); + { + uint endCheck = + BIT_endOfDStream(&bitD1) + & BIT_endOfDStream(&bitD2) + & BIT_endOfDStream(&bitD3) + & BIT_endOfDStream(&bitD4); + if (endCheck == 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - } - private static nuint HUF_decompress4X1_usingDTable_internal_default( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) - { - return HUF_decompress4X1_usingDTable_internal_body( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + return dstSize; } + } + + private static nuint HUF_decompress4X1_usingDTable_internal_default( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + return HUF_decompress4X1_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } - private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop( - HUF_DecompressFastArgs* args - ) + private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop( + HUF_DecompressFastArgs* args + ) + { + ulong bits0, + bits1, + bits2, + bits3; + byte* ip0, + ip1, + ip2, + ip3; + byte* op0, + op1, + op2, + op3; + ushort* dtable = (ushort*)args->dt; + byte* oend = args->oend; + byte* ilowest = args->ilowest; + bits0 = args->bits[0]; + bits1 = args->bits[1]; + bits2 = args->bits[2]; + bits3 = args->bits[3]; + ip0 = args->ip.e0; + ip1 = args->ip.e1; + ip2 = args->ip.e2; + ip3 = args->ip.e3; + op0 = 
args->op.e0; + op1 = args->op.e1; + op2 = args->op.e2; + op3 = args->op.e3; + assert(BitConverter.IsLittleEndian); + assert(!MEM_32bits); + for (; ; ) { - ulong bits0, - bits1, - bits2, - bits3; - byte* ip0, - ip1, - ip2, - ip3; - byte* op0, - op1, - op2, - op3; - ushort* dtable = (ushort*)args->dt; - byte* oend = args->oend; - byte* ilowest = args->ilowest; - bits0 = args->bits[0]; - bits1 = args->bits[1]; - bits2 = args->bits[2]; - bits3 = args->bits[3]; - ip0 = args->ip.e0; - ip1 = args->ip.e1; - ip2 = args->ip.e2; - ip3 = args->ip.e3; - op0 = args->op.e0; - op1 = args->op.e1; - op2 = args->op.e2; - op3 = args->op.e3; - assert(BitConverter.IsLittleEndian); - assert(!MEM_32bits); - for (; ; ) + byte* olimit; { - byte* olimit; - { - assert(op0 <= op1); - assert(ip0 >= ilowest); - } + assert(op0 <= op1); + assert(ip0 >= ilowest); + } + + { + assert(op1 <= op2); + assert(ip1 >= ilowest); + } + { + assert(op2 <= op3); + assert(ip2 >= ilowest); + } + + { + assert(op3 <= oend); + assert(ip3 >= ilowest); + } + + { + /* Each iteration produces 5 output symbols per stream */ + nuint oiters = (nuint)(oend - op3) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. + */ + nuint iiters = (nuint)(ip0 - ilowest) / 7; + /* We can safely run iters iterations before running bounds checks */ + nuint iters = oiters < iiters ? 
oiters : iiters; + nuint symbols = iters * 5; + olimit = op3 + symbols; + if (op3 == olimit) + break; { - assert(op1 <= op2); - assert(ip1 >= ilowest); + if (ip1 < ip0) + goto _out; } { - assert(op2 <= op3); - assert(ip2 >= ilowest); + if (ip2 < ip1) + goto _out; } { - assert(op3 <= oend); - assert(ip3 >= ilowest); + if (ip3 < ip2) + goto _out; } + } + + { + assert(ip1 >= ip0); + } + + { + assert(ip2 >= ip1); + } + + { + assert(ip3 >= ip2); + } + do + { { - /* Each iteration produces 5 output symbols per stream */ - nuint oiters = (nuint)(oend - op3) / 5; - /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes - * per stream. - */ - nuint iiters = (nuint)(ip0 - ilowest) / 7; - /* We can safely run iters iterations before running bounds checks */ - nuint iters = oiters < iiters ? oiters : iiters; - nuint symbols = iters * 5; - olimit = op3 + symbols; - if (op3 == olimit) - break; { - if (ip1 < ip0) - goto _out; + /* Decode 5 symbols in each of the 4 streams */ + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[0] = (byte)(entry >> 8 & 0xFF); } { - if (ip2 < ip1) - goto _out; + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[0] = (byte)(entry >> 8 & 0xFF); } { - if (ip3 < ip2) - goto _out; + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[0] = (byte)(entry >> 8 & 0xFF); } - } - { - assert(ip1 >= ip0); + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[0] = (byte)(entry >> 8 & 0xFF); + } } { - assert(ip2 >= ip1); - } + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[1] = (byte)(entry >> 8 & 0xFF); + } - { - assert(ip3 >= ip2); + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[1] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + 
bits2 <<= entry & 0x3F; + op2[1] = (byte)(entry >> 8 & 0xFF); + } + + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[1] = (byte)(entry >> 8 & 0xFF); + } } - do { { - { - /* Decode 5 symbols in each of the 4 streams */ - int index = (int)(bits0 >> 53); - int entry = dtable[index]; - bits0 <<= entry & 0x3F; - op0[0] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits1 >> 53); - int entry = dtable[index]; - bits1 <<= entry & 0x3F; - op1[0] = (byte)(entry >> 8 & 0xFF); - } + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[2] = (byte)(entry >> 8 & 0xFF); + } - { - int index = (int)(bits2 >> 53); - int entry = dtable[index]; - bits2 <<= entry & 0x3F; - op2[0] = (byte)(entry >> 8 & 0xFF); - } + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[2] = (byte)(entry >> 8 & 0xFF); + } - { - int index = (int)(bits3 >> 53); - int entry = dtable[index]; - bits3 <<= entry & 0x3F; - op3[0] = (byte)(entry >> 8 & 0xFF); - } + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[2] = (byte)(entry >> 8 & 0xFF); } { - { - int index = (int)(bits0 >> 53); - int entry = dtable[index]; - bits0 <<= entry & 0x3F; - op0[1] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits1 >> 53); - int entry = dtable[index]; - bits1 <<= entry & 0x3F; - op1[1] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits2 >> 53); - int entry = dtable[index]; - bits2 <<= entry & 0x3F; - op2[1] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits3 >> 53); - int entry = dtable[index]; - bits3 <<= entry & 0x3F; - op3[1] = (byte)(entry >> 8 & 0xFF); - } + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[2] = (byte)(entry >> 8 & 0xFF); } + } + { { - { - int index = (int)(bits0 >> 53); - int entry = dtable[index]; - bits0 <<= entry & 0x3F; - op0[2] = 
(byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits1 >> 53); - int entry = dtable[index]; - bits1 <<= entry & 0x3F; - op1[2] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits2 >> 53); - int entry = dtable[index]; - bits2 <<= entry & 0x3F; - op2[2] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits3 >> 53); - int entry = dtable[index]; - bits3 <<= entry & 0x3F; - op3[2] = (byte)(entry >> 8 & 0xFF); - } + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[3] = (byte)(entry >> 8 & 0xFF); } { - { - int index = (int)(bits0 >> 53); - int entry = dtable[index]; - bits0 <<= entry & 0x3F; - op0[3] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits1 >> 53); - int entry = dtable[index]; - bits1 <<= entry & 0x3F; - op1[3] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits2 >> 53); - int entry = dtable[index]; - bits2 <<= entry & 0x3F; - op2[3] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits3 >> 53); - int entry = dtable[index]; - bits3 <<= entry & 0x3F; - op3[3] = (byte)(entry >> 8 & 0xFF); - } + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[3] = (byte)(entry >> 8 & 0xFF); } { - { - int index = (int)(bits0 >> 53); - int entry = dtable[index]; - bits0 <<= entry & 0x3F; - op0[4] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits1 >> 53); - int entry = dtable[index]; - bits1 <<= entry & 0x3F; - op1[4] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits2 >> 53); - int entry = dtable[index]; - bits2 <<= entry & 0x3F; - op2[4] = (byte)(entry >> 8 & 0xFF); - } - - { - int index = (int)(bits3 >> 53); - int entry = dtable[index]; - bits3 <<= entry & 0x3F; - op3[4] = (byte)(entry >> 8 & 0xFF); - } + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[3] = (byte)(entry >> 8 & 0xFF); } { - { - /* Reload each of the 4 the bitstreams */ - int ctz = 
(int)ZSTD_countTrailingZeros64(bits0); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - op0 += 5; - ip0 -= nbBytes; - bits0 = MEM_read64(ip0) | 1; - bits0 <<= nbBits; - } + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[3] = (byte)(entry >> 8 & 0xFF); + } + } - { - int ctz = (int)ZSTD_countTrailingZeros64(bits1); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - op1 += 5; - ip1 -= nbBytes; - bits1 = MEM_read64(ip1) | 1; - bits1 <<= nbBits; - } + { + { + int index = (int)(bits0 >> 53); + int entry = dtable[index]; + bits0 <<= entry & 0x3F; + op0[4] = (byte)(entry >> 8 & 0xFF); + } - { - int ctz = (int)ZSTD_countTrailingZeros64(bits2); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - op2 += 5; - ip2 -= nbBytes; - bits2 = MEM_read64(ip2) | 1; - bits2 <<= nbBits; - } + { + int index = (int)(bits1 >> 53); + int entry = dtable[index]; + bits1 <<= entry & 0x3F; + op1[4] = (byte)(entry >> 8 & 0xFF); + } - { - int ctz = (int)ZSTD_countTrailingZeros64(bits3); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - op3 += 5; - ip3 -= nbBytes; - bits3 = MEM_read64(ip3) | 1; - bits3 <<= nbBits; - } + { + int index = (int)(bits2 >> 53); + int entry = dtable[index]; + bits2 <<= entry & 0x3F; + op2[4] = (byte)(entry >> 8 & 0xFF); } - } while (op3 < olimit); - } - _out: - args->bits[0] = bits0; - args->bits[1] = bits1; - args->bits[2] = bits2; - args->bits[3] = bits3; - args->ip.e0 = ip0; - args->ip.e1 = ip1; - args->ip.e2 = ip2; - args->ip.e3 = ip3; - args->op.e0 = op0; - args->op.e1 = op1; - args->op.e2 = op2; - args->op.e3 = op3; - } + { + int index = (int)(bits3 >> 53); + int entry = dtable[index]; + bits3 <<= entry & 0x3F; + op3[4] = (byte)(entry >> 8 & 0xFF); + } + } - /** - * @returns @p dstSize on success (>= 6) - * 0 if the fallback implementation should be used - * An error if an error occurred - */ - private static nuint HUF_decompress4X1_usingDTable_internal_fast( - void* dst, - nuint dstSize, - void* cSrc, - nuint 
cSrcSize, - uint* DTable, - void* loopFn - ) - { - void* dt = DTable + 1; - byte* ilowest = (byte*)cSrc; - byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); - HUF_DecompressFastArgs args; - { - nuint ret = HUF_DecompressFastArgs_init( - &args, - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); { - nuint err_code = ret; - if (ERR_isError(err_code)) { - return err_code; + /* Reload each of the 4 the bitstreams */ + int ctz = (int)ZSTD_countTrailingZeros64(bits0); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op0 += 5; + ip0 -= nbBytes; + bits0 = MEM_read64(ip0) | 1; + bits0 <<= nbBits; } - } - if (ret == 0) - return 0; - } + { + int ctz = (int)ZSTD_countTrailingZeros64(bits1); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op1 += 5; + ip1 -= nbBytes; + bits1 = MEM_read64(ip1) | 1; + bits1 <<= nbBits; + } - assert(args.ip.e0 >= args.ilowest); - ((delegate* managed)loopFn)(&args); - assert(args.ip.e0 >= ilowest); - assert(args.ip.e0 >= ilowest); - assert(args.ip.e1 >= ilowest); - assert(args.ip.e2 >= ilowest); - assert(args.ip.e3 >= ilowest); - assert(args.op.e3 <= oend); - assert(ilowest == args.ilowest); - assert(ilowest + 6 == args.iend.e0); - { - nuint segmentSize = (dstSize + 3) / 4; - byte* segmentEnd = (byte*)dst; - int i; - for (i = 0; i < 4; ++i) - { - BIT_DStream_t bit; - if (segmentSize <= (nuint)(oend - segmentEnd)) - segmentEnd += segmentSize; - else - segmentEnd = oend; { - nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd); - if (ERR_isError(err_code)) - { - return err_code; - } + int ctz = (int)ZSTD_countTrailingZeros64(bits2); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op2 += 5; + ip2 -= nbBytes; + bits2 = MEM_read64(ip2) | 1; + bits2 <<= nbBits; } - (&args.op.e0)[i] += HUF_decodeStreamX1( - (&args.op.e0)[i], - &bit, - segmentEnd, - (HUF_DEltX1*)dt, - 11 - ); - if ((&args.op.e0)[i] != segmentEnd) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + { + int ctz = 
(int)ZSTD_countTrailingZeros64(bits3); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + op3 += 5; + ip3 -= nbBytes; + bits3 = MEM_read64(ip3) | 1; + bits3 <<= nbBits; + } } - } - - assert(dstSize != 0); - return dstSize; + } while (op3 < olimit); } - private static nuint HUF_decompress1X1_usingDTable_internal( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) + _out: + args->bits[0] = bits0; + args->bits[1] = bits1; + args->bits[2] = bits2; + args->bits[3] = bits3; + args->ip.e0 = ip0; + args->ip.e1 = ip1; + args->ip.e2 = ip2; + args->ip.e3 = ip3; + args->op.e0 = op0; + args->op.e1 = op1; + args->op.e2 = op2; + args->op.e3 = op3; + } + + /** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ + private static nuint HUF_decompress4X1_usingDTable_internal_fast( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + void* loopFn + ) + { + void* dt = DTable + 1; + byte* ilowest = (byte*)cSrc; + byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); + HUF_DecompressFastArgs args; { - return HUF_decompress1X1_usingDTable_internal_body( + nuint ret = HUF_DecompressFastArgs_init( + &args, dst, dstSize, cSrc, cSrcSize, DTable ); + { + nuint err_code = ret; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (ret == 0) + return 0; } - private static nuint HUF_decompress4X1_usingDTable_internal( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) + assert(args.ip.e0 >= args.ilowest); + ((delegate* managed)loopFn)(&args); + assert(args.ip.e0 >= ilowest); + assert(args.ip.e0 >= ilowest); + assert(args.ip.e1 >= ilowest); + assert(args.ip.e2 >= ilowest); + assert(args.ip.e3 >= ilowest); + assert(args.op.e3 <= oend); + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend.e0); { - void* fallbackFn = (delegate* managed)( - 
&HUF_decompress4X1_usingDTable_internal_default - ); - void* loopFn = (delegate* managed)( - &HUF_decompress4X1_usingDTable_internal_fast_c_loop - ); - if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) + nuint segmentSize = (dstSize + 3) / 4; + byte* segmentEnd = (byte*)dst; + int i; + for (i = 0; i < 4; ++i) { - nuint ret = HUF_decompress4X1_usingDTable_internal_fast( - dst, - dstSize, - cSrc, - cSrcSize, - DTable, - loopFn + BIT_DStream_t bit; + if (segmentSize <= (nuint)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + { + nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + (&args.op.e0)[i] += HUF_decodeStreamX1( + (&args.op.e0)[i], + &bit, + segmentEnd, + (HUF_DEltX1*)dt, + 11 ); - if (ret != 0) - return ret; + if ((&args.op.e0)[i] != segmentEnd) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } + } + + assert(dstSize != 0); + return dstSize; + } - return ((delegate* managed)fallbackFn)( + private static nuint HUF_decompress1X1_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + return HUF_decompress1X1_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } + + private static nuint HUF_decompress4X1_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + void* fallbackFn = (delegate* managed)( + &HUF_decompress4X1_usingDTable_internal_default + ); + void* loopFn = (delegate* managed)( + &HUF_decompress4X1_usingDTable_internal_fast_c_loop + ); + if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) + { + nuint ret = HUF_decompress4X1_usingDTable_internal_fast( dst, dstSize, cSrc, cSrcSize, - DTable + DTable, + loopFn ); + if (ret != 0) + return ret; } - private static nuint HUF_decompress4X1_DCtx_wksp( - uint* dctx, - void* dst, - nuint 
dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) - { - byte* ip = (byte*)cSrc; - nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (ERR_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - ip += hSize; - cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); - } + return ((delegate* managed)fallbackFn)( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } - /** - * Constructs a HUF_DEltX2 in a U32. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_buildDEltX2U32(uint symbol, uint nbBits, uint baseSeq, int level) - { - uint seq; - if (BitConverter.IsLittleEndian) - { - seq = level == 1 ? symbol : baseSeq + (symbol << 8); - return seq + (nbBits << 16) + ((uint)level << 24); - } - else - { - seq = level == 1 ? symbol << 8 : (baseSeq << 8) + symbol; - return (seq << 16) + (nbBits << 8) + (uint)level; - } - } + private static nuint HUF_decompress4X1_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + byte* ip = (byte*)cSrc; + nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); + if (ERR_isError(hSize)) + return hSize; + if (hSize >= cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + ip += hSize; + cSrcSize -= hSize; + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); + } - /** - * Constructs a HUF_DEltX2. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static HUF_DEltX2 HUF_buildDEltX2(uint symbol, uint nbBits, uint baseSeq, int level) + /** + * Constructs a HUF_DEltX2 in a U32. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint HUF_buildDEltX2U32(uint symbol, uint nbBits, uint baseSeq, int level) + { + uint seq; + if (BitConverter.IsLittleEndian) { - HUF_DEltX2 DElt; - uint val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); - memcpy(&DElt, &val, sizeof(uint)); - return DElt; + seq = level == 1 ? symbol : baseSeq + (symbol << 8); + return seq + (nbBits << 16) + ((uint)level << 24); } - - /** - * Constructs 2 HUF_DEltX2s and packs them into a U64. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong HUF_buildDEltX2U64(uint symbol, uint nbBits, ushort baseSeq, int level) + else { - uint DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); - return DElt + ((ulong)DElt << 32); + seq = level == 1 ? symbol << 8 : (baseSeq << 8) + symbol; + return (seq << 16) + (nbBits << 8) + (uint)level; } + } + + /** + * Constructs a HUF_DEltX2. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static HUF_DEltX2 HUF_buildDEltX2(uint symbol, uint nbBits, uint baseSeq, int level) + { + HUF_DEltX2 DElt; + uint val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + memcpy(&DElt, &val, sizeof(uint)); + return DElt; + } - /** - * Fills the DTable rank with all the symbols from [begin, end) that are each - * nbBits long. - * - * @param DTableRank The start of the rank in the DTable. - * @param begin The first symbol to fill (inclusive). - * @param end The last symbol to fill (exclusive). - * @param nbBits Each symbol is nbBits long. - * @param tableLog The table log. - * @param baseSeq If level == 1 { 0 } else { the first level symbol } - * @param level The level in the table. Must be 1 or 2. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_fillDTableX2ForWeight( - HUF_DEltX2* DTableRank, - sortedSymbol_t* begin, - sortedSymbol_t* end, - uint nbBits, - uint tableLog, - ushort baseSeq, - int level - ) + /** + * Constructs 2 HUF_DEltX2s and packs them into a U64. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong HUF_buildDEltX2U64(uint symbol, uint nbBits, ushort baseSeq, int level) + { + uint DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + return DElt + ((ulong)DElt << 32); + } + + /** + * Fills the DTable rank with all the symbols from [begin, end) that are each + * nbBits long. + * + * @param DTableRank The start of the rank in the DTable. + * @param begin The first symbol to fill (inclusive). + * @param end The last symbol to fill (exclusive). + * @param nbBits Each symbol is nbBits long. + * @param tableLog The table log. + * @param baseSeq If level == 1 { 0 } else { the first level symbol } + * @param level The level in the table. Must be 1 or 2. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_fillDTableX2ForWeight( + HUF_DEltX2* DTableRank, + sortedSymbol_t* begin, + sortedSymbol_t* end, + uint nbBits, + uint tableLog, + ushort baseSeq, + int level + ) + { + /* quiet static-analyzer */ + uint length = 1U << (int)(tableLog - nbBits & 0x1F); + sortedSymbol_t* ptr; + assert(level >= 1 && level <= 2); + switch (length) { - /* quiet static-analyzer */ - uint length = 1U << (int)(tableLog - nbBits & 0x1F); - sortedSymbol_t* ptr; - assert(level >= 1 && level <= 2); - switch (length) - { - case 1: - for (ptr = begin; ptr != end; ++ptr) - { - HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); - *DTableRank++ = DElt; - } + case 1: + for (ptr = begin; ptr != end; ++ptr) + { + HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + *DTableRank++ = DElt; + } - break; - case 2: - for (ptr = begin; ptr != end; ++ptr) - { - HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); - DTableRank[0] = DElt; - DTableRank[1] = DElt; - DTableRank += 2; - } + break; + case 2: + for (ptr = begin; ptr != end; ++ptr) + { + HUF_DEltX2 DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + DTableRank[0] = DElt; + DTableRank[1] = DElt; + DTableRank += 2; + } - break; - case 4: - for (ptr = begin; ptr != end; ++ptr) - { - ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); - memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); - memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); - DTableRank += 4; - } + break; + case 4: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); + DTableRank += 4; + } - break; - case 8: - for (ptr = begin; ptr != end; ++ptr) + break; + case 8: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, 
baseSeq, level); + memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 4, &DEltX2, sizeof(ulong)); + memcpy(DTableRank + 6, &DEltX2, sizeof(ulong)); + DTableRank += 8; + } + + break; + default: + for (ptr = begin; ptr != end; ++ptr) + { + ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + HUF_DEltX2* DTableRankEnd = DTableRank + length; + for (; DTableRank != DTableRankEnd; DTableRank += 8) { - ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); memcpy(DTableRank + 4, &DEltX2, sizeof(ulong)); memcpy(DTableRank + 6, &DEltX2, sizeof(ulong)); - DTableRank += 8; } + } + + break; + } + } + /* HUF_fillDTableX2Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void HUF_fillDTableX2Level2( + HUF_DEltX2* DTable, + uint targetLog, + uint consumedBits, + uint* rankVal, + int minWeight, + int maxWeight1, + sortedSymbol_t* sortedSymbols, + uint* rankStart, + uint nbBitsBaseline, + ushort baseSeq + ) + { + if (minWeight > 1) + { + /* quiet static-analyzer */ + uint length = 1U << (int)(targetLog - consumedBits & 0x1F); + /* baseSeq */ + ulong DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, 0, 1); + int skipSize = (int)rankVal[minWeight]; + assert(length > 1); + assert((uint)skipSize < length); + switch (length) + { + case 2: + assert(skipSize == 1); + memcpy(DTable, &DEltX2, sizeof(ulong)); + break; + case 4: + assert(skipSize <= 4); + memcpy(DTable + 0, &DEltX2, sizeof(ulong)); + memcpy(DTable + 2, &DEltX2, sizeof(ulong)); break; default: - for (ptr = begin; ptr != end; ++ptr) + { + int i; + for (i = 0; i < skipSize; i += 8) { - ulong DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); - HUF_DEltX2* DTableRankEnd = DTableRank + length; - for (; 
DTableRank != DTableRankEnd; DTableRank += 8) - { - memcpy(DTableRank + 0, &DEltX2, sizeof(ulong)); - memcpy(DTableRank + 2, &DEltX2, sizeof(ulong)); - memcpy(DTableRank + 4, &DEltX2, sizeof(ulong)); - memcpy(DTableRank + 6, &DEltX2, sizeof(ulong)); - } + memcpy(DTable + i + 0, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 2, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 4, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 6, &DEltX2, sizeof(ulong)); } + } break; } } - /* HUF_fillDTableX2Level2() : - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void HUF_fillDTableX2Level2( - HUF_DEltX2* DTable, - uint targetLog, - uint consumedBits, - uint* rankVal, - int minWeight, - int maxWeight1, - sortedSymbol_t* sortedSymbols, - uint* rankStart, - uint nbBitsBaseline, - ushort baseSeq - ) { - if (minWeight > 1) + int w; + for (w = minWeight; w < maxWeight1; ++w) { - /* quiet static-analyzer */ - uint length = 1U << (int)(targetLog - consumedBits & 0x1F); - /* baseSeq */ - ulong DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, 0, 1); - int skipSize = (int)rankVal[minWeight]; - assert(length > 1); - assert((uint)skipSize < length); - switch (length) - { - case 2: - assert(skipSize == 1); - memcpy(DTable, &DEltX2, sizeof(ulong)); - break; - case 4: - assert(skipSize <= 4); - memcpy(DTable + 0, &DEltX2, sizeof(ulong)); - memcpy(DTable + 2, &DEltX2, sizeof(ulong)); - break; - default: - { - int i; - for (i = 0; i < skipSize; i += 8) - { - memcpy(DTable + i + 0, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 2, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 4, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 6, &DEltX2, sizeof(ulong)); - } - } - - break; - } + int begin = (int)rankStart[w]; + int end = (int)rankStart[w + 1]; + uint nbBits = nbBitsBaseline - (uint)w; + uint totalBits = nbBits + consumedBits; + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedSymbols + begin, 
+ sortedSymbols + end, + totalBits, + targetLog, + baseSeq, + 2 + ); } + } + } + private static void HUF_fillDTableX2( + HUF_DEltX2* DTable, + uint targetLog, + sortedSymbol_t* sortedList, + uint* rankStart, + rankValCol_t* rankValOrigin, + uint maxWeight, + uint nbBitsBaseline + ) + { + uint* rankVal = (uint*)&rankValOrigin[0]; + /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + int scaleLog = (int)(nbBitsBaseline - targetLog); + uint minBits = nbBitsBaseline - maxWeight; + int w; + int wEnd = (int)maxWeight + 1; + for (w = 1; w < wEnd; ++w) + { + int begin = (int)rankStart[w]; + int end = (int)rankStart[w + 1]; + uint nbBits = nbBitsBaseline - (uint)w; + if (targetLog - nbBits >= minBits) { - int w; - for (w = minWeight; w < maxWeight1; ++w) + /* Enough room for a second symbol. */ + int start = (int)rankVal[w]; + /* quiet static-analyzer */ + uint length = 1U << (int)(targetLog - nbBits & 0x1F); + int minWeight = (int)(nbBits + (uint)scaleLog); + int s; + if (minWeight < 1) + minWeight = 1; + for (s = begin; s != end; ++s) { - int begin = (int)rankStart[w]; - int end = (int)rankStart[w + 1]; - uint nbBits = nbBitsBaseline - (uint)w; - uint totalBits = nbBits + consumedBits; - HUF_fillDTableX2ForWeight( - DTable + rankVal[w], - sortedSymbols + begin, - sortedSymbols + end, - totalBits, + HUF_fillDTableX2Level2( + DTable + start, targetLog, - baseSeq, - 2 + nbBits, + (uint*)&rankValOrigin[nbBits], + minWeight, + wEnd, + sortedList, + rankStart, + nbBitsBaseline, + sortedList[s].symbol ); + start += (int)length; } } + else + { + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedList + begin, + sortedList + end, + nbBits, + targetLog, + 0, + 1 + ); + } } + } + + private static nuint HUF_readDTableX2_wksp( + uint* DTable, + void* src, + nuint srcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + uint tableLog, + maxW, + nbSymbols; + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint maxTableLog = dtd.maxTableLog; + nuint iSize; + /* force 
compiler to avoid strict-aliasing */ + void* dtPtr = DTable + 1; + HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; + uint* rankStart; + HUF_ReadDTableX2_Workspace* wksp = (HUF_ReadDTableX2_Workspace*)workSpace; + if ((nuint)sizeof(HUF_ReadDTableX2_Workspace) > wkspSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + rankStart = wksp->rankStart0 + 1; + memset(wksp->rankStats, 0, sizeof(uint) * 13); + memset(wksp->rankStart0, 0, sizeof(uint) * 15); + if (maxTableLog > 12) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + iSize = HUF_readStats_wksp( + wksp->weightList, + 255 + 1, + wksp->rankStats, + &nbSymbols, + &tableLog, + src, + srcSize, + wksp->calleeWksp, + sizeof(uint) * 219, + flags + ); + if (ERR_isError(iSize)) + return iSize; + if (tableLog > maxTableLog) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); + if (tableLog <= 11 && maxTableLog > 11) + maxTableLog = 11; + for (maxW = tableLog; wksp->rankStats[maxW] == 0; maxW--) { } - private static void HUF_fillDTableX2( - HUF_DEltX2* DTable, - uint targetLog, - sortedSymbol_t* sortedList, - uint* rankStart, - rankValCol_t* rankValOrigin, - uint maxWeight, - uint nbBitsBaseline - ) { - uint* rankVal = (uint*)&rankValOrigin[0]; - /* note : targetLog >= srcLog, hence scaleLog <= 1 */ - int scaleLog = (int)(nbBitsBaseline - targetLog); - uint minBits = nbBitsBaseline - maxWeight; - int w; - int wEnd = (int)maxWeight + 1; - for (w = 1; w < wEnd; ++w) + uint w, + nextRankStart = 0; + for (w = 1; w < maxW + 1; w++) { - int begin = (int)rankStart[w]; - int end = (int)rankStart[w + 1]; - uint nbBits = nbBitsBaseline - (uint)w; - if (targetLog - nbBits >= minBits) - { - /* Enough room for a second symbol. 
*/ - int start = (int)rankVal[w]; - /* quiet static-analyzer */ - uint length = 1U << (int)(targetLog - nbBits & 0x1F); - int minWeight = (int)(nbBits + (uint)scaleLog); - int s; - if (minWeight < 1) - minWeight = 1; - for (s = begin; s != end; ++s) - { - HUF_fillDTableX2Level2( - DTable + start, - targetLog, - nbBits, - (uint*)&rankValOrigin[nbBits], - minWeight, - wEnd, - sortedList, - rankStart, - nbBitsBaseline, - sortedList[s].symbol - ); - start += (int)length; - } - } - else - { - HUF_fillDTableX2ForWeight( - DTable + rankVal[w], - sortedList + begin, - sortedList + end, - nbBits, - targetLog, - 0, - 1 - ); - } + uint curr = nextRankStart; + nextRankStart += wksp->rankStats[w]; + rankStart[w] = curr; } + + rankStart[0] = nextRankStart; + rankStart[maxW + 1] = nextRankStart; } - private static nuint HUF_readDTableX2_wksp( - uint* DTable, - void* src, - nuint srcSize, - void* workSpace, - nuint wkspSize, - int flags - ) { - uint tableLog, - maxW, - nbSymbols; - DTableDesc dtd = HUF_getDTableDesc(DTable); - uint maxTableLog = dtd.maxTableLog; - nuint iSize; - /* force compiler to avoid strict-aliasing */ - void* dtPtr = DTable + 1; - HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; - uint* rankStart; - HUF_ReadDTableX2_Workspace* wksp = (HUF_ReadDTableX2_Workspace*)workSpace; - if ((nuint)sizeof(HUF_ReadDTableX2_Workspace) > wkspSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - rankStart = wksp->rankStart0 + 1; - memset(wksp->rankStats, 0, sizeof(uint) * 13); - memset(wksp->rankStart0, 0, sizeof(uint) * 15); - if (maxTableLog > 12) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - iSize = HUF_readStats_wksp( - wksp->weightList, - 255 + 1, - wksp->rankStats, - &nbSymbols, - &tableLog, - src, - srcSize, - wksp->calleeWksp, - sizeof(uint) * 219, - flags - ); - if (ERR_isError(iSize)) - return iSize; - if (tableLog > maxTableLog) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); - if 
(tableLog <= 11 && maxTableLog > 11) - maxTableLog = 11; - for (maxW = tableLog; wksp->rankStats[maxW] == 0; maxW--) { } - + uint s; + for (s = 0; s < nbSymbols; s++) { - uint w, - nextRankStart = 0; - for (w = 1; w < maxW + 1; w++) - { - uint curr = nextRankStart; - nextRankStart += wksp->rankStats[w]; - rankStart[w] = curr; - } - - rankStart[0] = nextRankStart; - rankStart[maxW + 1] = nextRankStart; + uint w = wksp->weightList[s]; + uint r = rankStart[w]++; + (&wksp->sortedSymbol.e0)[r].symbol = (byte)s; } + rankStart[0] = 0; + } + + { + uint* rankVal0 = (uint*)&wksp->rankVal.e0; { - uint s; - for (s = 0; s < nbSymbols; s++) + /* tableLog <= maxTableLog */ + int rescale = (int)(maxTableLog - tableLog - 1); + uint nextRankVal = 0; + uint w; + for (w = 1; w < maxW + 1; w++) { - uint w = wksp->weightList[s]; - uint r = rankStart[w]++; - (&wksp->sortedSymbol.e0)[r].symbol = (byte)s; + uint curr = nextRankVal; + nextRankVal += wksp->rankStats[w] << (int)(w + (uint)rescale); + rankVal0[w] = curr; } - - rankStart[0] = 0; } { - uint* rankVal0 = (uint*)&wksp->rankVal.e0; + uint minBits = tableLog + 1 - maxW; + uint consumed; + for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { - /* tableLog <= maxTableLog */ - int rescale = (int)(maxTableLog - tableLog - 1); - uint nextRankVal = 0; + uint* rankValPtr = (uint*)&(&wksp->rankVal.e0)[consumed]; uint w; for (w = 1; w < maxW + 1; w++) { - uint curr = nextRankVal; - nextRankVal += wksp->rankStats[w] << (int)(w + (uint)rescale); - rankVal0[w] = curr; - } - } - - { - uint minBits = tableLog + 1 - maxW; - uint consumed; - for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) - { - uint* rankValPtr = (uint*)&(&wksp->rankVal.e0)[consumed]; - uint w; - for (w = 1; w < maxW + 1; w++) - { - rankValPtr[w] = rankVal0[w] >> (int)consumed; - } + rankValPtr[w] = rankVal0[w] >> (int)consumed; } } } - - HUF_fillDTableX2( - dt, - maxTableLog, - &wksp->sortedSymbol.e0, - wksp->rankStart0, - 
&wksp->rankVal.e0, - maxW, - tableLog + 1 - ); - dtd.tableLog = (byte)maxTableLog; - dtd.tableType = 1; - memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); - return iSize; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_decodeSymbolX2( - void* op, - BIT_DStream_t* DStream, - HUF_DEltX2* dt, - uint dtLog - ) + HUF_fillDTableX2( + dt, + maxTableLog, + &wksp->sortedSymbol.e0, + wksp->rankStart0, + &wksp->rankVal.e0, + maxW, + tableLog + 1 + ); + dtd.tableLog = (byte)maxTableLog; + dtd.tableType = 1; + memcpy(DTable, &dtd, (uint)sizeof(DTableDesc)); + return iSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint HUF_decodeSymbolX2( + void* op, + BIT_DStream_t* DStream, + HUF_DEltX2* dt, + uint dtLog + ) + { + /* note : dtLog >= 1 */ + nuint val = BIT_lookBitsFast(DStream, dtLog); + memcpy(op, &dt[val].sequence, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint HUF_decodeLastSymbolX2( + void* op, + BIT_DStream_t* DStream, + HUF_DEltX2* dt, + uint dtLog + ) + { + /* note : dtLog >= 1 */ + nuint val = BIT_lookBitsFast(DStream, dtLog); + memcpy(op, &dt[val].sequence, 1); + if (dt[val].length == 1) { - /* note : dtLog >= 1 */ - nuint val = BIT_lookBitsFast(DStream, dtLog); - memcpy(op, &dt[val].sequence, 2); BIT_skipBits(DStream, dt[val].nbBits); - return dt[val].length; } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint HUF_decodeLastSymbolX2( - void* op, - BIT_DStream_t* DStream, - HUF_DEltX2* dt, - uint dtLog - ) + else { - /* note : dtLog >= 1 */ - nuint val = BIT_lookBitsFast(DStream, dtLog); - memcpy(op, &dt[val].sequence, 1); - if (dt[val].length == 1) + if (DStream->bitsConsumed < (uint)(sizeof(nuint) * 8)) { BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (uint)(sizeof(nuint) * 8)) + DStream->bitsConsumed = (uint)(sizeof(nuint) * 8); } - else - { - if 
(DStream->bitsConsumed < (uint)(sizeof(nuint) * 8)) - { - BIT_skipBits(DStream, dt[val].nbBits); - if (DStream->bitsConsumed > (uint)(sizeof(nuint) * 8)) - DStream->bitsConsumed = (uint)(sizeof(nuint) * 8); - } - } - - return 1; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decodeStreamX2( - byte* p, - BIT_DStream_t* bitDPtr, - byte* pEnd, - HUF_DEltX2* dt, - uint dtLog - ) + return 1; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decodeStreamX2( + byte* p, + BIT_DStream_t* bitDPtr, + byte* pEnd, + HUF_DEltX2* dt, + uint dtLog + ) + { + byte* pStart = p; + if ((nuint)(pEnd - p) >= (nuint)sizeof(nuint)) { - byte* pStart = p; - if ((nuint)(pEnd - p) >= (nuint)sizeof(nuint)) + if (dtLog <= 11 && MEM_64bits) { - if (dtLog <= 11 && MEM_64bits) - { - while ( - BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p < pEnd - 9 - ) - { - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - } - } - else + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p < pEnd - 9 + ) { - while ( - BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p < pEnd - (sizeof(nuint) - 1) - ) - { - if (MEM_64bits) - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - if (MEM_64bits) - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - } + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); } } else - { - BIT_reloadDStream(bitDPtr); - } - - if ((nuint)(pEnd - 
p) >= 2) { while ( BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p <= pEnd - 2 + && p < pEnd - (sizeof(nuint) - 1) ) + { + if (MEM_64bits) + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); - while (p <= pEnd - 2) + if (MEM_64bits) + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + } } - - if (p < pEnd) - p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); - return (nuint)(p - pStart); + } + else + { + BIT_reloadDStream(bitDPtr); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress1X2_usingDTable_internal_body( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) + if ((nuint)(pEnd - p) >= 2) { - BIT_DStream_t bitD; - { - /* Init */ - nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize); - if (ERR_isError(_var_err__)) - return _var_err__; - } + while ( + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished + && p <= pEnd - 2 + ) + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + while (p <= pEnd - 2) + p += HUF_decodeSymbolX2(p, bitDPtr, dt, dtLog); + } - { - byte* ostart = (byte*)dst; - byte* oend = ZSTD_maybeNullPtrAdd(ostart, (nint)dstSize); - /* force compiler to not use strict-aliasing */ - void* dtPtr = DTable + 1; - HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; - DTableDesc dtd = HUF_getDTableDesc(DTable); - HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog); - } + if (p < pEnd) + p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); + return (nuint)(p - pStart); + } - if (BIT_endOfDStream(&bitD) == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - return dstSize; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress1X2_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + BIT_DStream_t bitD; + { + /* Init */ + 
nuint _var_err__ = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (ERR_isError(_var_err__)) + return _var_err__; + } + + { + byte* ostart = (byte*)dst; + byte* oend = ZSTD_maybeNullPtrAdd(ostart, (nint)dstSize); + /* force compiler to not use strict-aliasing */ + void* dtPtr = DTable + 1; + HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; + DTableDesc dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog); } - /* HUF_decompress4X2_usingDTable_internal_body(): - * Conditions: - * @dstSize >= 6 - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint HUF_decompress4X2_usingDTable_internal_body( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) + if (BIT_endOfDStream(&bitD) == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return dstSize; + } + + /* HUF_decompress4X2_usingDTable_internal_body(): + * Conditions: + * @dstSize >= 6 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint HUF_decompress4X2_usingDTable_internal_body( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + if (cSrcSize < 10) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (dstSize < 6) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); { - if (cSrcSize < 10) + byte* istart = (byte*)cSrc; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* olimit = oend - (sizeof(nuint) - 1); + void* dtPtr = DTable + 1; + HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + nuint length1 = MEM_readLE16(istart); + nuint length2 = MEM_readLE16(istart + 2); + nuint length3 = MEM_readLE16(istart + 4); + nuint length4 = cSrcSize - (length1 + length2 + length3 + 6); + /* jumpTable */ + byte* istart1 = istart + 6; + byte* istart2 = istart1 + length1; + byte* istart3 = 
istart2 + length2; + byte* istart4 = istart3 + length3; + nuint segmentSize = (dstSize + 3) / 4; + byte* opStart2 = ostart + segmentSize; + byte* opStart3 = opStart2 + segmentSize; + byte* opStart4 = opStart3 + segmentSize; + byte* op1 = ostart; + byte* op2 = opStart2; + byte* op3 = opStart3; + byte* op4 = opStart4; + uint endSignal = 1; + DTableDesc dtd = HUF_getDTableDesc(DTable); + uint dtLog = dtd.tableLog; + if (length4 > cSrcSize) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (dstSize < 6) + if (opStart4 > oend) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + assert(dstSize >= 6); { - byte* istart = (byte*)cSrc; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - byte* olimit = oend - (sizeof(nuint) - 1); - void* dtPtr = DTable + 1; - HUF_DEltX2* dt = (HUF_DEltX2*)dtPtr; - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - nuint length1 = MEM_readLE16(istart); - nuint length2 = MEM_readLE16(istart + 2); - nuint length3 = MEM_readLE16(istart + 4); - nuint length4 = cSrcSize - (length1 + length2 + length3 + 6); - /* jumpTable */ - byte* istart1 = istart + 6; - byte* istart2 = istart1 + length1; - byte* istart3 = istart2 + length2; - byte* istart4 = istart3 + length3; - nuint segmentSize = (dstSize + 3) / 4; - byte* opStart2 = ostart + segmentSize; - byte* opStart3 = opStart2 + segmentSize; - byte* opStart4 = opStart3 + segmentSize; - byte* op1 = ostart; - byte* op2 = opStart2; - byte* op3 = opStart3; - byte* op4 = opStart4; - uint endSignal = 1; - DTableDesc dtd = HUF_getDTableDesc(DTable); - uint dtLog = dtd.tableLog; - if (length4 > cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (opStart4 > oend) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - assert(dstSize >= 6); - { - nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1); - if 
(ERR_isError(_var_err__)) - return _var_err__; - } + nuint _var_err__ = BIT_initDStream(&bitD1, istart1, length1); + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2); - if (ERR_isError(_var_err__)) - return _var_err__; - } + { + nuint _var_err__ = BIT_initDStream(&bitD2, istart2, length2); + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3); - if (ERR_isError(_var_err__)) - return _var_err__; - } + { + nuint _var_err__ = BIT_initDStream(&bitD3, istart3, length3); + if (ERR_isError(_var_err__)) + return _var_err__; + } - { - nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4); - if (ERR_isError(_var_err__)) - return _var_err__; - } + { + nuint _var_err__ = BIT_initDStream(&bitD4, istart4, length4); + if (ERR_isError(_var_err__)) + return _var_err__; + } - if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) + if ((nuint)(oend - op4) >= (nuint)sizeof(nuint)) + { + for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0; ) { - for (; (endSignal & (uint)(op4 < olimit ? 1 : 0)) != 0; ) - { - if (MEM_64bits) - op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); + if (MEM_64bits) op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); - if (MEM_64bits) - op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); + op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); + if (MEM_64bits) op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); - if (MEM_64bits) - op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); + op1 += HUF_decodeSymbolX2(op1, &bitD1, dt, dtLog); + if (MEM_64bits) op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); - if (MEM_64bits) - op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); + op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); + if (MEM_64bits) op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); - endSignal &= - BIT_reloadDStreamFast(&bitD1) - == BIT_DStream_status.BIT_DStream_unfinished - ? 
1U - : 0U; - endSignal &= - BIT_reloadDStreamFast(&bitD2) - == BIT_DStream_status.BIT_DStream_unfinished - ? 1U - : 0U; - if (MEM_64bits) - op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); + op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); + endSignal &= + BIT_reloadDStreamFast(&bitD1) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD2) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + if (MEM_64bits) op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); - if (MEM_64bits) - op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); + op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); + if (MEM_64bits) op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); - if (MEM_64bits) - op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); + op3 += HUF_decodeSymbolX2(op3, &bitD3, dt, dtLog); + if (MEM_64bits) op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); - if (MEM_64bits) - op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); + op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); + if (MEM_64bits) op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); - endSignal &= - BIT_reloadDStreamFast(&bitD3) - == BIT_DStream_status.BIT_DStream_unfinished - ? 1U - : 0U; - endSignal &= - BIT_reloadDStreamFast(&bitD4) - == BIT_DStream_status.BIT_DStream_unfinished - ? 
1U - : 0U; - } - } - - if (op1 > opStart2) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (op2 > opStart3) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (op3 > opStart4) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); - { - uint endCheck = - BIT_endOfDStream(&bitD1) - & BIT_endOfDStream(&bitD2) - & BIT_endOfDStream(&bitD3) - & BIT_endOfDStream(&bitD4); - if (endCheck == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); + endSignal &= + BIT_reloadDStreamFast(&bitD3) + == BIT_DStream_status.BIT_DStream_unfinished + ? 1U + : 0U; + endSignal &= + BIT_reloadDStreamFast(&bitD4) + == BIT_DStream_status.BIT_DStream_unfinished + ? 
1U + : 0U; } + } - return dstSize; + if (op1 > opStart2) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op2 > opStart3) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (op3 > opStart4) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + { + uint endCheck = + BIT_endOfDStream(&bitD1) + & BIT_endOfDStream(&bitD2) + & BIT_endOfDStream(&bitD3) + & BIT_endOfDStream(&bitD4); + if (endCheck == 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - } - private static nuint HUF_decompress4X2_usingDTable_internal_default( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable - ) - { - return HUF_decompress4X2_usingDTable_internal_body( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + return dstSize; } + } - private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop( - HUF_DecompressFastArgs* args - ) + private static nuint HUF_decompress4X2_usingDTable_internal_default( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable + ) + { + return HUF_decompress4X2_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } + + private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop( + HUF_DecompressFastArgs* args + ) + { + ulong bits0, + bits1, + bits2, + bits3; + byte* ip0, + ip1, + ip2, + ip3; + byte* op0, + op1, + op2, + op3; + byte* oend0, + oend1, + oend2, + oend3; + HUF_DEltX2* dtable = (HUF_DEltX2*)args->dt; + byte* ilowest = args->ilowest; + bits0 = args->bits[0]; + bits1 = args->bits[1]; + bits2 = args->bits[2]; + bits3 = args->bits[3]; + ip0 = args->ip.e0; + ip1 = args->ip.e1; + ip2 = args->ip.e2; + ip3 = 
args->ip.e3; + op0 = args->op.e0; + op1 = args->op.e1; + op2 = args->op.e2; + op3 = args->op.e3; + oend0 = op1; + oend1 = op2; + oend2 = op3; + oend3 = args->oend; + assert(BitConverter.IsLittleEndian); + assert(!MEM_32bits); + for (; ; ) { - ulong bits0, - bits1, - bits2, - bits3; - byte* ip0, - ip1, - ip2, - ip3; - byte* op0, - op1, - op2, - op3; - byte* oend0, - oend1, - oend2, - oend3; - HUF_DEltX2* dtable = (HUF_DEltX2*)args->dt; - byte* ilowest = args->ilowest; - bits0 = args->bits[0]; - bits1 = args->bits[1]; - bits2 = args->bits[2]; - bits3 = args->bits[3]; - ip0 = args->ip.e0; - ip1 = args->ip.e1; - ip2 = args->ip.e2; - ip3 = args->ip.e3; - op0 = args->op.e0; - op1 = args->op.e1; - op2 = args->op.e2; - op3 = args->op.e3; - oend0 = op1; - oend1 = op2; - oend2 = op3; - oend3 = args->oend; - assert(BitConverter.IsLittleEndian); - assert(!MEM_32bits); - for (; ; ) + byte* olimit; { - byte* olimit; + assert(op0 <= oend0); + assert(ip0 >= ilowest); + } + + { + assert(op1 <= oend1); + assert(ip1 >= ilowest); + } + + { + assert(op2 <= oend2); + assert(ip2 >= ilowest); + } + + { + assert(op3 <= oend3); + assert(ip3 >= ilowest); + } + + { + /* Each loop does 5 table lookups for each of the 4 streams. + * Each table lookup consumes up to 11 bits of input, and produces + * up to 2 bytes of output. + */ + /* We can consume up to 7 bytes of input per iteration per stream. + * We also know that each input pointer is >= ip[0]. So we can run + * iters loops before running out of input. + */ + nuint iters = (nuint)(ip0 - ilowest) / 7; { - assert(op0 <= oend0); - assert(ip0 >= ilowest); + nuint oiters = (nuint)(oend0 - op0) / 10; + iters = iters < oiters ? iters : oiters; } { - assert(op1 <= oend1); - assert(ip1 >= ilowest); + nuint oiters = (nuint)(oend1 - op1) / 10; + iters = iters < oiters ? iters : oiters; } { - assert(op2 <= oend2); - assert(ip2 >= ilowest); + nuint oiters = (nuint)(oend2 - op2) / 10; + iters = iters < oiters ? 
iters : oiters; } { - assert(op3 <= oend3); - assert(ip3 >= ilowest); + nuint oiters = (nuint)(oend3 - op3) / 10; + iters = iters < oiters ? iters : oiters; } + olimit = op3 + iters * 5; + if (op3 == olimit) + break; { - /* Each loop does 5 table lookups for each of the 4 streams. - * Each table lookup consumes up to 11 bits of input, and produces - * up to 2 bytes of output. - */ - /* We can consume up to 7 bytes of input per iteration per stream. - * We also know that each input pointer is >= ip[0]. So we can run - * iters loops before running out of input. - */ - nuint iters = (nuint)(ip0 - ilowest) / 7; - { - nuint oiters = (nuint)(oend0 - op0) / 10; - iters = iters < oiters ? iters : oiters; - } + if (ip1 < ip0) + goto _out; + } + { + if (ip2 < ip1) + goto _out; + } + + { + if (ip3 < ip2) + goto _out; + } + } + + { + assert(ip1 >= ip0); + } + + { + assert(ip2 >= ip1); + } + + { + assert(ip3 >= ip2); + } + + do + { + { { - nuint oiters = (nuint)(oend1 - op1) / 10; - iters = iters < oiters ? iters : oiters; + /* Decode 5 symbols from each of the first 3 streams. + * The final stream will be decoded during the reload phase + * to reduce register pressure. + */ + int index = (int)(bits0 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op0, entry.sequence); + bits0 <<= entry.nbBits & 0x3F; + op0 += entry.length; } { - nuint oiters = (nuint)(oend2 - op2) / 10; - iters = iters < oiters ? iters : oiters; + int index = (int)(bits1 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op1, entry.sequence); + bits1 <<= entry.nbBits & 0x3F; + op1 += entry.length; } { - nuint oiters = (nuint)(oend3 - op3) / 10; - iters = iters < oiters ? 
iters : oiters; + int index = (int)(bits2 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op2, entry.sequence); + bits2 <<= entry.nbBits & 0x3F; + op2 += entry.length; } + } - olimit = op3 + iters * 5; - if (op3 == olimit) - break; + { { - if (ip1 < ip0) - goto _out; + int index = (int)(bits0 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op0, entry.sequence); + bits0 <<= entry.nbBits & 0x3F; + op0 += entry.length; } { - if (ip2 < ip1) - goto _out; + int index = (int)(bits1 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op1, entry.sequence); + bits1 <<= entry.nbBits & 0x3F; + op1 += entry.length; } { - if (ip3 < ip2) - goto _out; + int index = (int)(bits2 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op2, entry.sequence); + bits2 <<= entry.nbBits & 0x3F; + op2 += entry.length; } } { - assert(ip1 >= ip0); - } + { + int index = (int)(bits0 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op0, entry.sequence); + bits0 <<= entry.nbBits & 0x3F; + op0 += entry.length; + } - { - assert(ip2 >= ip1); - } + { + int index = (int)(bits1 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op1, entry.sequence); + bits1 <<= entry.nbBits & 0x3F; + op1 += entry.length; + } - { - assert(ip3 >= ip2); + { + int index = (int)(bits2 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op2, entry.sequence); + bits2 <<= entry.nbBits & 0x3F; + op2 += entry.length; + } } - do { { - { - /* Decode 5 symbols from each of the first 3 streams. - * The final stream will be decoded during the reload phase - * to reduce register pressure. 
- */ - int index = (int)(bits0 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op0, entry.sequence); - bits0 <<= entry.nbBits & 0x3F; - op0 += entry.length; - } + int index = (int)(bits0 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op0, entry.sequence); + bits0 <<= entry.nbBits & 0x3F; + op0 += entry.length; + } - { - int index = (int)(bits1 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op1, entry.sequence); - bits1 <<= entry.nbBits & 0x3F; - op1 += entry.length; - } + { + int index = (int)(bits1 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op1, entry.sequence); + bits1 <<= entry.nbBits & 0x3F; + op1 += entry.length; + } - { - int index = (int)(bits2 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op2, entry.sequence); - bits2 <<= entry.nbBits & 0x3F; - op2 += entry.length; - } + { + int index = (int)(bits2 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op2, entry.sequence); + bits2 <<= entry.nbBits & 0x3F; + op2 += entry.length; + } + } + + { + { + int index = (int)(bits0 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op0, entry.sequence); + bits0 <<= entry.nbBits & 0x3F; + op0 += entry.length; } { - { - int index = (int)(bits0 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op0, entry.sequence); - bits0 <<= entry.nbBits & 0x3F; - op0 += entry.length; - } - - { - int index = (int)(bits1 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op1, entry.sequence); - bits1 <<= entry.nbBits & 0x3F; - op1 += entry.length; - } - - { - int index = (int)(bits2 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op2, entry.sequence); - bits2 <<= entry.nbBits & 0x3F; - op2 += entry.length; - } + int index = (int)(bits1 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op1, entry.sequence); + bits1 <<= entry.nbBits & 0x3F; + op1 += entry.length; } { - { - int index = (int)(bits0 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op0, entry.sequence); - bits0 <<= 
entry.nbBits & 0x3F; - op0 += entry.length; - } + int index = (int)(bits2 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op2, entry.sequence); + bits2 <<= entry.nbBits & 0x3F; + op2 += entry.length; + } + } + + { + /* Decode one symbol from the final stream */ + int index = (int)(bits3 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op3, entry.sequence); + bits3 <<= entry.nbBits & 0x3F; + op3 += entry.length; + } + { + { { - int index = (int)(bits1 >> 53); + /* Decode 4 symbols from the final stream & reload bitstreams. + * The final stream is reloaded last, meaning that all 5 symbols + * are decoded from the final stream before it is reloaded. + */ + int index = (int)(bits3 >> 53); HUF_DEltX2 entry = dtable[index]; - MEM_write16(op1, entry.sequence); - bits1 <<= entry.nbBits & 0x3F; - op1 += entry.length; + MEM_write16(op3, entry.sequence); + bits3 <<= entry.nbBits & 0x3F; + op3 += entry.length; } { - int index = (int)(bits2 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op2, entry.sequence); - bits2 <<= entry.nbBits & 0x3F; - op2 += entry.length; + int ctz = (int)ZSTD_countTrailingZeros64(bits0); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + ip0 -= nbBytes; + bits0 = MEM_read64(ip0) | 1; + bits0 <<= nbBits; } } { { - int index = (int)(bits0 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op0, entry.sequence); - bits0 <<= entry.nbBits & 0x3F; - op0 += entry.length; - } - - { - int index = (int)(bits1 >> 53); + int index = (int)(bits3 >> 53); HUF_DEltX2 entry = dtable[index]; - MEM_write16(op1, entry.sequence); - bits1 <<= entry.nbBits & 0x3F; - op1 += entry.length; + MEM_write16(op3, entry.sequence); + bits3 <<= entry.nbBits & 0x3F; + op3 += entry.length; } { - int index = (int)(bits2 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op2, entry.sequence); - bits2 <<= entry.nbBits & 0x3F; - op2 += entry.length; + int ctz = (int)ZSTD_countTrailingZeros64(bits1); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; 
+ ip1 -= nbBytes; + bits1 = MEM_read64(ip1) | 1; + bits1 <<= nbBits; } } { { - int index = (int)(bits0 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op0, entry.sequence); - bits0 <<= entry.nbBits & 0x3F; - op0 += entry.length; - } - - { - int index = (int)(bits1 >> 53); + int index = (int)(bits3 >> 53); HUF_DEltX2 entry = dtable[index]; - MEM_write16(op1, entry.sequence); - bits1 <<= entry.nbBits & 0x3F; - op1 += entry.length; + MEM_write16(op3, entry.sequence); + bits3 <<= entry.nbBits & 0x3F; + op3 += entry.length; } { - int index = (int)(bits2 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op2, entry.sequence); - bits2 <<= entry.nbBits & 0x3F; - op2 += entry.length; + int ctz = (int)ZSTD_countTrailingZeros64(bits2); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + ip2 -= nbBytes; + bits2 = MEM_read64(ip2) | 1; + bits2 <<= nbBits; } } { - /* Decode one symbol from the final stream */ - int index = (int)(bits3 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op3, entry.sequence); - bits3 <<= entry.nbBits & 0x3F; - op3 += entry.length; - } - - { - { - { - /* Decode 4 symbols from the final stream & reload bitstreams. - * The final stream is reloaded last, meaning that all 5 symbols - * are decoded from the final stream before it is reloaded. 
- */ - int index = (int)(bits3 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op3, entry.sequence); - bits3 <<= entry.nbBits & 0x3F; - op3 += entry.length; - } - - { - int ctz = (int)ZSTD_countTrailingZeros64(bits0); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - ip0 -= nbBytes; - bits0 = MEM_read64(ip0) | 1; - bits0 <<= nbBits; - } - } - { - { - int index = (int)(bits3 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op3, entry.sequence); - bits3 <<= entry.nbBits & 0x3F; - op3 += entry.length; - } - - { - int ctz = (int)ZSTD_countTrailingZeros64(bits1); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - ip1 -= nbBytes; - bits1 = MEM_read64(ip1) | 1; - bits1 <<= nbBits; - } + int index = (int)(bits3 >> 53); + HUF_DEltX2 entry = dtable[index]; + MEM_write16(op3, entry.sequence); + bits3 <<= entry.nbBits & 0x3F; + op3 += entry.length; } { - { - int index = (int)(bits3 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op3, entry.sequence); - bits3 <<= entry.nbBits & 0x3F; - op3 += entry.length; - } - - { - int ctz = (int)ZSTD_countTrailingZeros64(bits2); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - ip2 -= nbBytes; - bits2 = MEM_read64(ip2) | 1; - bits2 <<= nbBits; - } + int ctz = (int)ZSTD_countTrailingZeros64(bits3); + int nbBits = ctz & 7; + int nbBytes = ctz >> 3; + ip3 -= nbBytes; + bits3 = MEM_read64(ip3) | 1; + bits3 <<= nbBits; } + } + } + } while (op3 < olimit); + } - { - { - int index = (int)(bits3 >> 53); - HUF_DEltX2 entry = dtable[index]; - MEM_write16(op3, entry.sequence); - bits3 <<= entry.nbBits & 0x3F; - op3 += entry.length; - } + _out: + args->bits[0] = bits0; + args->bits[1] = bits1; + args->bits[2] = bits2; + args->bits[3] = bits3; + args->ip.e0 = ip0; + args->ip.e1 = ip1; + args->ip.e2 = ip2; + args->ip.e3 = ip3; + args->op.e0 = op0; + args->op.e1 = op1; + args->op.e2 = op2; + args->op.e3 = op3; + } - { - int ctz = (int)ZSTD_countTrailingZeros64(bits3); - int nbBits = ctz & 7; - int nbBytes = ctz >> 3; - ip3 
-= nbBytes; - bits3 = MEM_read64(ip3) | 1; - bits3 <<= nbBits; - } - } - } - } while (op3 < olimit); + private static nuint HUF_decompress4X2_usingDTable_internal_fast( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + void* loopFn + ) + { + void* dt = DTable + 1; + byte* ilowest = (byte*)cSrc; + byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); + HUF_DecompressFastArgs args; + { + nuint ret = HUF_DecompressFastArgs_init( + &args, + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + { + nuint err_code = ret; + if (ERR_isError(err_code)) + { + return err_code; + } } - _out: - args->bits[0] = bits0; - args->bits[1] = bits1; - args->bits[2] = bits2; - args->bits[3] = bits3; - args->ip.e0 = ip0; - args->ip.e1 = ip1; - args->ip.e2 = ip2; - args->ip.e3 = ip3; - args->op.e0 = op0; - args->op.e1 = op1; - args->op.e2 = op2; - args->op.e3 = op3; + if (ret == 0) + return 0; } - private static nuint HUF_decompress4X2_usingDTable_internal_fast( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - void* loopFn - ) + assert(args.ip.e0 >= args.ilowest); + ((delegate* managed)loopFn)(&args); + assert(args.ip.e0 >= ilowest); + assert(args.ip.e1 >= ilowest); + assert(args.ip.e2 >= ilowest); + assert(args.ip.e3 >= ilowest); + assert(args.op.e3 <= oend); + assert(ilowest == args.ilowest); + assert(ilowest + 6 == args.iend.e0); { - void* dt = DTable + 1; - byte* ilowest = (byte*)cSrc; - byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); - HUF_DecompressFastArgs args; + nuint segmentSize = (dstSize + 3) / 4; + byte* segmentEnd = (byte*)dst; + int i; + for (i = 0; i < 4; ++i) { - nuint ret = HUF_DecompressFastArgs_init( - &args, - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + BIT_DStream_t bit; + if (segmentSize <= (nuint)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; { - nuint err_code = ret; + nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd); if 
(ERR_isError(err_code)) { return err_code; } } - if (ret == 0) - return 0; - } - - assert(args.ip.e0 >= args.ilowest); - ((delegate* managed)loopFn)(&args); - assert(args.ip.e0 >= ilowest); - assert(args.ip.e1 >= ilowest); - assert(args.ip.e2 >= ilowest); - assert(args.ip.e3 >= ilowest); - assert(args.op.e3 <= oend); - assert(ilowest == args.ilowest); - assert(ilowest + 6 == args.iend.e0); - { - nuint segmentSize = (dstSize + 3) / 4; - byte* segmentEnd = (byte*)dst; - int i; - for (i = 0; i < 4; ++i) - { - BIT_DStream_t bit; - if (segmentSize <= (nuint)(oend - segmentEnd)) - segmentEnd += segmentSize; - else - segmentEnd = oend; - { - nuint err_code = HUF_initRemainingDStream(&bit, &args, i, segmentEnd); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - (&args.op.e0)[i] += HUF_decodeStreamX2( - (&args.op.e0)[i], - &bit, - segmentEnd, - (HUF_DEltX2*)dt, - 11 + (&args.op.e0)[i] += HUF_decodeStreamX2( + (&args.op.e0)[i], + &bit, + segmentEnd, + (HUF_DEltX2*)dt, + 11 + ); + if ((&args.op.e0)[i] != segmentEnd) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - if ((&args.op.e0)[i] != segmentEnd) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } } - - return dstSize; } - private static nuint HUF_decompress4X2_usingDTable_internal( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) - { - void* fallbackFn = (delegate* managed)( - &HUF_decompress4X2_usingDTable_internal_default - ); - void* loopFn = (delegate* managed)( - &HUF_decompress4X2_usingDTable_internal_fast_c_loop - ); - if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) - { - nuint ret = HUF_decompress4X2_usingDTable_internal_fast( - dst, - dstSize, - cSrc, - cSrcSize, - DTable, - loopFn - ); - if (ret != 0) - return ret; - } - - return ((delegate* managed)fallbackFn)( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); - } + return dstSize; + } - private static nuint 
HUF_decompress1X2_usingDTable_internal( - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) + private static nuint HUF_decompress4X2_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + void* fallbackFn = (delegate* managed)( + &HUF_decompress4X2_usingDTable_internal_default + ); + void* loopFn = (delegate* managed)( + &HUF_decompress4X2_usingDTable_internal_fast_c_loop + ); + if ((flags & (int)HUF_flags_e.HUF_flags_disableFast) == 0) { - return HUF_decompress1X2_usingDTable_internal_body( + nuint ret = HUF_decompress4X2_usingDTable_internal_fast( dst, dstSize, cSrc, cSrcSize, - DTable + DTable, + loopFn ); + if (ret != 0) + return ret; } - private static nuint HUF_decompress1X2_DCtx_wksp( - uint* DCtx, - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) - { - byte* ip = (byte*)cSrc; - nuint hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (ERR_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - ip += hSize; - cSrcSize -= hSize; - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); - } + return ((delegate* managed)fallbackFn)( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } - private static nuint HUF_decompress4X2_DCtx_wksp( - uint* dctx, - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) - { - byte* ip = (byte*)cSrc; - nuint hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (ERR_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - ip += hSize; - cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); - 
} + private static nuint HUF_decompress1X2_usingDTable_internal( + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + return HUF_decompress1X2_usingDTable_internal_body( + dst, + dstSize, + cSrc, + cSrcSize, + DTable + ); + } + + private static nuint HUF_decompress1X2_DCtx_wksp( + uint* DCtx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + byte* ip = (byte*)cSrc; + nuint hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize, flags); + if (ERR_isError(hSize)) + return hSize; + if (hSize >= cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + ip += hSize; + cSrcSize -= hSize; + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); + } - private static readonly algo_time_t[][] algoTime = new algo_time_t[16][] + private static nuint HUF_decompress4X2_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + byte* ip = (byte*)cSrc; + nuint hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); + if (ERR_isError(hSize)) + return hSize; + if (hSize >= cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + ip += hSize; + cSrcSize -= hSize; + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); + } + + private static readonly algo_time_t[][] algoTime = new algo_time_t[16][] + { + new algo_time_t[2] { - new algo_time_t[2] - { - new algo_time_t(tableTime: 0, decode256Time: 0), - new algo_time_t(tableTime: 1, decode256Time: 1), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 0, decode256Time: 0), - new algo_time_t(tableTime: 1, decode256Time: 1), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 150, decode256Time: 216), - new algo_time_t(tableTime: 381, decode256Time: 119), 
- }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 170, decode256Time: 205), - new algo_time_t(tableTime: 514, decode256Time: 112), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 177, decode256Time: 199), - new algo_time_t(tableTime: 539, decode256Time: 110), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 197, decode256Time: 194), - new algo_time_t(tableTime: 644, decode256Time: 107), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 221, decode256Time: 192), - new algo_time_t(tableTime: 735, decode256Time: 107), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 256, decode256Time: 189), - new algo_time_t(tableTime: 881, decode256Time: 106), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 359, decode256Time: 188), - new algo_time_t(tableTime: 1167, decode256Time: 109), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 582, decode256Time: 187), - new algo_time_t(tableTime: 1570, decode256Time: 114), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 688, decode256Time: 187), - new algo_time_t(tableTime: 1712, decode256Time: 122), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 825, decode256Time: 186), - new algo_time_t(tableTime: 1965, decode256Time: 136), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 976, decode256Time: 185), - new algo_time_t(tableTime: 2131, decode256Time: 150), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 1180, decode256Time: 186), - new algo_time_t(tableTime: 2070, decode256Time: 175), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 1377, decode256Time: 185), - new algo_time_t(tableTime: 1731, decode256Time: 202), - }, - new algo_time_t[2] - { - new algo_time_t(tableTime: 1412, decode256Time: 185), - new algo_time_t(tableTime: 1695, decode256Time: 202), - }, - }; - - /** HUF_selectDecoder() : - * Tells which decoder is likely to decode faster, - * based on a set of pre-computed metrics. 
- * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . - * Assumption : 0 < dstSize <= 128 KB */ - private static uint HUF_selectDecoder(nuint dstSize, nuint cSrcSize) + new algo_time_t(tableTime: 0, decode256Time: 0), + new algo_time_t(tableTime: 1, decode256Time: 1), + }, + new algo_time_t[2] { - assert(dstSize > 0); - assert(dstSize <= 128 * 1024); - { - /* Q < 16 */ - uint Q = cSrcSize >= dstSize ? 15 : (uint)(cSrcSize * 16 / dstSize); - uint D256 = (uint)(dstSize >> 8); - uint DTime0 = algoTime[Q][0].tableTime + algoTime[Q][0].decode256Time * D256; - uint DTime1 = algoTime[Q][1].tableTime + algoTime[Q][1].decode256Time * D256; - DTime1 += DTime1 >> 5; - return DTime1 < DTime0 ? 1U : 0U; - } + new algo_time_t(tableTime: 0, decode256Time: 0), + new algo_time_t(tableTime: 1, decode256Time: 1), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 150, decode256Time: 216), + new algo_time_t(tableTime: 381, decode256Time: 119), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 170, decode256Time: 205), + new algo_time_t(tableTime: 514, decode256Time: 112), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 177, decode256Time: 199), + new algo_time_t(tableTime: 539, decode256Time: 110), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 197, decode256Time: 194), + new algo_time_t(tableTime: 644, decode256Time: 107), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 221, decode256Time: 192), + new algo_time_t(tableTime: 735, decode256Time: 107), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 256, decode256Time: 189), + new algo_time_t(tableTime: 881, decode256Time: 106), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 359, decode256Time: 188), + new algo_time_t(tableTime: 1167, decode256Time: 109), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 582, decode256Time: 187), + new algo_time_t(tableTime: 1570, decode256Time: 114), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 688, 
decode256Time: 187), + new algo_time_t(tableTime: 1712, decode256Time: 122), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 825, decode256Time: 186), + new algo_time_t(tableTime: 1965, decode256Time: 136), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 976, decode256Time: 185), + new algo_time_t(tableTime: 2131, decode256Time: 150), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 1180, decode256Time: 186), + new algo_time_t(tableTime: 2070, decode256Time: 175), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 1377, decode256Time: 185), + new algo_time_t(tableTime: 1731, decode256Time: 202), + }, + new algo_time_t[2] + { + new algo_time_t(tableTime: 1412, decode256Time: 185), + new algo_time_t(tableTime: 1695, decode256Time: 202), + }, + }; + + /** HUF_selectDecoder() : + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . + * Assumption : 0 < dstSize <= 128 KB */ + private static uint HUF_selectDecoder(nuint dstSize, nuint cSrcSize) + { + assert(dstSize > 0); + assert(dstSize <= 128 * 1024); + { + /* Q < 16 */ + uint Q = cSrcSize >= dstSize ? 15 : (uint)(cSrcSize * 16 / dstSize); + uint D256 = (uint)(dstSize >> 8); + uint DTime0 = algoTime[Q][0].tableTime + algoTime[Q][0].decode256Time * D256; + uint DTime1 = algoTime[Q][1].tableTime + algoTime[Q][1].decode256Time * D256; + DTime1 += DTime1 >> 5; + return DTime1 < DTime0 ? 
1U : 0U; } + } - private static nuint HUF_decompress1X_DCtx_wksp( - uint* dctx, - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) + private static nuint HUF_decompress1X_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + if (dstSize == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + if (cSrcSize > dstSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (cSrcSize == dstSize) { - if (dstSize == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - if (cSrcSize > dstSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (cSrcSize == dstSize) - { - memcpy(dst, cSrc, (uint)dstSize); - return dstSize; - } - - if (cSrcSize == 1) - { - memset(dst, *(byte*)cSrc, (uint)dstSize); - return dstSize; - } + memcpy(dst, cSrc, (uint)dstSize); + return dstSize; + } - { - uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb != 0 - ? HUF_decompress1X2_DCtx_wksp( - dctx, - dst, - dstSize, - cSrc, - cSrcSize, - workSpace, - wkspSize, - flags - ) - : HUF_decompress1X1_DCtx_wksp( - dctx, - dst, - dstSize, - cSrc, - cSrcSize, - workSpace, - wkspSize, - flags - ); - } + if (cSrcSize == 1) + { + memset(dst, *(byte*)cSrc, (uint)dstSize); + return dstSize; } - /* BMI2 variants. - * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. - */ - private static nuint HUF_decompress1X_usingDTable( - void* dst, - nuint maxDstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) { - DTableDesc dtd = HUF_getDTableDesc(DTable); - return dtd.tableType != 0 - ? HUF_decompress1X2_usingDTable_internal( + uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb != 0 + ? 
HUF_decompress1X2_DCtx_wksp( + dctx, dst, - maxDstSize, + dstSize, cSrc, cSrcSize, - DTable, + workSpace, + wkspSize, flags ) - : HUF_decompress1X1_usingDTable_internal( + : HUF_decompress1X1_DCtx_wksp( + dctx, dst, - maxDstSize, + dstSize, cSrc, cSrcSize, - DTable, + workSpace, + wkspSize, flags ); } + } - private static nuint HUF_decompress1X1_DCtx_wksp( - uint* dctx, - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) - { - byte* ip = (byte*)cSrc; - nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); - if (ERR_isError(hSize)) - return hSize; - if (hSize >= cSrcSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - ip += hSize; - cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); - } + /* BMI2 variants. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. + */ + private static nuint HUF_decompress1X_usingDTable( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + DTableDesc dtd = HUF_getDTableDesc(DTable); + return dtd.tableType != 0 + ? 
HUF_decompress1X2_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ) + : HUF_decompress1X1_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ); + } + + private static nuint HUF_decompress1X1_DCtx_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + byte* ip = (byte*)cSrc; + nuint hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); + if (ERR_isError(hSize)) + return hSize; + if (hSize >= cSrcSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + ip += hSize; + cSrcSize -= hSize; + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); + } + + private static nuint HUF_decompress4X_usingDTable( + void* dst, + nuint maxDstSize, + void* cSrc, + nuint cSrcSize, + uint* DTable, + int flags + ) + { + DTableDesc dtd = HUF_getDTableDesc(DTable); + return dtd.tableType != 0 + ? HUF_decompress4X2_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ) + : HUF_decompress4X1_usingDTable_internal( + dst, + maxDstSize, + cSrc, + cSrcSize, + DTable, + flags + ); + } - private static nuint HUF_decompress4X_usingDTable( - void* dst, - nuint maxDstSize, - void* cSrc, - nuint cSrcSize, - uint* DTable, - int flags - ) + private static nuint HUF_decompress4X_hufOnly_wksp( + uint* dctx, + void* dst, + nuint dstSize, + void* cSrc, + nuint cSrcSize, + void* workSpace, + nuint wkspSize, + int flags + ) + { + if (dstSize == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + if (cSrcSize == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); { - DTableDesc dtd = HUF_getDTableDesc(DTable); - return dtd.tableType != 0 - ? HUF_decompress4X2_usingDTable_internal( + uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb != 0 + ? 
HUF_decompress4X2_DCtx_wksp( + dctx, dst, - maxDstSize, + dstSize, cSrc, cSrcSize, - DTable, + workSpace, + wkspSize, flags ) - : HUF_decompress4X1_usingDTable_internal( + : HUF_decompress4X1_DCtx_wksp( + dctx, dst, - maxDstSize, + dstSize, cSrc, cSrcSize, - DTable, + workSpace, + wkspSize, flags ); } - - private static nuint HUF_decompress4X_hufOnly_wksp( - uint* dctx, - void* dst, - nuint dstSize, - void* cSrc, - nuint cSrcSize, - void* workSpace, - nuint wkspSize, - int flags - ) - { - if (dstSize == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - if (cSrcSize == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - { - uint algoNb = HUF_selectDecoder(dstSize, cSrcSize); - return algoNb != 0 - ? HUF_decompress4X2_DCtx_wksp( - dctx, - dst, - dstSize, - cSrc, - cSrcSize, - workSpace, - wkspSize, - flags - ) - : HUF_decompress4X1_DCtx_wksp( - dctx, - dst, - dstSize, - cSrc, - cSrcSize, - workSpace, - wkspSize, - flags - ); - } - } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs index 284a043a3..aa18dfc2e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs @@ -6,158 +6,157 @@ // ReSharper disable InconsistentNaming // ReSharper disable IdentifierTypo -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*-************************************************************** + * Memory I/O API + *****************************************************************/ + /*=== Static platform detection ===*/ + private static bool MEM_32bits { - /*-************************************************************** - * Memory I/O API - *****************************************************************/ - /*=== Static platform detection ===*/ - 
private static bool MEM_32bits - { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - get => sizeof(nint) == 4; - } - - private static bool MEM_64bits - { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - get => sizeof(nint) == 8; - } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - /* default method, safe and standard. - can sometimes prove slower */ - private static ushort MEM_read16(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); + get => sizeof(nint) == 4; + } + private static bool MEM_64bits + { [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint MEM_read32(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); + get => sizeof(nint) == 8; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong MEM_read64(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + /* default method, safe and standard. + can sometimes prove slower */ + private static ushort MEM_read16(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint MEM_readST(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint MEM_read32(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_write16(void* memPtr, ushort value) => - BclUnsafe.WriteUnaligned(memPtr, value); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong MEM_read64(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_write64(void* memPtr, ulong value) => - BclUnsafe.WriteUnaligned(memPtr, value); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint MEM_readST(void* memPtr) => BclUnsafe.ReadUnaligned(memPtr); - /*=== Little endian r/w ===*/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private 
static ushort MEM_readLE16(void* memPtr) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_write16(void* memPtr, ushort value) => + BclUnsafe.WriteUnaligned(memPtr, value); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_write64(void* memPtr, ulong value) => + BclUnsafe.WriteUnaligned(memPtr, value); + + /*=== Little endian r/w ===*/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ushort MEM_readLE16(void* memPtr) + { + var val = BclUnsafe.ReadUnaligned(memPtr); + if (!BitConverter.IsLittleEndian) { - var val = BclUnsafe.ReadUnaligned(memPtr); - if (!BitConverter.IsLittleEndian) - { - val = BinaryPrimitives.ReverseEndianness(val); - } - return val; + val = BinaryPrimitives.ReverseEndianness(val); } + return val; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_writeLE16(void* memPtr, ushort val) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_writeLE16(void* memPtr, ushort val) + { + if (!BitConverter.IsLittleEndian) { - if (!BitConverter.IsLittleEndian) - { - val = BinaryPrimitives.ReverseEndianness(val); - } - BclUnsafe.WriteUnaligned(memPtr, val); + val = BinaryPrimitives.ReverseEndianness(val); } + BclUnsafe.WriteUnaligned(memPtr, val); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint MEM_readLE24(void* memPtr) => - (uint)(MEM_readLE16(memPtr) + (((byte*)memPtr)[2] << 16)); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint MEM_readLE24(void* memPtr) => + (uint)(MEM_readLE16(memPtr) + (((byte*)memPtr)[2] << 16)); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_writeLE24(void* memPtr, uint val) - { - MEM_writeLE16(memPtr, (ushort)val); - ((byte*)memPtr)[2] = (byte)(val >> 16); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_writeLE24(void* memPtr, uint val) + { + MEM_writeLE16(memPtr, (ushort)val); 
+ ((byte*)memPtr)[2] = (byte)(val >> 16); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint MEM_readLE32(void* memPtr) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint MEM_readLE32(void* memPtr) + { + var val = BclUnsafe.ReadUnaligned(memPtr); + if (!BitConverter.IsLittleEndian) { - var val = BclUnsafe.ReadUnaligned(memPtr); - if (!BitConverter.IsLittleEndian) - { - val = BinaryPrimitives.ReverseEndianness(val); - } - return val; + val = BinaryPrimitives.ReverseEndianness(val); } + return val; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_writeLE32(void* memPtr, uint val32) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_writeLE32(void* memPtr, uint val32) + { + if (!BitConverter.IsLittleEndian) { - if (!BitConverter.IsLittleEndian) - { - val32 = BinaryPrimitives.ReverseEndianness(val32); - } - BclUnsafe.WriteUnaligned(memPtr, val32); + val32 = BinaryPrimitives.ReverseEndianness(val32); } + BclUnsafe.WriteUnaligned(memPtr, val32); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong MEM_readLE64(void* memPtr) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong MEM_readLE64(void* memPtr) + { + var val = BclUnsafe.ReadUnaligned(memPtr); + if (!BitConverter.IsLittleEndian) { - var val = BclUnsafe.ReadUnaligned(memPtr); - if (!BitConverter.IsLittleEndian) - { - val = BinaryPrimitives.ReverseEndianness(val); - } - return val; + val = BinaryPrimitives.ReverseEndianness(val); } + return val; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_writeLE64(void* memPtr, ulong val64) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_writeLE64(void* memPtr, ulong val64) + { + if (!BitConverter.IsLittleEndian) { - if (!BitConverter.IsLittleEndian) - { - val64 = BinaryPrimitives.ReverseEndianness(val64); - } - BclUnsafe.WriteUnaligned(memPtr, val64); 
+ val64 = BinaryPrimitives.ReverseEndianness(val64); } + BclUnsafe.WriteUnaligned(memPtr, val64); + } #if !NET8_0_OR_GREATER - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ReverseEndiannessNative(nuint val) => - MEM_32bits - ? BinaryPrimitives.ReverseEndianness((uint)val) - : (nuint)BinaryPrimitives.ReverseEndianness(val); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ReverseEndiannessNative(nuint val) => + MEM_32bits + ? BinaryPrimitives.ReverseEndianness((uint)val) + : (nuint)BinaryPrimitives.ReverseEndianness(val); #endif - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint MEM_readLEST(void* memPtr) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint MEM_readLEST(void* memPtr) + { + var val = BclUnsafe.ReadUnaligned(memPtr); + if (!BitConverter.IsLittleEndian) { - var val = BclUnsafe.ReadUnaligned(memPtr); - if (!BitConverter.IsLittleEndian) - { #if NET8_0_OR_GREATER val = BinaryPrimitives.ReverseEndianness(val); #else - val = ReverseEndiannessNative(val); + val = ReverseEndiannessNative(val); #endif - } - return val; } + return val; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void MEM_writeLEST(void* memPtr, nuint val) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void MEM_writeLEST(void* memPtr, nuint val) + { + if (!BitConverter.IsLittleEndian) { - if (!BitConverter.IsLittleEndian) - { #if NET8_0_OR_GREATER val = BinaryPrimitives.ReverseEndianness(val); #else - val = ReverseEndiannessNative(val); + val = ReverseEndiannessNative(val); #endif - } - BclUnsafe.WriteUnaligned(memPtr, val); } + BclUnsafe.WriteUnaligned(memPtr, val); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Pool.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Pool.cs new file mode 100644 index 000000000..a7a4e5fec --- /dev/null +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Pool.cs @@ -0,0 +1,122 @@ +using 
SharpCompress.Compressors.ZStandard.Unsafe; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; + +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods +{ + private static JobThreadPool GetThreadPool(void* ctx) => + UnmanagedObject.Unwrap(ctx); + + /* ZSTD_createThreadPool() : public access point */ + public static void* ZSTD_createThreadPool(nuint numThreads) + { + return POOL_create(numThreads, 0); + } + + /*! POOL_create() : + * Create a thread pool with at most `numThreads` threads. + * `numThreads` must be at least 1. + * The maximum number of queued jobs before blocking is `queueSize`. + * @return : POOL_ctx pointer on success, else NULL. + */ + private static void* POOL_create(nuint numThreads, nuint queueSize) + { + return POOL_create_advanced(numThreads, queueSize, Unsafe.Methods.ZSTD_defaultCMem); + } + + private static void* POOL_create_advanced( + nuint numThreads, + nuint queueSize, + ZSTD_customMem customMem + ) + { + var jobThreadPool = new JobThreadPool((int)numThreads, (int)queueSize); + return UnmanagedObject.Wrap(jobThreadPool); + } + + /*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. + */ + private static void POOL_join(void* ctx) + { + GetThreadPool(ctx).Join(); + } + + /*! POOL_free() : + * Free a thread pool returned by POOL_create(). + */ + private static void POOL_free(void* ctx) + { + if (ctx == null) + { + return; + } + + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Join(); + jobThreadPool.Dispose(); + UnmanagedObject.Free(ctx); + } + + /*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ + private static void POOL_joinJobs(void* ctx) + { + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Join(false); + } + + public static void ZSTD_freeThreadPool(void* pool) + { + POOL_free(pool); + } + + /*! 
POOL_sizeof() : + * @return threadpool memory usage + * note : compatible with NULL (returns 0 in this case) + */ + private static nuint POOL_sizeof(void* ctx) + { + if (ctx == null) + return 0; + var jobThreadPool = GetThreadPool(ctx); + return (nuint)jobThreadPool.Size(); + } + + /* @return : 0 on success, 1 on error */ + private static int POOL_resize(void* ctx, nuint numThreads) + { + if (ctx == null) + return 1; + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Resize((int)numThreads); + return 0; + } + + /*! POOL_add() : + * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. + * Possibly blocks until there is room in the queue. + * Note : The function may be executed asynchronously, + * therefore, `opaque` must live until function has been completed. + */ + private static void POOL_add(void* ctx, void* function, void* opaque) + { + assert(ctx != null); + var jobThreadPool = GetThreadPool(ctx); + jobThreadPool.Add(function, opaque); + } + + /*! POOL_tryAdd() : + * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. + * Returns immediately even if not (does not block). + * @return : 1 if successful, 0 if not. + */ + private static int POOL_tryAdd(void* ctx, void* function, void* opaque) + { + assert(ctx != null); + var jobThreadPool = GetThreadPool(ctx); + return jobThreadPool.TryAdd(function, opaque) ? 
1 : 0; + } +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs index e82e241ac..90f9dd50c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct RSyncState_t { - public struct RSyncState_t - { - public ulong hash; - public ulong hitMask; - public ulong primePower; - } -} + public ulong hash; + public ulong hitMask; + public ulong primePower; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs index 3829e85b9..fabf08b0a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Range.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ==== Serial State ==== */ +public unsafe struct Range { - /* ==== Serial State ==== */ - public unsafe struct Range - { - public void* start; - public nuint size; + public void* start; + public nuint size; - public Range(void* start, nuint size) - { - this.start = start; - this.size = size; - } + public Range(void* start, nuint size) + { + this.start = start; + this.size = size; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs index 15b5b2ca2..b9e5d8a35 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs @@ -1,36 +1,35 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct RawSeqStore_t { - public unsafe struct RawSeqStore_t - { - /* The start of the sequences */ - public 
rawSeq* seq; + /* The start of the sequences */ + public rawSeq* seq; - /* The index in seq where reading stopped. pos <= size. */ - public nuint pos; + /* The index in seq where reading stopped. pos <= size. */ + public nuint pos; - /* The position within the sequence at seq[pos] where reading - stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ - public nuint posInSequence; + /* The position within the sequence at seq[pos] where reading + stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ + public nuint posInSequence; - /* The number of sequences. <= capacity. */ - public nuint size; + /* The number of sequences. <= capacity. */ + public nuint size; - /* The capacity starting from `seq` pointer */ - public nuint capacity; + /* The capacity starting from `seq` pointer */ + public nuint capacity; - public RawSeqStore_t( - rawSeq* seq, - nuint pos, - nuint posInSequence, - nuint size, - nuint capacity - ) - { - this.seq = seq; - this.pos = pos; - this.posInSequence = posInSequence; - this.size = size; - this.capacity = capacity; - } + public RawSeqStore_t( + rawSeq* seq, + nuint pos, + nuint posInSequence, + nuint size, + nuint capacity + ) + { + this.seq = seq; + this.pos = pos; + this.posInSequence = posInSequence; + this.size = size; + this.capacity = capacity; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs index 90db6546f..ea0ed8a55 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs @@ -1,29 +1,28 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct RoundBuff_t { - public unsafe struct RoundBuff_t - { - /* The round input buffer. All jobs get references - * to pieces of the buffer. 
ZSTDMT_tryGetInputRange() - * handles handing out job input buffers, and makes - * sure it doesn't overlap with any pieces still in use. - */ - public byte* buffer; + /* The round input buffer. All jobs get references + * to pieces of the buffer. ZSTDMT_tryGetInputRange() + * handles handing out job input buffers, and makes + * sure it doesn't overlap with any pieces still in use. + */ + public byte* buffer; - /* The capacity of buffer. */ - public nuint capacity; + /* The capacity of buffer. */ + public nuint capacity; - /* The position of the current inBuff in the round - * buffer. Updated past the end if the inBuff once - * the inBuff is sent to the worker thread. - * pos <= capacity. - */ - public nuint pos; + /* The position of the current inBuff in the round + * buffer. Updated past the end if the inBuff once + * the inBuff is sent to the worker thread. + * pos <= capacity. + */ + public nuint pos; - public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) - { - this.buffer = buffer; - this.capacity = capacity; - this.pos = pos; - } + public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) + { + this.buffer = buffer; + this.capacity = capacity; + this.pos = pos; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs index b39d020f7..8026ed92a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqCollector.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct SeqCollector { - public unsafe struct SeqCollector - { - public int collectSequences; - public ZSTD_Sequence* seqStart; - public nuint seqIndex; - public nuint maxSequences; - } + public int collectSequences; + public ZSTD_Sequence* seqStart; + public nuint seqIndex; + public nuint maxSequences; } diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs index 20b849dc0..11c4aeba0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*********************************************** + * Sequences * + ***********************************************/ +public struct SeqDef_s { - /*********************************************** - * Sequences * - ***********************************************/ - public struct SeqDef_s - { - /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ - public uint offBase; - public ushort litLength; + /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ + public uint offBase; + public ushort litLength; - /* mlBase == matchLength - MINMATCH */ - public ushort mlBase; - } -} + /* mlBase == matchLength - MINMATCH */ + public ushort mlBase; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs index d4d5cb973..6aa751619 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqStore_t.cs @@ -1,28 +1,27 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct SeqStore_t { - public unsafe struct SeqStore_t - { - public SeqDef_s* sequencesStart; + public SeqDef_s* sequencesStart; - /* ptr to end of sequences */ - public SeqDef_s* sequences; - public byte* litStart; + /* ptr to end of sequences */ + public SeqDef_s* sequences; + public byte* litStart; - /* ptr to end of literals */ - public byte* lit; - public byte* llCode; - public byte* mlCode; - public byte* ofCode; - public nuint maxNbSeq; - public nuint maxNbLit; + /* ptr to end of literals */ + public 
byte* lit; + public byte* llCode; + public byte* mlCode; + public byte* ofCode; + public nuint maxNbSeq; + public nuint maxNbLit; - /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength - * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment - * the existing value of the litLength or matchLength by 0x10000. - */ - public ZSTD_longLengthType_e longLengthType; + /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength + * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment + * the existing value of the litLength or matchLength by 0x10000. + */ + public ZSTD_longLengthType_e longLengthType; - /* Index of the sequence to apply long length modification to */ - public uint longLengthPos; - } + /* Index of the sequence to apply long length modification to */ + public uint longLengthPos; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs index c88b175ca..91d403e3a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SerialState.cs @@ -1,24 +1,23 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct SerialState { - public unsafe struct SerialState - { - /* All variables in the struct are protected by mutex. */ - public void* mutex; - public void* cond; - public ZSTD_CCtx_params_s @params; - public ldmState_t ldmState; - public XXH64_state_s xxhState; - public uint nextJobID; + /* All variables in the struct are protected by mutex. */ + public void* mutex; + public void* cond; + public ZSTD_CCtx_params_s @params; + public ldmState_t ldmState; + public XXH64_state_s xxhState; + public uint nextJobID; - /* Protects ldmWindow. - * Must be acquired after the main mutex when acquiring both. 
- */ - public void* ldmWindowMutex; + /* Protects ldmWindow. + * Must be acquired after the main mutex when acquiring both. + */ + public void* ldmWindowMutex; - /* Signaled when ldmWindow is updated */ - public void* ldmWindowCond; + /* Signaled when ldmWindow is updated */ + public void* ldmWindowCond; - /* A thread-safe copy of ldmState.window */ - public ZSTD_window_t ldmWindow; - } + /* A thread-safe copy of ldmState.window */ + public ZSTD_window_t ldmWindow; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs index 3fc6e04c9..06d78d45a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SymbolEncodingType_e.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum SymbolEncodingType_e { - public enum SymbolEncodingType_e - { - set_basic, - set_rle, - set_compressed, - set_repeat, - } + set_basic, + set_rle, + set_compressed, + set_repeat, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs index db9ada1dc..fda2ea51e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SyncPoint.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct SyncPoint { - public struct SyncPoint - { - /* The number of bytes to load from the input. */ - public nuint toLoad; + /* The number of bytes to load from the input. */ + public nuint toLoad; - /* Boolean declaring if we must flush because we found a synchronization point. */ - public int flush; - } + /* Boolean declaring if we must flush because we found a synchronization point. 
*/ + public int flush; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs index c636b30f5..ecb030731 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @brief Canonical (big endian) representation of @ref XXH32_hash_t. + */ +public unsafe struct XXH32_canonical_t { - /*! - * @brief Canonical (big endian) representation of @ref XXH32_hash_t. - */ - public unsafe struct XXH32_canonical_t - { - /*!< Hash bytes, big endian */ - public fixed byte digest[4]; - } -} + /*!< Hash bytes, big endian */ + public fixed byte digest[4]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs index 413ae77f4..09559a84a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_state_s.cs @@ -1,35 +1,34 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. + * @see XXH64_state_s, XXH3_state_s + */ +public unsafe struct XXH32_state_s { - /*! - * @internal - * @brief Structure for XXH32 streaming API. - * - * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, - * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is - * an opaque type. 
This allows fields to safely be changed. - * - * Typedef'd to @ref XXH32_state_t. - * Do not access the members of this struct directly. - * @see XXH64_state_s, XXH3_state_s - */ - public unsafe struct XXH32_state_s - { - /*!< Total length hashed, modulo 2^32 */ - public uint total_len_32; + /*!< Total length hashed, modulo 2^32 */ + public uint total_len_32; - /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ - public uint large_len; + /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + public uint large_len; - /*!< Accumulator lanes */ - public fixed uint v[4]; + /*!< Accumulator lanes */ + public fixed uint v[4]; - /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ - public fixed uint mem32[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + public fixed uint mem32[4]; - /*!< Amount of data in @ref mem32 */ - public uint memsize; + /*!< Amount of data in @ref mem32 */ + public uint memsize; - /*!< Reserved field. Do not read nor write to it. */ - public uint reserved; - } + /*!< Reserved field. Do not read nor write to it. */ + public uint reserved; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs index 1f18d31ab..a50409885 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_canonical_t.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +public unsafe struct XXH64_canonical_t { - /*! - * @brief Canonical (big endian) representation of @ref XXH64_hash_t. 
- */ - public unsafe struct XXH64_canonical_t - { - public fixed byte digest[8]; - } + public fixed byte digest[8]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs index 448af8cec..d2503d08c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH64_state_s.cs @@ -1,35 +1,34 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ +public unsafe struct XXH64_state_s { - /*! - * @internal - * @brief Structure for XXH64 streaming API. - * - * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, - * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is - * an opaque type. This allows fields to safely be changed. - * - * Typedef'd to @ref XXH64_state_t. - * Do not access the members of this struct directly. - * @see XXH32_state_s, XXH3_state_s - */ - public unsafe struct XXH64_state_s - { - /*!< Total length hashed. This is always 64-bit. */ - public ulong total_len; + /*!< Total length hashed. This is always 64-bit. */ + public ulong total_len; - /*!< Accumulator lanes */ - public fixed ulong v[4]; + /*!< Accumulator lanes */ + public fixed ulong v[4]; - /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ - public fixed ulong mem64[4]; + /*!< Internal buffer for partial reads. Treated as unsigned char[32]. 
*/ + public fixed ulong mem64[4]; - /*!< Amount of data in @ref mem64 */ - public uint memsize; + /*!< Amount of data in @ref mem64 */ + public uint memsize; - /*!< Reserved field, needed for padding anyways*/ - public uint reserved32; + /*!< Reserved field, needed for padding anyways*/ + public uint reserved32; - /*!< Reserved field. Do not read or write to it. */ - public ulong reserved64; - } + /*!< Reserved field. Do not read or write to it. */ + public ulong reserved64; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs index 1a14c9178..57b988099 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_alignment.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. + */ +public enum XXH_alignment { - /*! - * @internal - * @brief Enum to indicate whether a pointer is aligned. - */ - public enum XXH_alignment - { - /*!< Aligned */ - XXH_aligned, + /*!< Aligned */ + XXH_aligned, - /*!< Possibly unaligned */ - XXH_unaligned, - } + /*!< Possibly unaligned */ + XXH_unaligned, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs index f88ee6364..eb65ceb14 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH_errorcode.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! + * @brief Exit code for the streaming API. + */ +public enum XXH_errorcode { - /*! - * @brief Exit code for the streaming API. 
- */ - public enum XXH_errorcode - { - /*!< OK */ - XXH_OK = 0, + /*!< OK */ + XXH_OK = 0, - /*!< Error */ - XXH_ERROR, - } + /*!< Error */ + XXH_ERROR, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs index ee25063c6..396a720dd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs @@ -2,645 +2,644 @@ using System.Buffers.Binary; using System.Numerics; using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ + private static void* XXH_malloc(nuint s) { - /*! - * @internal - * @brief Modify this function to use a different routine than malloc(). - */ - private static void* XXH_malloc(nuint s) - { - return malloc(s); - } - - /*! - * @internal - * @brief Modify this function to use a different routine than free(). - */ - private static void XXH_free(void* p) - { - free(p); - } + return malloc(s); + } - /*! - * @internal - * @brief Modify this function to use a different routine than memcpy(). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void XXH_memcpy(void* dest, void* src, nuint size) - { - memcpy(dest, src, (uint)size); - } + /*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ + private static void XXH_free(void* p) + { + free(p); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint XXH_readLE32(void* ptr) - { - return BitConverter.IsLittleEndian - ? MEM_read32(ptr) - : BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)); - } + /*! 
+ * @internal + * @brief Modify this function to use a different routine than memcpy(). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void XXH_memcpy(void* dest, void* src, nuint size) + { + memcpy(dest, src, (uint)size); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint XXH_readBE32(void* ptr) - { - return BitConverter.IsLittleEndian - ? BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)) - : MEM_read32(ptr); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint XXH_readLE32(void* ptr) + { + return BitConverter.IsLittleEndian + ? MEM_read32(ptr) + : BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)); + } - private static uint XXH_readLE32_align(void* ptr, XXH_alignment align) - { - if (align == XXH_alignment.XXH_unaligned) - { - return XXH_readLE32(ptr); - } - else - { - return BitConverter.IsLittleEndian - ? *(uint*)ptr - : BinaryPrimitives.ReverseEndianness(*(uint*)ptr); - } - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint XXH_readBE32(void* ptr) + { + return BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(MEM_read32(ptr)) + : MEM_read32(ptr); + } - /* ************************************* - * Misc - ***************************************/ - /*! @ingroup public */ - private static uint ZSTD_XXH_versionNumber() + private static uint XXH_readLE32_align(void* ptr, XXH_alignment align) + { + if (align == XXH_alignment.XXH_unaligned) { - return 0 * 100 * 100 + 8 * 100 + 2; + return XXH_readLE32(ptr); } - - /*! - * @internal - * @brief Normal stripe processing routine. - * - * This shuffles the bits so that any bit from @p input impacts several bits in - * @p acc. - * - * @param acc The accumulator lane. - * @param input The stripe of input to mix. - * @return The mixed accumulator lane. 
- */ - private static uint XXH32_round(uint acc, uint input) + else { - acc += input * 0x85EBCA77U; - acc = BitOperations.RotateLeft(acc, 13); - acc *= 0x9E3779B1U; - return acc; + return BitConverter.IsLittleEndian + ? *(uint*)ptr + : BinaryPrimitives.ReverseEndianness(*(uint*)ptr); } + } - /*! - * @internal - * @brief Mixes all bits to finalize the hash. - * - * The final mix ensures that all input bits have a chance to impact any bit in - * the output digest, resulting in an unbiased distribution. - * - * @param hash The hash to avalanche. - * @return The avalanched hash. - */ - private static uint XXH32_avalanche(uint hash) - { - hash ^= hash >> 15; - hash *= 0x85EBCA77U; - hash ^= hash >> 13; - hash *= 0xC2B2AE3DU; - hash ^= hash >> 16; - return hash; - } + /* ************************************* + * Misc + ***************************************/ + /*! @ingroup public */ + private static uint ZSTD_XXH_versionNumber() + { + return 0 * 100 * 100 + 8 * 100 + 2; + } - /*! - * @internal - * @brief Processes the last 0-15 bytes of @p ptr. - * - * There may be up to 15 bytes remaining to consume from the input. - * This final stage will digest them to ensure that all input bytes are present - * in the final mix. - * - * @param hash The hash to finalize. - * @param ptr The pointer to the remaining input. - * @param len The remaining length, modulo 16. - * @param align Whether @p ptr is aligned. - * @return The finalized hash. - * @see XXH64_finalize(). - */ - private static uint XXH32_finalize(uint hash, byte* ptr, nuint len, XXH_alignment align) - { - len &= 15; - while (len >= 4) - { - { - hash += XXH_readLE32_align(ptr, align) * 0xC2B2AE3DU; - ptr += 4; - hash = BitOperations.RotateLeft(hash, 17) * 0x27D4EB2FU; - } + /*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. 
+ * @return The mixed accumulator lane. + */ + private static uint XXH32_round(uint acc, uint input) + { + acc += input * 0x85EBCA77U; + acc = BitOperations.RotateLeft(acc, 13); + acc *= 0x9E3779B1U; + return acc; + } - len -= 4; - } + /*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ + private static uint XXH32_avalanche(uint hash) + { + hash ^= hash >> 15; + hash *= 0x85EBCA77U; + hash ^= hash >> 13; + hash *= 0xC2B2AE3DU; + hash ^= hash >> 16; + return hash; + } - while (len > 0) + /*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ + private static uint XXH32_finalize(uint hash, byte* ptr, nuint len, XXH_alignment align) + { + len &= 15; + while (len >= 4) + { { - { - hash += *ptr++ * 0x165667B1U; - hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B1U; - } - - --len; + hash += XXH_readLE32_align(ptr, align) * 0xC2B2AE3DU; + ptr += 4; + hash = BitOperations.RotateLeft(hash, 17) * 0x27D4EB2FU; } - return XXH32_avalanche(hash); + len -= 4; } - /*! - * @internal - * @brief The implementation for @ref XXH32(). - * - * @param input , len , seed Directly passed from @ref XXH32(). - * @param align Whether @p input is aligned. - * @return The calculated hash. 
- */ - private static uint XXH32_endian_align( - byte* input, - nuint len, - uint seed, - XXH_alignment align - ) + while (len > 0) { - uint h32; - if (len >= 16) { - byte* bEnd = input + len; - byte* limit = bEnd - 15; - uint v1 = seed + 0x9E3779B1U + 0x85EBCA77U; - uint v2 = seed + 0x85EBCA77U; - uint v3 = seed + 0; - uint v4 = seed - 0x9E3779B1U; - do - { - v1 = XXH32_round(v1, XXH_readLE32_align(input, align)); - input += 4; - v2 = XXH32_round(v2, XXH_readLE32_align(input, align)); - input += 4; - v3 = XXH32_round(v3, XXH_readLE32_align(input, align)); - input += 4; - v4 = XXH32_round(v4, XXH_readLE32_align(input, align)); - input += 4; - } while (input < limit); - h32 = - BitOperations.RotateLeft(v1, 1) - + BitOperations.RotateLeft(v2, 7) - + BitOperations.RotateLeft(v3, 12) - + BitOperations.RotateLeft(v4, 18); - } - else - { - h32 = seed + 0x165667B1U; + hash += *ptr++ * 0x165667B1U; + hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B1U; } - h32 += (uint)len; - return XXH32_finalize(h32, input, len & 15, align); + --len; } - /*! @ingroup XXH32_family */ - private static uint ZSTD_XXH32(void* input, nuint len, uint seed) - { - return XXH32_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); - } + return XXH32_avalanche(hash); + } - /*! @ingroup XXH32_family */ - private static XXH32_state_s* ZSTD_XXH32_createState() - { - return (XXH32_state_s*)XXH_malloc((nuint)sizeof(XXH32_state_s)); - } + /*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. + * @return The calculated hash. 
+ */ + private static uint XXH32_endian_align( + byte* input, + nuint len, + uint seed, + XXH_alignment align + ) + { + uint h32; + if (len >= 16) + { + byte* bEnd = input + len; + byte* limit = bEnd - 15; + uint v1 = seed + 0x9E3779B1U + 0x85EBCA77U; + uint v2 = seed + 0x85EBCA77U; + uint v3 = seed + 0; + uint v4 = seed - 0x9E3779B1U; + do + { + v1 = XXH32_round(v1, XXH_readLE32_align(input, align)); + input += 4; + v2 = XXH32_round(v2, XXH_readLE32_align(input, align)); + input += 4; + v3 = XXH32_round(v3, XXH_readLE32_align(input, align)); + input += 4; + v4 = XXH32_round(v4, XXH_readLE32_align(input, align)); + input += 4; + } while (input < limit); + h32 = + BitOperations.RotateLeft(v1, 1) + + BitOperations.RotateLeft(v2, 7) + + BitOperations.RotateLeft(v3, 12) + + BitOperations.RotateLeft(v4, 18); + } + else + { + h32 = seed + 0x165667B1U; + } + + h32 += (uint)len; + return XXH32_finalize(h32, input, len & 15, align); + } - /*! @ingroup XXH32_family */ - private static XXH_errorcode ZSTD_XXH32_freeState(XXH32_state_s* statePtr) - { - XXH_free(statePtr); - return XXH_errorcode.XXH_OK; - } + /*! @ingroup XXH32_family */ + private static uint ZSTD_XXH32(void* input, nuint len, uint seed) + { + return XXH32_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); + } - /*! @ingroup XXH32_family */ - private static void ZSTD_XXH32_copyState(XXH32_state_s* dstState, XXH32_state_s* srcState) - { - XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH32_state_s)); - } + /*! @ingroup XXH32_family */ + private static XXH32_state_s* ZSTD_XXH32_createState() + { + return (XXH32_state_s*)XXH_malloc((nuint)sizeof(XXH32_state_s)); + } + + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_freeState(XXH32_state_s* statePtr) + { + XXH_free(statePtr); + return XXH_errorcode.XXH_OK; + } + + /*! 
@ingroup XXH32_family */ + private static void ZSTD_XXH32_copyState(XXH32_state_s* dstState, XXH32_state_s* srcState) + { + XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH32_state_s)); + } + + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_reset(XXH32_state_s* statePtr, uint seed) + { + *statePtr = new XXH32_state_s(); + statePtr->v[0] = seed + 0x9E3779B1U + 0x85EBCA77U; + statePtr->v[1] = seed + 0x85EBCA77U; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - 0x9E3779B1U; + return XXH_errorcode.XXH_OK; + } - /*! @ingroup XXH32_family */ - private static XXH_errorcode ZSTD_XXH32_reset(XXH32_state_s* statePtr, uint seed) + /*! @ingroup XXH32_family */ + private static XXH_errorcode ZSTD_XXH32_update(XXH32_state_s* state, void* input, nuint len) + { + if (input == null) { - *statePtr = new XXH32_state_s(); - statePtr->v[0] = seed + 0x9E3779B1U + 0x85EBCA77U; - statePtr->v[1] = seed + 0x85EBCA77U; - statePtr->v[2] = seed + 0; - statePtr->v[3] = seed - 0x9E3779B1U; return XXH_errorcode.XXH_OK; } - /*! @ingroup XXH32_family */ - private static XXH_errorcode ZSTD_XXH32_update(XXH32_state_s* state, void* input, nuint len) { - if (input == null) + byte* p = (byte*)input; + byte* bEnd = p + len; + state->total_len_32 += (uint)len; + state->large_len |= len >= 16 || state->total_len_32 >= 16 ? 1U : 0U; + if (state->memsize + len < 16) { + XXH_memcpy((byte*)state->mem32 + state->memsize, input, len); + state->memsize += (uint)len; return XXH_errorcode.XXH_OK; } + if (state->memsize != 0) { - byte* p = (byte*)input; - byte* bEnd = p + len; - state->total_len_32 += (uint)len; - state->large_len |= len >= 16 || state->total_len_32 >= 16 ? 
1U : 0U; - if (state->memsize + len < 16) + XXH_memcpy((byte*)state->mem32 + state->memsize, input, 16 - state->memsize); { - XXH_memcpy((byte*)state->mem32 + state->memsize, input, len); - state->memsize += (uint)len; - return XXH_errorcode.XXH_OK; + uint* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); + p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); + p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); + p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); } - if (state->memsize != 0) - { - XXH_memcpy((byte*)state->mem32 + state->memsize, input, 16 - state->memsize); - { - uint* p32 = state->mem32; - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); - p32++; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); - p32++; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); - p32++; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); - } - - p += 16 - state->memsize; - state->memsize = 0; - } - - if (p <= bEnd - 16) - { - byte* limit = bEnd - 16; - do - { - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); - p += 4; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); - p += 4; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); - p += 4; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); - p += 4; - } while (p <= limit); - } - - if (p < bEnd) - { - XXH_memcpy(state->mem32, p, (nuint)(bEnd - p)); - state->memsize = (uint)(bEnd - p); - } + p += 16 - state->memsize; + state->memsize = 0; } - return XXH_errorcode.XXH_OK; - } - - /*! 
@ingroup XXH32_family */ - private static uint ZSTD_XXH32_digest(XXH32_state_s* state) - { - uint h32; - if (state->large_len != 0) + if (p <= bEnd - 16) { - h32 = - BitOperations.RotateLeft(state->v[0], 1) - + BitOperations.RotateLeft(state->v[1], 7) - + BitOperations.RotateLeft(state->v[2], 12) - + BitOperations.RotateLeft(state->v[3], 18); + byte* limit = bEnd - 16; + do + { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); + p += 4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); + p += 4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); + p += 4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); + p += 4; + } while (p <= limit); } - else + + if (p < bEnd) { - h32 = state->v[2] + 0x165667B1U; + XXH_memcpy(state->mem32, p, (nuint)(bEnd - p)); + state->memsize = (uint)(bEnd - p); } - - h32 += state->total_len_32; - return XXH32_finalize( - h32, - (byte*)state->mem32, - state->memsize, - XXH_alignment.XXH_aligned - ); } - /*! @ingroup XXH32_family */ - private static void ZSTD_XXH32_canonicalFromHash(XXH32_canonical_t* dst, uint hash) - { - assert(sizeof(XXH32_canonical_t) == sizeof(uint)); - if (BitConverter.IsLittleEndian) - hash = BinaryPrimitives.ReverseEndianness(hash); - XXH_memcpy(dst, &hash, (nuint)sizeof(XXH32_canonical_t)); - } + return XXH_errorcode.XXH_OK; + } - /*! @ingroup XXH32_family */ - private static uint ZSTD_XXH32_hashFromCanonical(XXH32_canonical_t* src) + /*! @ingroup XXH32_family */ + private static uint ZSTD_XXH32_digest(XXH32_state_s* state) + { + uint h32; + if (state->large_len != 0) { - return XXH_readBE32(src); + h32 = + BitOperations.RotateLeft(state->v[0], 1) + + BitOperations.RotateLeft(state->v[1], 7) + + BitOperations.RotateLeft(state->v[2], 12) + + BitOperations.RotateLeft(state->v[3], 18); } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong XXH_readLE64(void* ptr) + else { - return BitConverter.IsLittleEndian - ? 
MEM_read64(ptr) - : BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)); + h32 = state->v[2] + 0x165667B1U; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong XXH_readBE64(void* ptr) - { - return BitConverter.IsLittleEndian - ? BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)) - : MEM_read64(ptr); - } + h32 += state->total_len_32; + return XXH32_finalize( + h32, + (byte*)state->mem32, + state->memsize, + XXH_alignment.XXH_aligned + ); + } - private static ulong XXH_readLE64_align(void* ptr, XXH_alignment align) - { - if (align == XXH_alignment.XXH_unaligned) - return XXH_readLE64(ptr); - else - return BitConverter.IsLittleEndian - ? *(ulong*)ptr - : BinaryPrimitives.ReverseEndianness(*(ulong*)ptr); - } + /*! @ingroup XXH32_family */ + private static void ZSTD_XXH32_canonicalFromHash(XXH32_canonical_t* dst, uint hash) + { + assert(sizeof(XXH32_canonical_t) == sizeof(uint)); + if (BitConverter.IsLittleEndian) + hash = BinaryPrimitives.ReverseEndianness(hash); + XXH_memcpy(dst, &hash, (nuint)sizeof(XXH32_canonical_t)); + } - /*! @copydoc XXH32_round */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong XXH64_round(ulong acc, ulong input) - { - acc += input * 0xC2B2AE3D27D4EB4FUL; - acc = BitOperations.RotateLeft(acc, 31); - acc *= 0x9E3779B185EBCA87UL; - return acc; - } + /*! @ingroup XXH32_family */ + private static uint ZSTD_XXH32_hashFromCanonical(XXH32_canonical_t* src) + { + return XXH_readBE32(src); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong XXH64_mergeRound(ulong acc, ulong val) - { - val = XXH64_round(0, val); - acc ^= val; - acc = acc * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; - return acc; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH_readLE64(void* ptr) + { + return BitConverter.IsLittleEndian + ? MEM_read64(ptr) + : BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)); + } - /*! 
@copydoc XXH32_avalanche */ - private static ulong XXH64_avalanche(ulong hash) - { - hash ^= hash >> 33; - hash *= 0xC2B2AE3D27D4EB4FUL; - hash ^= hash >> 29; - hash *= 0x165667B19E3779F9UL; - hash ^= hash >> 32; - return hash; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH_readBE64(void* ptr) + { + return BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(MEM_read64(ptr)) + : MEM_read64(ptr); + } - /*! - * @internal - * @brief Processes the last 0-31 bytes of @p ptr. - * - * There may be up to 31 bytes remaining to consume from the input. - * This final stage will digest them to ensure that all input bytes are present - * in the final mix. - * - * @param hash The hash to finalize. - * @param ptr The pointer to the remaining input. - * @param len The remaining length, modulo 32. - * @param align Whether @p ptr is aligned. - * @return The finalized hash - * @see XXH32_finalize(). - */ - private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignment align) - { - len &= 31; - while (len >= 8) - { - ulong k1 = XXH64_round(0, XXH_readLE64_align(ptr, align)); - ptr += 8; - hash ^= k1; - hash = - BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL - + 0x85EBCA77C2B2AE63UL; - len -= 8; - } + private static ulong XXH_readLE64_align(void* ptr, XXH_alignment align) + { + if (align == XXH_alignment.XXH_unaligned) + return XXH_readLE64(ptr); + else + return BitConverter.IsLittleEndian + ? *(ulong*)ptr + : BinaryPrimitives.ReverseEndianness(*(ulong*)ptr); + } - if (len >= 4) - { - hash ^= XXH_readLE32_align(ptr, align) * 0x9E3779B185EBCA87UL; - ptr += 4; - hash = - BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL - + 0x165667B19E3779F9UL; - len -= 4; - } + /*! 
@copydoc XXH32_round */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH64_round(ulong acc, ulong input) + { + acc += input * 0xC2B2AE3D27D4EB4FUL; + acc = BitOperations.RotateLeft(acc, 31); + acc *= 0x9E3779B185EBCA87UL; + return acc; + } - while (len > 0) - { - hash ^= *ptr++ * 0x27D4EB2F165667C5UL; - hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B185EBCA87UL; - --len; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong XXH64_mergeRound(ulong acc, ulong val) + { + val = XXH64_round(0, val); + acc ^= val; + acc = acc * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; + return acc; + } - return XXH64_avalanche(hash); - } + /*! @copydoc XXH32_avalanche */ + private static ulong XXH64_avalanche(ulong hash) + { + hash ^= hash >> 33; + hash *= 0xC2B2AE3D27D4EB4FUL; + hash ^= hash >> 29; + hash *= 0x165667B19E3779F9UL; + hash ^= hash >> 32; + return hash; + } - /*! - * @internal - * @brief The implementation for @ref XXH64(). - * - * @param input , len , seed Directly passed from @ref XXH64(). - * @param align Whether @p input is aligned. - * @return The calculated hash. - */ - private static ulong XXH64_endian_align( - byte* input, - nuint len, - ulong seed, - XXH_alignment align - ) + /*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). 
+ */ + private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignment align) + { + len &= 31; + while (len >= 8) { - ulong h64; - if (len >= 32) - { - byte* bEnd = input + len; - byte* limit = bEnd - 31; - ulong v1 = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; - ulong v2 = seed + 0xC2B2AE3D27D4EB4FUL; - ulong v3 = seed + 0; - ulong v4 = seed - 0x9E3779B185EBCA87UL; - do - { - v1 = XXH64_round(v1, XXH_readLE64_align(input, align)); - input += 8; - v2 = XXH64_round(v2, XXH_readLE64_align(input, align)); - input += 8; - v3 = XXH64_round(v3, XXH_readLE64_align(input, align)); - input += 8; - v4 = XXH64_round(v4, XXH_readLE64_align(input, align)); - input += 8; - } while (input < limit); - h64 = - BitOperations.RotateLeft(v1, 1) - + BitOperations.RotateLeft(v2, 7) - + BitOperations.RotateLeft(v3, 12) - + BitOperations.RotateLeft(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } - else - { - h64 = seed + 0x27D4EB2F165667C5UL; - } - - h64 += len; - return XXH64_finalize(h64, input, len, align); + ulong k1 = XXH64_round(0, XXH_readLE64_align(ptr, align)); + ptr += 8; + hash ^= k1; + hash = + BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL + + 0x85EBCA77C2B2AE63UL; + len -= 8; } - /*! @ingroup XXH64_family */ - private static ulong ZSTD_XXH64(void* input, nuint len, ulong seed) + if (len >= 4) { - return XXH64_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); + hash ^= XXH_readLE32_align(ptr, align) * 0x9E3779B185EBCA87UL; + ptr += 4; + hash = + BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL + + 0x165667B19E3779F9UL; + len -= 4; } - /*! 
@ingroup XXH64_family*/ - private static XXH64_state_s* ZSTD_XXH64_createState() + while (len > 0) { - return (XXH64_state_s*)XXH_malloc((nuint)sizeof(XXH64_state_s)); + hash ^= *ptr++ * 0x27D4EB2F165667C5UL; + hash = BitOperations.RotateLeft(hash, 11) * 0x9E3779B185EBCA87UL; + --len; } - /*! @ingroup XXH64_family */ - private static XXH_errorcode ZSTD_XXH64_freeState(XXH64_state_s* statePtr) - { - XXH_free(statePtr); - return XXH_errorcode.XXH_OK; - } + return XXH64_avalanche(hash); + } - /*! @ingroup XXH64_family */ - private static void ZSTD_XXH64_copyState(XXH64_state_s* dstState, XXH64_state_s* srcState) - { - XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH64_state_s)); - } + /*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ + private static ulong XXH64_endian_align( + byte* input, + nuint len, + ulong seed, + XXH_alignment align + ) + { + ulong h64; + if (len >= 32) + { + byte* bEnd = input + len; + byte* limit = bEnd - 31; + ulong v1 = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; + ulong v2 = seed + 0xC2B2AE3D27D4EB4FUL; + ulong v3 = seed + 0; + ulong v4 = seed - 0x9E3779B185EBCA87UL; + do + { + v1 = XXH64_round(v1, XXH_readLE64_align(input, align)); + input += 8; + v2 = XXH64_round(v2, XXH_readLE64_align(input, align)); + input += 8; + v3 = XXH64_round(v3, XXH_readLE64_align(input, align)); + input += 8; + v4 = XXH64_round(v4, XXH_readLE64_align(input, align)); + input += 8; + } while (input < limit); + h64 = + BitOperations.RotateLeft(v1, 1) + + BitOperations.RotateLeft(v2, 7) + + BitOperations.RotateLeft(v3, 12) + + BitOperations.RotateLeft(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } + else + { + h64 = seed + 0x27D4EB2F165667C5UL; + } + + h64 += len; + return 
XXH64_finalize(h64, input, len, align); + } + + /*! @ingroup XXH64_family */ + private static ulong ZSTD_XXH64(void* input, nuint len, ulong seed) + { + return XXH64_endian_align((byte*)input, len, seed, XXH_alignment.XXH_unaligned); + } + + /*! @ingroup XXH64_family*/ + private static XXH64_state_s* ZSTD_XXH64_createState() + { + return (XXH64_state_s*)XXH_malloc((nuint)sizeof(XXH64_state_s)); + } + + /*! @ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_freeState(XXH64_state_s* statePtr) + { + XXH_free(statePtr); + return XXH_errorcode.XXH_OK; + } - /*! @ingroup XXH64_family */ - private static XXH_errorcode ZSTD_XXH64_reset(XXH64_state_s* statePtr, ulong seed) + /*! @ingroup XXH64_family */ + private static void ZSTD_XXH64_copyState(XXH64_state_s* dstState, XXH64_state_s* srcState) + { + XXH_memcpy(dstState, srcState, (nuint)sizeof(XXH64_state_s)); + } + + /*! @ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_reset(XXH64_state_s* statePtr, ulong seed) + { + *statePtr = new XXH64_state_s(); + statePtr->v[0] = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; + statePtr->v[1] = seed + 0xC2B2AE3D27D4EB4FUL; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - 0x9E3779B185EBCA87UL; + return XXH_errorcode.XXH_OK; + } + + /*! @ingroup XXH64_family */ + private static XXH_errorcode ZSTD_XXH64_update(XXH64_state_s* state, void* input, nuint len) + { + if (input == null) { - *statePtr = new XXH64_state_s(); - statePtr->v[0] = seed + 0x9E3779B185EBCA87UL + 0xC2B2AE3D27D4EB4FUL; - statePtr->v[1] = seed + 0xC2B2AE3D27D4EB4FUL; - statePtr->v[2] = seed + 0; - statePtr->v[3] = seed - 0x9E3779B185EBCA87UL; return XXH_errorcode.XXH_OK; } - /*! 
@ingroup XXH64_family */ - private static XXH_errorcode ZSTD_XXH64_update(XXH64_state_s* state, void* input, nuint len) { - if (input == null) + byte* p = (byte*)input; + byte* bEnd = p + len; + state->total_len += len; + if (state->memsize + len < 32) { + XXH_memcpy((byte*)state->mem64 + state->memsize, input, len); + state->memsize += (uint)len; return XXH_errorcode.XXH_OK; } + if (state->memsize != 0) { - byte* p = (byte*)input; - byte* bEnd = p + len; - state->total_len += len; - if (state->memsize + len < 32) - { - XXH_memcpy((byte*)state->mem64 + state->memsize, input, len); - state->memsize += (uint)len; - return XXH_errorcode.XXH_OK; - } - - if (state->memsize != 0) - { - XXH_memcpy((byte*)state->mem64 + state->memsize, input, 32 - state->memsize); - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64 + 0)); - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64 + 1)); - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64 + 2)); - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64 + 3)); - p += 32 - state->memsize; - state->memsize = 0; - } - - if (p + 32 <= bEnd) - { - byte* limit = bEnd - 32; - do - { - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); - p += 8; - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); - p += 8; - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); - p += 8; - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); - p += 8; - } while (p <= limit); - } - - if (p < bEnd) - { - XXH_memcpy(state->mem64, p, (nuint)(bEnd - p)); - state->memsize = (uint)(bEnd - p); - } + XXH_memcpy((byte*)state->mem64 + state->memsize, input, 32 - state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64 + 0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64 + 1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64 + 2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64 + 3)); + p += 32 - 
state->memsize; + state->memsize = 0; } - return XXH_errorcode.XXH_OK; - } - - /*! @ingroup XXH64_family */ - private static ulong ZSTD_XXH64_digest(XXH64_state_s* state) - { - ulong h64; - if (state->total_len >= 32) + if (p + 32 <= bEnd) { - h64 = - BitOperations.RotateLeft(state->v[0], 1) - + BitOperations.RotateLeft(state->v[1], 7) - + BitOperations.RotateLeft(state->v[2], 12) - + BitOperations.RotateLeft(state->v[3], 18); - h64 = XXH64_mergeRound(h64, state->v[0]); - h64 = XXH64_mergeRound(h64, state->v[1]); - h64 = XXH64_mergeRound(h64, state->v[2]); - h64 = XXH64_mergeRound(h64, state->v[3]); + byte* limit = bEnd - 32; + do + { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); + p += 8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); + p += 8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); + p += 8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); + p += 8; + } while (p <= limit); } - else + + if (p < bEnd) { - h64 = state->v[2] + 0x27D4EB2F165667C5UL; + XXH_memcpy(state->mem64, p, (nuint)(bEnd - p)); + state->memsize = (uint)(bEnd - p); } - - h64 += state->total_len; - return XXH64_finalize( - h64, - (byte*)state->mem64, - (nuint)state->total_len, - XXH_alignment.XXH_aligned - ); } - /*! @ingroup XXH64_family */ - private static void ZSTD_XXH64_canonicalFromHash(XXH64_canonical_t* dst, ulong hash) - { - assert(sizeof(XXH64_canonical_t) == sizeof(ulong)); - if (BitConverter.IsLittleEndian) - hash = BinaryPrimitives.ReverseEndianness(hash); - XXH_memcpy(dst, &hash, (nuint)sizeof(XXH64_canonical_t)); - } + return XXH_errorcode.XXH_OK; + } - /*! @ingroup XXH64_family */ - private static ulong ZSTD_XXH64_hashFromCanonical(XXH64_canonical_t* src) - { - return XXH_readBE64(src); - } + /*! 
@ingroup XXH64_family */ + private static ulong ZSTD_XXH64_digest(XXH64_state_s* state) + { + ulong h64; + if (state->total_len >= 32) + { + h64 = + BitOperations.RotateLeft(state->v[0], 1) + + BitOperations.RotateLeft(state->v[1], 7) + + BitOperations.RotateLeft(state->v[2], 12) + + BitOperations.RotateLeft(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } + else + { + h64 = state->v[2] + 0x27D4EB2F165667C5UL; + } + + h64 += state->total_len; + return XXH64_finalize( + h64, + (byte*)state->mem64, + (nuint)state->total_len, + XXH_alignment.XXH_aligned + ); + } + + /*! @ingroup XXH64_family */ + private static void ZSTD_XXH64_canonicalFromHash(XXH64_canonical_t* dst, ulong hash) + { + assert(sizeof(XXH64_canonical_t) == sizeof(ulong)); + if (BitConverter.IsLittleEndian) + hash = BinaryPrimitives.ReverseEndianness(hash); + XXH_memcpy(dst, &hash, (nuint)sizeof(XXH64_canonical_t)); + } + + /*! @ingroup XXH64_family */ + private static ulong ZSTD_XXH64_hashFromCanonical(XXH64_canonical_t* src) + { + return XXH_readBE64(src); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs index 9913870c6..39811dfd4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs @@ -1,31 +1,30 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*! ZDICT_cover_params_t: + * k and d are the only required parameters. + * For others, value 0 means default. + */ +public struct ZDICT_cover_params_t { - /*! ZDICT_cover_params_t: - * k and d are the only required parameters. - * For others, value 0 means default. 
- */ - public struct ZDICT_cover_params_t - { - /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - public uint k; + /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + public uint k; - /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - public uint d; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + public uint d; - /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ - public uint steps; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + public uint steps; - /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - public uint nbThreads; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + public uint nbThreads; - /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ - public double splitPoint; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ + public double splitPoint; - /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 
0 means no shrinking and 1 means shrinking */ - public uint shrinkDict; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + public uint shrinkDict; - /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ - public uint shrinkDictMaxRegression; - public ZDICT_params_t zParams; - } -} + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ + public uint shrinkDictMaxRegression; + public ZDICT_params_t zParams; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs index 8fa765d8f..a5af135db 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_fastCover_params_t.cs @@ -1,33 +1,32 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZDICT_fastCover_params_t { - public struct ZDICT_fastCover_params_t - { - /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - public uint k; + /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + public uint k; - /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - public uint d; + /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + public uint d; - /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ - public uint f; + /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ + public uint f; - /* Number of steps : Only used for optimization : 0 means default (40) : Higher means 
more parameters checked */ - public uint steps; + /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + public uint steps; - /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - public uint nbThreads; + /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + public uint nbThreads; - /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ - public double splitPoint; + /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ + public double splitPoint; - /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ - public uint accel; + /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ + public uint accel; - /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ - public uint shrinkDict; + /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 
0 means no shrinking and 1 means shrinking */ + public uint shrinkDict; - /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ - public uint shrinkDictMaxRegression; - public ZDICT_params_t zParams; - } + /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ + public uint shrinkDictMaxRegression; + public ZDICT_params_t zParams; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs index 001db2b90..fd061e592 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_legacy_params_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZDICT_legacy_params_t { - public struct ZDICT_legacy_params_t - { - /* 0 means default; larger => select more => larger dictionary */ - public uint selectivityLevel; - public ZDICT_params_t zParams; - } + /* 0 means default; larger => select more => larger dictionary */ + public uint selectivityLevel; + public ZDICT_params_t zParams; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs index 6609d6289..6c3fe7588 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_params_t.cs @@ -1,21 +1,20 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZDICT_params_t { - public struct ZDICT_params_t - { - /**< optimize for a specific zstd compression level; 0 means default */ - public int compressionLevel; + /**< optimize for a specific zstd compression level; 0 means default */ + public 
int compressionLevel; - /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - public uint notificationLevel; + /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + public uint notificationLevel; - /**< force dictID value; 0 means auto mode (32-bits random value) - * NOTE: The zstd format reserves some dictionary IDs for future use. - * You may use them in private settings, but be warned that they - * may be used by zstd in a public dictionary registry in the future. - * These dictionary IDs are: - * - low range : <= 32767 - * - high range : >= (2^31) - */ - public uint dictID; - } + /**< force dictID value; 0 means auto mode (32-bits random value) + * NOTE: The zstd format reserves some dictionary IDs for future use. + * You may use them in private settings, but be warned that they + * may be used by zstd in a public dictionary registry in the future. + * These dictionary IDs are: + * - low range : <= 32767 + * - high range : >= (2^31) + */ + public uint dictID; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs index 3694a0d78..a9c7fccc4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtxPool.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ===== CCtx Pool ===== */ +/* a single CCtx Pool can be invoked from multiple threads in parallel */ +public unsafe struct ZSTDMT_CCtxPool { - /* ===== CCtx Pool ===== */ - /* a single CCtx Pool can be invoked from multiple threads in parallel */ - public unsafe struct ZSTDMT_CCtxPool - { - public void* poolMutex; - public int totalCCtx; - public int availCCtx; - public ZSTD_customMem cMem; - public ZSTD_CCtx_s** cctxs; - } + public void* poolMutex; + public int totalCCtx; + public 
int availCCtx; + public ZSTD_customMem cMem; + public ZSTD_CCtx_s** cctxs; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs index 3daf397fe..3e4e2cfe7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_CCtx_s.cs @@ -1,33 +1,32 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTDMT_CCtx_s { - public unsafe struct ZSTDMT_CCtx_s - { - public void* factory; - public ZSTDMT_jobDescription* jobs; - public ZSTDMT_bufferPool_s* bufPool; - public ZSTDMT_CCtxPool* cctxPool; - public ZSTDMT_bufferPool_s* seqPool; - public ZSTD_CCtx_params_s @params; - public nuint targetSectionSize; - public nuint targetPrefixSize; + public void* factory; + public ZSTDMT_jobDescription* jobs; + public ZSTDMT_bufferPool_s* bufPool; + public ZSTDMT_CCtxPool* cctxPool; + public ZSTDMT_bufferPool_s* seqPool; + public ZSTD_CCtx_params_s @params; + public nuint targetSectionSize; + public nuint targetPrefixSize; - /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ - public int jobReady; - public InBuff_t inBuff; - public RoundBuff_t roundBuff; - public SerialState serial; - public RSyncState_t rsync; - public uint jobIDMask; - public uint doneJobID; - public uint nextJobID; - public uint frameEnded; - public uint allJobsCompleted; - public ulong frameContentSize; - public ulong consumed; - public ulong produced; - public ZSTD_customMem cMem; - public ZSTD_CDict_s* cdictLocal; - public ZSTD_CDict_s* cdict; - public uint providedFactory; - } + /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. 
*/ + public int jobReady; + public InBuff_t inBuff; + public RoundBuff_t roundBuff; + public SerialState serial; + public RSyncState_t rsync; + public uint jobIDMask; + public uint doneJobID; + public uint nextJobID; + public uint frameEnded; + public uint allJobsCompleted; + public ulong frameContentSize; + public ulong consumed; + public ulong produced; + public ZSTD_customMem cMem; + public ZSTD_CDict_s* cdictLocal; + public ZSTD_CDict_s* cdict; + public uint providedFactory; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs index 79d0aceb0..1b9d33b46 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTDMT_bufferPool_s { - public unsafe struct ZSTDMT_bufferPool_s - { - public void* poolMutex; - public nuint bufferSize; - public uint totalBuffers; - public uint nbBuffers; - public ZSTD_customMem cMem; - public buffer_s* buffers; - } -} + public void* poolMutex; + public nuint bufferSize; + public uint totalBuffers; + public uint nbBuffers; + public ZSTD_customMem cMem; + public buffer_s* buffers; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs index f138a4e90..cb2c18f63 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_jobDescription.cs @@ -1,62 +1,61 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTDMT_jobDescription { - public unsafe struct ZSTDMT_jobDescription - { - /* SHARED - set0 by mtctx, then modified by worker AND read by 
mtctx */ - public nuint consumed; + /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ + public nuint consumed; - /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ - public nuint cSize; + /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ + public nuint cSize; - /* Thread-safe - used by mtctx and worker */ - public void* job_mutex; + /* Thread-safe - used by mtctx and worker */ + public void* job_mutex; - /* Thread-safe - used by mtctx and worker */ - public void* job_cond; + /* Thread-safe - used by mtctx and worker */ + public void* job_cond; - /* Thread-safe - used by mtctx and (all) workers */ - public ZSTDMT_CCtxPool* cctxPool; + /* Thread-safe - used by mtctx and (all) workers */ + public ZSTDMT_CCtxPool* cctxPool; - /* Thread-safe - used by mtctx and (all) workers */ - public ZSTDMT_bufferPool_s* bufPool; + /* Thread-safe - used by mtctx and (all) workers */ + public ZSTDMT_bufferPool_s* bufPool; - /* Thread-safe - used by mtctx and (all) workers */ - public ZSTDMT_bufferPool_s* seqPool; + /* Thread-safe - used by mtctx and (all) workers */ + public ZSTDMT_bufferPool_s* seqPool; - /* Thread-safe - used by mtctx and (all) workers */ - public SerialState* serial; + /* Thread-safe - used by mtctx and (all) workers */ + public SerialState* serial; - /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ - public buffer_s dstBuff; + /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ + public buffer_s dstBuff; - /* set by mtctx, then read by worker & mtctx => no barrier */ - public Range prefix; + /* set by mtctx, then read by worker & mtctx => no barrier */ + public Range prefix; - /* set by mtctx, then read by worker & mtctx => no barrier */ - public Range src; + /* set by mtctx, then read by worker & mtctx => no barrier */ + public Range src; - /* set by mtctx, then read 
by worker => no barrier */ - public uint jobID; + /* set by mtctx, then read by worker => no barrier */ + public uint jobID; - /* set by mtctx, then read by worker => no barrier */ - public uint firstJob; + /* set by mtctx, then read by worker => no barrier */ + public uint firstJob; - /* set by mtctx, then read by worker => no barrier */ - public uint lastJob; + /* set by mtctx, then read by worker => no barrier */ + public uint lastJob; - /* set by mtctx, then read by worker => no barrier */ - public ZSTD_CCtx_params_s @params; + /* set by mtctx, then read by worker => no barrier */ + public ZSTD_CCtx_params_s @params; - /* set by mtctx, then read by worker => no barrier */ - public ZSTD_CDict_s* cdict; + /* set by mtctx, then read by worker => no barrier */ + public ZSTD_CDict_s* cdict; - /* set by mtctx, then read by worker => no barrier */ - public ulong fullFrameSize; + /* set by mtctx, then read by worker => no barrier */ + public ulong fullFrameSize; - /* used only by mtctx */ - public nuint dstFlushed; + /* used only by mtctx */ + public nuint dstFlushed; - /* used only by mtctx */ - public uint frameChecksumNeeded; - } + /* used only by mtctx */ + public uint frameChecksumNeeded; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs index f65e81c70..08a44808a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BlockCompressor_f.cs @@ -1,13 +1,12 @@ using System.Runtime.InteropServices; -namespace ZstdSharp.Unsafe -{ - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - public unsafe delegate nuint ZSTD_BlockCompressor_f( - ZSTD_MatchState_t* bs, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ); -} +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +[UnmanagedFunctionPointer(CallingConvention.Cdecl)] +public unsafe delegate nuint 
ZSTD_BlockCompressor_f( + ZSTD_MatchState_t* bs, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize +); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs index 1020f6958..afdeff1d8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildCTableWksp.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_BuildCTableWksp { - public unsafe struct ZSTD_BuildCTableWksp - { - public fixed short norm[53]; - public fixed uint wksp[285]; - } + public fixed short norm[53]; + public fixed uint wksp[285]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs index 3f0b7d177..5378fc7e4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_BuildSeqStore_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_BuildSeqStore_e { - public enum ZSTD_BuildSeqStore_e - { - ZSTDbss_compress, - ZSTDbss_noCompress, - } + ZSTDbss_compress, + ZSTDbss_noCompress, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs index 8281cc693..e7a45dbe0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CCtx_params_s.cs @@ -1,87 +1,86 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_CCtx_params_s { - public unsafe struct ZSTD_CCtx_params_s - { - public ZSTD_format_e format; - public ZSTD_compressionParameters 
cParams; - public ZSTD_frameParameters fParams; - public int compressionLevel; - - /* force back-references to respect limit of - * 1< 0: - * If litLength != 0: - * rep == 1 --> offset == repeat_offset_1 - * rep == 2 --> offset == repeat_offset_2 - * rep == 3 --> offset == repeat_offset_3 - * If litLength == 0: - * rep == 1 --> offset == repeat_offset_2 - * rep == 2 --> offset == repeat_offset_3 - * rep == 3 --> offset == repeat_offset_1 - 1 - * - * Note: This field is optional. ZSTD_generateSequences() will calculate the value of - * 'rep', but repeat offsets do not necessarily need to be calculated from an external - * sequence provider perspective. For example, ZSTD_compressSequences() does not - * use this 'rep' field at all (as of now). - */ - public uint rep; - } + /* Represents which repeat offset is represented by the field 'offset'. + * Ranges from [0, 3]. + * + * Repeat offsets are essentially previous offsets from previous sequences sorted in + * recency order. For more detail, see doc/zstd_compression_format.md + * + * If rep == 0, then 'offset' does not contain a repeat offset. + * If rep > 0: + * If litLength != 0: + * rep == 1 --> offset == repeat_offset_1 + * rep == 2 --> offset == repeat_offset_2 + * rep == 3 --> offset == repeat_offset_3 + * If litLength == 0: + * rep == 1 --> offset == repeat_offset_2 + * rep == 2 --> offset == repeat_offset_3 + * rep == 3 --> offset == repeat_offset_1 - 1 + * + * Note: This field is optional. ZSTD_generateSequences() will calculate the value of + * 'rep', but repeat offsets do not necessarily need to be calculated from an external + * sequence provider perspective. For example, ZSTD_compressSequences() does not + * use this 'rep' field at all (as of now). 
+ */ + public uint rep; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs index 6fa4dc761..80e67fc9b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequenceLength.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_SequenceLength { - public struct ZSTD_SequenceLength - { - public uint litLength; - public uint matchLength; - } + public uint litLength; + public uint matchLength; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs index ef70ab74c..3b2d2d6f9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_SequencePosition { - public struct ZSTD_SequencePosition - { - /* Index in array of ZSTD_Sequence */ - public uint idx; + /* Index in array of ZSTD_Sequence */ + public uint idx; - /* Position within sequence at idx */ - public uint posInSequence; + /* Position within sequence at idx */ + public uint posInSequence; - /* Number of bytes given by sequences provided so far */ - public nuint posInSrc; - } -} + /* Number of bytes given by sequences provided so far */ + public nuint posInSrc; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs index 9364e2b3f..b83e4e1f0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockSplitCtx.cs @@ -1,13 
+1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_blockSplitCtx { - public unsafe struct ZSTD_blockSplitCtx - { - public SeqStore_t fullSeqStoreChunk; - public SeqStore_t firstHalfSeqStore; - public SeqStore_t secondHalfSeqStore; - public SeqStore_t currSeqStore; - public SeqStore_t nextSeqStore; - public fixed uint partitions[196]; - public ZSTD_entropyCTablesMetadata_t entropyMetadata; - } + public SeqStore_t fullSeqStoreChunk; + public SeqStore_t firstHalfSeqStore; + public SeqStore_t secondHalfSeqStore; + public SeqStore_t currSeqStore; + public SeqStore_t nextSeqStore; + public fixed uint partitions[196]; + public ZSTD_entropyCTablesMetadata_t entropyMetadata; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs index b630708ed..5dd067a1f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_blockState_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_blockState_t { - public unsafe struct ZSTD_blockState_t - { - public ZSTD_compressedBlockState_t* prevCBlock; - public ZSTD_compressedBlockState_t* nextCBlock; - public ZSTD_MatchState_t matchState; - } + public ZSTD_compressedBlockState_t* prevCBlock; + public ZSTD_compressedBlockState_t* nextCBlock; + public ZSTD_MatchState_t matchState; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs index 32fe2db8a..d82a2932f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bounds.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_bounds { - public 
struct ZSTD_bounds - { - public nuint error; - public int lowerBound; - public int upperBound; - } + public nuint error; + public int lowerBound; + public int upperBound; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs index bbde458cb..6ae23f314 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_bufferMode_e.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Controls whether the input/output buffer is buffered or stable. */ +public enum ZSTD_bufferMode_e { - /* Controls whether the input/output buffer is buffered or stable. */ - public enum ZSTD_bufferMode_e - { - /* Buffer the input/output */ - ZSTD_bm_buffered = 0, + /* Buffer the input/output */ + ZSTD_bm_buffered = 0, - /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ - ZSTD_bm_stable = 1, - } + /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ + ZSTD_bm_stable = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs index 387d11965..3ecc07f5b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Indicates whether this compression proceeds directly from user-provided + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or + * whether the context needs to buffer the input/output (ZSTDb_buffered). 
+ */ +public enum ZSTD_buffered_policy_e { - /** - * Indicates whether this compression proceeds directly from user-provided - * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or - * whether the context needs to buffer the input/output (ZSTDb_buffered). - */ - public enum ZSTD_buffered_policy_e - { - ZSTDb_not_buffered, - ZSTDb_buffered, - } -} + ZSTDb_not_buffered, + ZSTDb_buffered, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs index c05b92fa1..cfe1f38a9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cParameter.cs @@ -1,219 +1,218 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_cParameter { - public enum ZSTD_cParameter - { - /* Set compression parameters according to pre-defined cLevel table. - * Note that exact compression parameters are dynamically determined, - * depending on both compression level and srcSize (when known). - * Default level is ZSTD_CLEVEL_DEFAULT==3. - * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. - * Note 1 : it's possible to pass a negative compression level. - * Note 2 : setting a level does not automatically set all other compression parameters - * to default. Setting this will however eventually dynamically impact the compression - * parameters which have not been manually set. The manually set - * ones will 'stick'. */ - ZSTD_c_compressionLevel = 100, - - /* Maximum allowed back-reference distance, expressed as power of 2. - * This will set a memory budget for streaming decompression, - * with larger values requiring more memory - * and typically compressing more. - * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. - * Special: value 0 means "use default windowLog". 
- * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT - * requires explicitly allowing such size at streaming decompression stage. */ - ZSTD_c_windowLog = 101, - - /* Size of the initial probe table, as a power of 2. - * Resulting memory usage is (1 << (hashLog+2)). - * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. - * Larger tables improve compression ratio of strategies <= dFast, - * and improve speed of strategies > dFast. - * Special: value 0 means "use default hashLog". */ - ZSTD_c_hashLog = 102, - - /* Size of the multi-probe search table, as a power of 2. - * Resulting memory usage is (1 << (chainLog+2)). - * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. - * Larger tables result in better and slower compression. - * This parameter is useless for "fast" strategy. - * It's still useful when using "dfast" strategy, - * in which case it defines a secondary probe table. - * Special: value 0 means "use default chainLog". */ - ZSTD_c_chainLog = 103, - - /* Number of search attempts, as a power of 2. - * More attempts result in better and slower compression. - * This parameter is useless for "fast" and "dFast" strategies. - * Special: value 0 means "use default searchLog". */ - ZSTD_c_searchLog = 104, - - /* Minimum size of searched matches. - * Note that Zstandard can still find matches of smaller size, - * it just tweaks its search algorithm to look for this size and larger. - * Larger values increase compression and decompression speed, but decrease ratio. - * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. - * Note that currently, for all strategies < btopt, effective minimum is 4. - * , for all strategies > fast, effective maximum is 6. - * Special: value 0 means "use default minMatchLength". */ - ZSTD_c_minMatch = 105, - - /* Impact of this field depends on strategy. - * For strategies btopt, btultra & btultra2: - * Length of Match considered "good enough" to stop search. 
- * Larger values make compression stronger, and slower. - * For strategy fast: - * Distance between match sampling. - * Larger values make compression faster, and weaker. - * Special: value 0 means "use default targetLength". */ - ZSTD_c_targetLength = 106, - - /* See ZSTD_strategy enum definition. - * The higher the value of selected strategy, the more complex it is, - * resulting in stronger and slower compression. - * Special: value 0 means "use default strategy". */ - ZSTD_c_strategy = 107, - - /* v1.5.6+ - * Attempts to fit compressed block size into approximately targetCBlockSize. - * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. - * Note that it's not a guarantee, just a convergence target (default:0). - * No target when targetCBlockSize == 0. - * This is helpful in low bandwidth streaming environments to improve end-to-end latency, - * when a client can make use of partial documents (a prominent example being Chrome). - * Note: this parameter is stable since v1.5.6. - * It was present as an experimental parameter in earlier versions, - * but it's not recommended using it with earlier library versions - * due to massive performance regressions. - */ - ZSTD_c_targetCBlockSize = 130, - - /* Enable long distance matching. - * This parameter is designed to improve compression ratio - * for large inputs, by finding large matches at long distance. - * It increases memory usage and window size. - * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB - * except when expressly set to a different value. - * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and - * compression strategy >= ZSTD_btopt (== compression level 16+) */ - ZSTD_c_enableLongDistanceMatching = 160, - - /* Size of the table for long distance matching, as a power of 2. - * Larger values increase memory usage and compression ratio, - * but decrease compression speed. 
- * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX - * default: windowlog - 7. - * Special: value 0 means "automatically determine hashlog". */ - ZSTD_c_ldmHashLog = 161, - - /* Minimum match size for long distance matcher. - * Larger/too small values usually decrease compression ratio. - * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. - * Special: value 0 means "use default value" (default: 64). */ - ZSTD_c_ldmMinMatch = 162, - - /* Log size of each bucket in the LDM hash table for collision resolution. - * Larger values improve collision resolution but decrease compression speed. - * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. - * Special: value 0 means "use default value" (default: 3). */ - ZSTD_c_ldmBucketSizeLog = 163, - - /* Frequency of inserting/looking up entries into the LDM hash table. - * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). - * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. - * Larger values improve compression speed. - * Deviating far from default value will likely result in a compression ratio decrease. - * Special: value 0 means "automatically determine hashRateLog". */ - ZSTD_c_ldmHashRateLog = 164, - - /* Content size will be written into frame header _whenever known_ (default:1) - * Content size must be known at the beginning of compression. - * This is automatically the case when using ZSTD_compress2(), - * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ - ZSTD_c_contentSizeFlag = 200, - - /* A 32-bits checksum of content is written at end of frame (default:0) */ - ZSTD_c_checksumFlag = 201, - - /* When applicable, dictionary's ID is written into frame header (default:1) */ - ZSTD_c_dictIDFlag = 202, - - /* Select how many threads will be spawned to compress in parallel. 
- * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : - * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, - * while compression is performed in parallel, within worker thread(s). - * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : - * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). - * More workers improve speed, but also increase memory usage. - * Default value is `0`, aka "single-threaded mode" : no worker is spawned, - * compression is performed inside Caller's thread, and all invocations are blocking */ - ZSTD_c_nbWorkers = 400, - - /* Size of a compression job. This value is enforced only when nbWorkers >= 1. - * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. - * 0 means default, which is dynamically determined based on compression parameters. - * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. - * The minimum size is automatically and transparently enforced. */ - ZSTD_c_jobSize = 401, - - /* Control the overlap size, as a fraction of window size. - * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. - * It helps preserve compression ratio, while each job is compressed in parallel. - * This value is enforced only when nbWorkers >= 1. - * Larger values increase compression ratio, but decrease speed. - * Possible values range from 0 to 9 : - * - 0 means "default" : value will be determined by the library, depending on strategy - * - 1 means "no overlap" - * - 9 means "full overlap", using a full window size. 
- * Each intermediate rank increases/decreases load size by a factor 2 : - * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default - * default value varies between 6 and 9, depending on strategy */ - ZSTD_c_overlapLog = 402, - - /* note : additional experimental parameters are also available - * within the experimental section of the API. - * At the time of this writing, they include : - * ZSTD_c_rsyncable - * ZSTD_c_format - * ZSTD_c_forceMaxWindow - * ZSTD_c_forceAttachDict - * ZSTD_c_literalCompressionMode - * ZSTD_c_srcSizeHint - * ZSTD_c_enableDedicatedDictSearch - * ZSTD_c_stableInBuffer - * ZSTD_c_stableOutBuffer - * ZSTD_c_blockDelimiters - * ZSTD_c_validateSequences - * ZSTD_c_blockSplitterLevel - * ZSTD_c_splitAfterSequences - * ZSTD_c_useRowMatchFinder - * ZSTD_c_prefetchCDictTables - * ZSTD_c_enableSeqProducerFallback - * ZSTD_c_maxBlockSize - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? names directly; - * also, the enums values themselves are unstable and can still change. - */ - ZSTD_c_experimentalParam1 = 500, - ZSTD_c_experimentalParam2 = 10, - ZSTD_c_experimentalParam3 = 1000, - ZSTD_c_experimentalParam4 = 1001, - ZSTD_c_experimentalParam5 = 1002, - - /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ - ZSTD_c_experimentalParam7 = 1004, - ZSTD_c_experimentalParam8 = 1005, - ZSTD_c_experimentalParam9 = 1006, - ZSTD_c_experimentalParam10 = 1007, - ZSTD_c_experimentalParam11 = 1008, - ZSTD_c_experimentalParam12 = 1009, - ZSTD_c_experimentalParam13 = 1010, - ZSTD_c_experimentalParam14 = 1011, - ZSTD_c_experimentalParam15 = 1012, - ZSTD_c_experimentalParam16 = 1013, - ZSTD_c_experimentalParam17 = 1014, - ZSTD_c_experimentalParam18 = 1015, - ZSTD_c_experimentalParam19 = 1016, - ZSTD_c_experimentalParam20 = 1017, - } + /* Set compression parameters according to pre-defined cLevel table. 
+ * Note that exact compression parameters are dynamically determined, + * depending on both compression level and srcSize (when known). + * Default level is ZSTD_CLEVEL_DEFAULT==3. + * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. + * Note 1 : it's possible to pass a negative compression level. + * Note 2 : setting a level does not automatically set all other compression parameters + * to default. Setting this will however eventually dynamically impact the compression + * parameters which have not been manually set. The manually set + * ones will 'stick'. */ + ZSTD_c_compressionLevel = 100, + + /* Maximum allowed back-reference distance, expressed as power of 2. + * This will set a memory budget for streaming decompression, + * with larger values requiring more memory + * and typically compressing more. + * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. + * Special: value 0 means "use default windowLog". + * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT + * requires explicitly allowing such size at streaming decompression stage. */ + ZSTD_c_windowLog = 101, + + /* Size of the initial probe table, as a power of 2. + * Resulting memory usage is (1 << (hashLog+2)). + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. + * Larger tables improve compression ratio of strategies <= dFast, + * and improve speed of strategies > dFast. + * Special: value 0 means "use default hashLog". */ + ZSTD_c_hashLog = 102, + + /* Size of the multi-probe search table, as a power of 2. + * Resulting memory usage is (1 << (chainLog+2)). + * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. + * Larger tables result in better and slower compression. + * This parameter is useless for "fast" strategy. + * It's still useful when using "dfast" strategy, + * in which case it defines a secondary probe table. + * Special: value 0 means "use default chainLog". 
*/ + ZSTD_c_chainLog = 103, + + /* Number of search attempts, as a power of 2. + * More attempts result in better and slower compression. + * This parameter is useless for "fast" and "dFast" strategies. + * Special: value 0 means "use default searchLog". */ + ZSTD_c_searchLog = 104, + + /* Minimum size of searched matches. + * Note that Zstandard can still find matches of smaller size, + * it just tweaks its search algorithm to look for this size and larger. + * Larger values increase compression and decompression speed, but decrease ratio. + * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. + * Note that currently, for all strategies < btopt, effective minimum is 4. + * , for all strategies > fast, effective maximum is 6. + * Special: value 0 means "use default minMatchLength". */ + ZSTD_c_minMatch = 105, + + /* Impact of this field depends on strategy. + * For strategies btopt, btultra & btultra2: + * Length of Match considered "good enough" to stop search. + * Larger values make compression stronger, and slower. + * For strategy fast: + * Distance between match sampling. + * Larger values make compression faster, and weaker. + * Special: value 0 means "use default targetLength". */ + ZSTD_c_targetLength = 106, + + /* See ZSTD_strategy enum definition. + * The higher the value of selected strategy, the more complex it is, + * resulting in stronger and slower compression. + * Special: value 0 means "use default strategy". */ + ZSTD_c_strategy = 107, + + /* v1.5.6+ + * Attempts to fit compressed block size into approximately targetCBlockSize. + * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. + * Note that it's not a guarantee, just a convergence target (default:0). + * No target when targetCBlockSize == 0. + * This is helpful in low bandwidth streaming environments to improve end-to-end latency, + * when a client can make use of partial documents (a prominent example being Chrome). 
+ * Note: this parameter is stable since v1.5.6. + * It was present as an experimental parameter in earlier versions, + * but it's not recommended using it with earlier library versions + * due to massive performance regressions. + */ + ZSTD_c_targetCBlockSize = 130, + + /* Enable long distance matching. + * This parameter is designed to improve compression ratio + * for large inputs, by finding large matches at long distance. + * It increases memory usage and window size. + * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB + * except when expressly set to a different value. + * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and + * compression strategy >= ZSTD_btopt (== compression level 16+) */ + ZSTD_c_enableLongDistanceMatching = 160, + + /* Size of the table for long distance matching, as a power of 2. + * Larger values increase memory usage and compression ratio, + * but decrease compression speed. + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX + * default: windowlog - 7. + * Special: value 0 means "automatically determine hashlog". */ + ZSTD_c_ldmHashLog = 161, + + /* Minimum match size for long distance matcher. + * Larger/too small values usually decrease compression ratio. + * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. + * Special: value 0 means "use default value" (default: 64). */ + ZSTD_c_ldmMinMatch = 162, + + /* Log size of each bucket in the LDM hash table for collision resolution. + * Larger values improve collision resolution but decrease compression speed. + * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. + * Special: value 0 means "use default value" (default: 3). */ + ZSTD_c_ldmBucketSizeLog = 163, + + /* Frequency of inserting/looking up entries into the LDM hash table. + * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). + * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. 
+ * Larger values improve compression speed. + * Deviating far from default value will likely result in a compression ratio decrease. + * Special: value 0 means "automatically determine hashRateLog". */ + ZSTD_c_ldmHashRateLog = 164, + + /* Content size will be written into frame header _whenever known_ (default:1) + * Content size must be known at the beginning of compression. + * This is automatically the case when using ZSTD_compress2(), + * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ + ZSTD_c_contentSizeFlag = 200, + + /* A 32-bits checksum of content is written at end of frame (default:0) */ + ZSTD_c_checksumFlag = 201, + + /* When applicable, dictionary's ID is written into frame header (default:1) */ + ZSTD_c_dictIDFlag = 202, + + /* Select how many threads will be spawned to compress in parallel. + * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : + * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, + * while compression is performed in parallel, within worker thread(s). + * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : + * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). + * More workers improve speed, but also increase memory usage. + * Default value is `0`, aka "single-threaded mode" : no worker is spawned, + * compression is performed inside Caller's thread, and all invocations are blocking */ + ZSTD_c_nbWorkers = 400, + + /* Size of a compression job. This value is enforced only when nbWorkers >= 1. + * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. + * 0 means default, which is dynamically determined based on compression parameters. 
+ * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. + * The minimum size is automatically and transparently enforced. */ + ZSTD_c_jobSize = 401, + + /* Control the overlap size, as a fraction of window size. + * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. + * It helps preserve compression ratio, while each job is compressed in parallel. + * This value is enforced only when nbWorkers >= 1. + * Larger values increase compression ratio, but decrease speed. + * Possible values range from 0 to 9 : + * - 0 means "default" : value will be determined by the library, depending on strategy + * - 1 means "no overlap" + * - 9 means "full overlap", using a full window size. + * Each intermediate rank increases/decreases load size by a factor 2 : + * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default + * default value varies between 6 and 9, depending on strategy */ + ZSTD_c_overlapLog = 402, + + /* note : additional experimental parameters are also available + * within the experimental section of the API. + * At the time of this writing, they include : + * ZSTD_c_rsyncable + * ZSTD_c_format + * ZSTD_c_forceMaxWindow + * ZSTD_c_forceAttachDict + * ZSTD_c_literalCompressionMode + * ZSTD_c_srcSizeHint + * ZSTD_c_enableDedicatedDictSearch + * ZSTD_c_stableInBuffer + * ZSTD_c_stableOutBuffer + * ZSTD_c_blockDelimiters + * ZSTD_c_validateSequences + * ZSTD_c_blockSplitterLevel + * ZSTD_c_splitAfterSequences + * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize + * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. + * note : never ever use experimentalParam? names directly; + * also, the enums values themselves are unstable and can still change. 
+ */ + ZSTD_c_experimentalParam1 = 500, + ZSTD_c_experimentalParam2 = 10, + ZSTD_c_experimentalParam3 = 1000, + ZSTD_c_experimentalParam4 = 1001, + ZSTD_c_experimentalParam5 = 1002, + + /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */ + ZSTD_c_experimentalParam7 = 1004, + ZSTD_c_experimentalParam8 = 1005, + ZSTD_c_experimentalParam9 = 1006, + ZSTD_c_experimentalParam10 = 1007, + ZSTD_c_experimentalParam11 = 1008, + ZSTD_c_experimentalParam12 = 1009, + ZSTD_c_experimentalParam13 = 1010, + ZSTD_c_experimentalParam14 = 1011, + ZSTD_c_experimentalParam15 = 1012, + ZSTD_c_experimentalParam16 = 1013, + ZSTD_c_experimentalParam17 = 1014, + ZSTD_c_experimentalParam18 = 1015, + ZSTD_c_experimentalParam19 = 1016, + ZSTD_c_experimentalParam20 = 1017, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs index 443520a98..3c1343c49 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_cStreamStage { - public enum ZSTD_cStreamStage - { - zcss_init = 0, - zcss_load, - zcss_flush, - } -} + zcss_init = 0, + zcss_load, + zcss_flush, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs index 95c055d20..36fb340df 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Controls, for this matchState reset, whether the tables need to be cleared / + * prepared for the coming compression 
(ZSTDcrp_makeClean), or whether the + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a + * subsequent operation will overwrite the table space anyways (e.g., copying + * the matchState contents in from a CDict). + */ +public enum ZSTD_compResetPolicy_e { - /** - * Controls, for this matchState reset, whether the tables need to be cleared / - * prepared for the coming compression (ZSTDcrp_makeClean), or whether the - * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a - * subsequent operation will overwrite the table space anyways (e.g., copying - * the matchState contents in from a CDict). - */ - public enum ZSTD_compResetPolicy_e - { - ZSTDcrp_makeClean, - ZSTDcrp_leaveDirty, - } -} + ZSTDcrp_makeClean, + ZSTDcrp_leaveDirty, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs index ab5dbe726..af281c96c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressedBlockState_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_compressedBlockState_t { - public unsafe struct ZSTD_compressedBlockState_t - { - public ZSTD_entropyCTables_t entropy; - public fixed uint rep[3]; - } + public ZSTD_entropyCTables_t entropy; + public fixed uint rep[3]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs index 0fe0b5c53..bacbdea52 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionParameters.cs @@ -1,45 +1,44 @@ -namespace ZstdSharp.Unsafe +namespace 
SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_compressionParameters { - public struct ZSTD_compressionParameters + /**< largest match distance : larger == more compression, more memory needed during decompression */ + public uint windowLog; + + /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ + public uint chainLog; + + /**< dispatch table : larger == faster, more memory */ + public uint hashLog; + + /**< nb of searches : larger == more compression, slower */ + public uint searchLog; + + /**< match length searched : larger == faster decompression, sometimes less compression */ + public uint minMatch; + + /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ + public uint targetLength; + + /**< see ZSTD_strategy definition above */ + public ZSTD_strategy strategy; + + public ZSTD_compressionParameters( + uint windowLog, + uint chainLog, + uint hashLog, + uint searchLog, + uint minMatch, + uint targetLength, + ZSTD_strategy strategy + ) { - /**< largest match distance : larger == more compression, more memory needed during decompression */ - public uint windowLog; - - /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ - public uint chainLog; - - /**< dispatch table : larger == faster, more memory */ - public uint hashLog; - - /**< nb of searches : larger == more compression, slower */ - public uint searchLog; - - /**< match length searched : larger == faster decompression, sometimes less compression */ - public uint minMatch; - - /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ - public uint targetLength; - - /**< see ZSTD_strategy definition above */ - public ZSTD_strategy strategy; - - public ZSTD_compressionParameters( - uint windowLog, - uint chainLog, - uint hashLog, - uint searchLog, - uint minMatch, - uint targetLength, - ZSTD_strategy strategy - ) - { - 
this.windowLog = windowLog; - this.chainLog = chainLog; - this.hashLog = hashLog; - this.searchLog = searchLog; - this.minMatch = minMatch; - this.targetLength = targetLength; - this.strategy = strategy; - } + this.windowLog = windowLog; + this.chainLog = chainLog; + this.hashLog = hashLog; + this.searchLog = searchLog; + this.minMatch = minMatch; + this.targetLength = targetLength; + this.strategy = strategy; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs index a6c0f7ec8..4f31af03b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-************************************* + * Context memory management + ***************************************/ +public enum ZSTD_compressionStage_e { - /*-************************************* - * Context memory management - ***************************************/ - public enum ZSTD_compressionStage_e - { - ZSTDcs_created = 0, - ZSTDcs_init, - ZSTDcs_ongoing, - ZSTDcs_ending, - } -} + ZSTDcs_created = 0, + ZSTDcs_init, + ZSTDcs_ongoing, + ZSTDcs_ending, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs index 311ac0a36..21654b295 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs @@ -1,16 +1,15 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_customMem { - public unsafe struct ZSTD_customMem - { - public void* customAlloc; - public void* customFree; - public void* opaque; + public void* customAlloc; + public void* 
customFree; + public void* opaque; - public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque) - { - this.customAlloc = customAlloc; - this.customFree = customFree; - this.opaque = opaque; - } + public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque) + { + this.customAlloc = customAlloc; + this.customFree = customFree; + this.opaque = opaque; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs index 0e9e7ae2b..847b08666 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp.cs @@ -1,111 +1,110 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Zstd fits all its internal datastructures into a single continuous buffer, + * so that it only needs to perform a single OS allocation (or so that a buffer + * can be provided to it and it can perform no allocations at all). This buffer + * is called the workspace. + * + * Several optimizations complicate that process of allocating memory ranges + * from this workspace for each internal datastructure: + * + * - These different internal datastructures have different setup requirements: + * + * - The static objects need to be cleared once and can then be trivially + * reused for each compression. + * + * - Various buffers don't need to be initialized at all--they are always + * written into before they're read. + * + * - The matchstate tables have a unique requirement that they don't need + * their memory to be totally cleared, but they do need the memory to have + * some bound, i.e., a guarantee that all values in the memory they've been + * allocated is less than some maximum value (which is the starting value + * for the indices that they will then use for compression). 
When this + * guarantee is provided to them, they can use the memory without any setup + * work. When it can't, they have to clear the area. + * + * - These buffers also have different alignment requirements. + * + * - We would like to reuse the objects in the workspace for multiple + * compressions without having to perform any expensive reallocation or + * reinitialization work. + * + * - We would like to be able to efficiently reuse the workspace across + * multiple compressions **even when the compression parameters change** and + * we need to resize some of the objects (where possible). + * + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp + * abstraction was created. It works as follows: + * + * Workspace Layout: + * + * [ ... workspace ... ] + * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] + * + * The various objects that live in the workspace are divided into the + * following categories, and are allocated separately: + * + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, + * so that literally everything fits in a single buffer. Note: if present, + * this must be the first object in the workspace, since ZSTD_customFree{CCtx, + * CDict}() rely on a pointer comparison to see whether one or two frees are + * required. + * + * - Fixed size objects: these are fixed-size, fixed-count objects that are + * nonetheless "dynamically" allocated in the workspace so that we can + * control how they're initialized separately from the broader ZSTD_CCtx. + * Examples: + * - Entropy Workspace + * - 2 x ZSTD_compressedBlockState_t + * - CDict dictionary contents + * + * - Tables: these are any of several different datastructures (hash tables, + * chain tables, binary trees) that all respect a common format: they are + * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). + * Their sizes depend on the cparams. These tables are 64-byte aligned. 
+ * + * - Init once: these buffers require to be initialized at least once before + * use. They should be used when we want to skip memory initialization + * while not triggering memory checkers (like Valgrind) when reading from + * from this memory without writing to it first. + * These buffers should be used carefully as they might contain data + * from previous compressions. + * Buffers are aligned to 64 bytes. + * + * - Aligned: these buffers don't require any initialization before they're + * used. The user of the buffer should make sure they write into a buffer + * location before reading from it. + * Buffers are aligned to 64 bytes. + * + * - Buffers: these buffers are used for various purposes that don't require + * any alignment or initialization before they're used. This means they can + * be moved around at no cost for a new compression. + * + * Allocating Memory: + * + * The various types of objects must be allocated in order, so they can be + * correctly packed into the workspace buffer. That order is: + * + * 1. Objects + * 2. Init once / Tables + * 3. Aligned / Tables + * 4. Buffers / Tables + * + * Attempts to reserve objects of different types out of order will fail. + */ +public unsafe struct ZSTD_cwksp { - /** - * Zstd fits all its internal datastructures into a single continuous buffer, - * so that it only needs to perform a single OS allocation (or so that a buffer - * can be provided to it and it can perform no allocations at all). This buffer - * is called the workspace. - * - * Several optimizations complicate that process of allocating memory ranges - * from this workspace for each internal datastructure: - * - * - These different internal datastructures have different setup requirements: - * - * - The static objects need to be cleared once and can then be trivially - * reused for each compression. - * - * - Various buffers don't need to be initialized at all--they are always - * written into before they're read. 
- * - * - The matchstate tables have a unique requirement that they don't need - * their memory to be totally cleared, but they do need the memory to have - * some bound, i.e., a guarantee that all values in the memory they've been - * allocated is less than some maximum value (which is the starting value - * for the indices that they will then use for compression). When this - * guarantee is provided to them, they can use the memory without any setup - * work. When it can't, they have to clear the area. - * - * - These buffers also have different alignment requirements. - * - * - We would like to reuse the objects in the workspace for multiple - * compressions without having to perform any expensive reallocation or - * reinitialization work. - * - * - We would like to be able to efficiently reuse the workspace across - * multiple compressions **even when the compression parameters change** and - * we need to resize some of the objects (where possible). - * - * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp - * abstraction was created. It works as follows: - * - * Workspace Layout: - * - * [ ... workspace ... ] - * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] - * - * The various objects that live in the workspace are divided into the - * following categories, and are allocated separately: - * - * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, - * so that literally everything fits in a single buffer. Note: if present, - * this must be the first object in the workspace, since ZSTD_customFree{CCtx, - * CDict}() rely on a pointer comparison to see whether one or two frees are - * required. - * - * - Fixed size objects: these are fixed-size, fixed-count objects that are - * nonetheless "dynamically" allocated in the workspace so that we can - * control how they're initialized separately from the broader ZSTD_CCtx. 
- * Examples: - * - Entropy Workspace - * - 2 x ZSTD_compressedBlockState_t - * - CDict dictionary contents - * - * - Tables: these are any of several different datastructures (hash tables, - * chain tables, binary trees) that all respect a common format: they are - * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). - * Their sizes depend on the cparams. These tables are 64-byte aligned. - * - * - Init once: these buffers require to be initialized at least once before - * use. They should be used when we want to skip memory initialization - * while not triggering memory checkers (like Valgrind) when reading from - * from this memory without writing to it first. - * These buffers should be used carefully as they might contain data - * from previous compressions. - * Buffers are aligned to 64 bytes. - * - * - Aligned: these buffers don't require any initialization before they're - * used. The user of the buffer should make sure they write into a buffer - * location before reading from it. - * Buffers are aligned to 64 bytes. - * - * - Buffers: these buffers are used for various purposes that don't require - * any alignment or initialization before they're used. This means they can - * be moved around at no cost for a new compression. - * - * Allocating Memory: - * - * The various types of objects must be allocated in order, so they can be - * correctly packed into the workspace buffer. That order is: - * - * 1. Objects - * 2. Init once / Tables - * 3. Aligned / Tables - * 4. Buffers / Tables - * - * Attempts to reserve objects of different types out of order will fail. 
- */ - public unsafe struct ZSTD_cwksp - { - public void* workspace; - public void* workspaceEnd; - public void* objectEnd; - public void* tableEnd; - public void* tableValidEnd; - public void* allocStart; - public void* initOnceStart; - public byte allocFailed; - public int workspaceOversizedDuration; - public ZSTD_cwksp_alloc_phase_e phase; - public ZSTD_cwksp_static_alloc_e isStatic; - } + public void* workspace; + public void* workspaceEnd; + public void* objectEnd; + public void* tableEnd; + public void* tableValidEnd; + public void* allocStart; + public void* initOnceStart; + public byte allocFailed; + public int workspaceOversizedDuration; + public ZSTD_cwksp_alloc_phase_e phase; + public ZSTD_cwksp_static_alloc_e isStatic; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs index f3f266f86..c9fd5aac0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_alloc_phase_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-************************************* + * Structures + ***************************************/ +public enum ZSTD_cwksp_alloc_phase_e { - /*-************************************* - * Structures - ***************************************/ - public enum ZSTD_cwksp_alloc_phase_e - { - ZSTD_cwksp_alloc_objects, - ZSTD_cwksp_alloc_aligned_init_once, - ZSTD_cwksp_alloc_aligned, - ZSTD_cwksp_alloc_buffers, - } + ZSTD_cwksp_alloc_objects, + ZSTD_cwksp_alloc_aligned_init_once, + ZSTD_cwksp_alloc_aligned, + ZSTD_cwksp_alloc_buffers, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs index 25e530df5..82f30a259 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Used to describe whether the workspace is statically allocated (and will not + * necessarily ever be freed), or if it's dynamically allocated and we can + * expect a well-formed caller to free this. + */ +public enum ZSTD_cwksp_static_alloc_e { - /** - * Used to describe whether the workspace is statically allocated (and will not - * necessarily ever be freed), or if it's dynamically allocated and we can - * expect a well-formed caller to free this. - */ - public enum ZSTD_cwksp_static_alloc_e - { - ZSTD_cwksp_dynamic_alloc, - ZSTD_cwksp_static_alloc, - } -} + ZSTD_cwksp_dynamic_alloc, + ZSTD_cwksp_static_alloc, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs index 200961439..2dc4ecba3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dParameter.cs @@ -1,39 +1,38 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* The advanced API pushes parameters one by one into an existing DCtx context. + * Parameters are sticky, and remain valid for all following frames + * using the same DCtx context. + * It's possible to reset parameters to default values using ZSTD_DCtx_reset(). + * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream(). + * Therefore, no new decompression function is necessary. + */ +public enum ZSTD_dParameter { - /* The advanced API pushes parameters one by one into an existing DCtx context. - * Parameters are sticky, and remain valid for all following frames - * using the same DCtx context. 
- * It's possible to reset parameters to default values using ZSTD_DCtx_reset(). - * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream(). - * Therefore, no new decompression function is necessary. - */ - public enum ZSTD_dParameter - { - /* Select a size limit (in power of 2) beyond which - * the streaming API will refuse to allocate memory buffer - * in order to protect the host from unreasonable memory requirements. - * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. - * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). - * Special: value 0 means "use default maximum windowLog". */ - ZSTD_d_windowLogMax = 100, + /* Select a size limit (in power of 2) beyond which + * the streaming API will refuse to allocate memory buffer + * in order to protect the host from unreasonable memory requirements. + * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. + * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). + * Special: value 0 means "use default maximum windowLog". */ + ZSTD_d_windowLogMax = 100, - /* note : additional experimental parameters are also available - * within the experimental section of the API. - * At the time of this writing, they include : - * ZSTD_d_format - * ZSTD_d_stableOutBuffer - * ZSTD_d_forceIgnoreChecksum - * ZSTD_d_refMultipleDDicts - * ZSTD_d_disableHuffmanAssembly - * ZSTD_d_maxBlockSize - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? 
names directly - */ - ZSTD_d_experimentalParam1 = 1000, - ZSTD_d_experimentalParam2 = 1001, - ZSTD_d_experimentalParam3 = 1002, - ZSTD_d_experimentalParam4 = 1003, - ZSTD_d_experimentalParam5 = 1004, - ZSTD_d_experimentalParam6 = 1005, - } + /* note : additional experimental parameters are also available + * within the experimental section of the API. + * At the time of this writing, they include : + * ZSTD_d_format + * ZSTD_d_stableOutBuffer + * ZSTD_d_forceIgnoreChecksum + * ZSTD_d_refMultipleDDicts + * ZSTD_d_disableHuffmanAssembly + * ZSTD_d_maxBlockSize + * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. + * note : never ever use experimentalParam? names directly + */ + ZSTD_d_experimentalParam1 = 1000, + ZSTD_d_experimentalParam2 = 1001, + ZSTD_d_experimentalParam3 = 1002, + ZSTD_d_experimentalParam4 = 1003, + ZSTD_d_experimentalParam5 = 1004, + ZSTD_d_experimentalParam6 = 1005, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs index 104155fee..69b992a27 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStage.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dStage { - public enum ZSTD_dStage - { - ZSTDds_getFrameHeaderSize, - ZSTDds_decodeFrameHeader, - ZSTDds_decodeBlockHeader, - ZSTDds_decompressBlock, - ZSTDds_decompressLastBlock, - ZSTDds_checkChecksum, - ZSTDds_decodeSkippableHeader, - ZSTDds_skipFrame, - } + ZSTDds_getFrameHeaderSize, + ZSTDds_decodeFrameHeader, + ZSTDds_decodeBlockHeader, + ZSTDds_decompressBlock, + ZSTDds_decompressLastBlock, + ZSTDds_checkChecksum, + ZSTDds_decodeSkippableHeader, + ZSTDds_skipFrame, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs index fb40404f6..7cc247e16 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dStreamStage { - public enum ZSTD_dStreamStage - { - zdss_init = 0, - zdss_loadHeader, - zdss_read, - zdss_load, - zdss_flush, - } -} + zdss_init = 0, + zdss_loadHeader, + zdss_read, + zdss_load, + zdss_flush, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs index 33983b06e..8f989a423 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictAttachPref_e.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dictAttachPref_e { - public enum ZSTD_dictAttachPref_e - { - /* Use the default heuristic. */ - ZSTD_dictDefaultAttach = 0, + /* Use the default heuristic. */ + ZSTD_dictDefaultAttach = 0, - /* Never copy the dictionary. */ - ZSTD_dictForceAttach = 1, + /* Never copy the dictionary. */ + ZSTD_dictForceAttach = 1, - /* Always copy the dictionary. */ - ZSTD_dictForceCopy = 2, + /* Always copy the dictionary. 
*/ + ZSTD_dictForceCopy = 2, - /* Always reload the dictionary */ - ZSTD_dictForceLoad = 3, - } + /* Always reload the dictionary */ + ZSTD_dictForceLoad = 3, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs index 1a486605c..2b821ecf7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dictContentType_e { - public enum ZSTD_dictContentType_e - { - /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ - ZSTD_dct_auto = 0, + /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ + ZSTD_dct_auto = 0, - /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ - ZSTD_dct_rawContent = 1, + /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ + ZSTD_dct_rawContent = 1, - /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ - ZSTD_dct_fullDict = 2, - } -} + /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ + ZSTD_dct_fullDict = 2, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs index ea0d1ac2d..af7ed50b5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictLoadMethod_e.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum 
ZSTD_dictLoadMethod_e { - public enum ZSTD_dictLoadMethod_e - { - /**< Copy dictionary content internally */ - ZSTD_dlm_byCopy = 0, + /**< Copy dictionary content internally */ + ZSTD_dlm_byCopy = 0, - /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ - ZSTD_dlm_byRef = 1, - } + /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ + ZSTD_dlm_byRef = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs index 70b51926a..f234551d9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictMode_e.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dictMode_e { - public enum ZSTD_dictMode_e - { - ZSTD_noDict = 0, - ZSTD_extDict = 1, - ZSTD_dictMatchState = 2, - ZSTD_dedicatedDictSearch = 3, - } + ZSTD_noDict = 0, + ZSTD_extDict = 1, + ZSTD_dictMatchState = 2, + ZSTD_dedicatedDictSearch = 3, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs index 2fab404ed..7cf4f3e5d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictTableLoadMethod_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dictTableLoadMethod_e { - public enum ZSTD_dictTableLoadMethod_e - { - ZSTD_dtlm_fast, - ZSTD_dtlm_full, - } + ZSTD_dtlm_fast, + ZSTD_dtlm_full, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs index 37893d7e8..e7f2d2ef0 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_dictUses_e { - public enum ZSTD_dictUses_e - { - /* Use the dictionary indefinitely */ - ZSTD_use_indefinitely = -1, + /* Use the dictionary indefinitely */ + ZSTD_use_indefinitely = -1, - /* Do not use the dictionary (if one exists free it) */ - ZSTD_dont_use = 0, + /* Do not use the dictionary (if one exists free it) */ + ZSTD_dont_use = 0, - /* Use the dictionary once and set to ZSTD_dont_use */ - ZSTD_use_once = 1, - } -} + /* Use the dictionary once and set to ZSTD_dont_use */ + ZSTD_use_once = 1, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs index 747e4ba47..2455590b9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTablesMetadata_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_entropyCTablesMetadata_t { - public struct ZSTD_entropyCTablesMetadata_t - { - public ZSTD_hufCTablesMetadata_t hufMetadata; - public ZSTD_fseCTablesMetadata_t fseMetadata; - } + public ZSTD_hufCTablesMetadata_t hufMetadata; + public ZSTD_fseCTablesMetadata_t fseMetadata; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs index 57171c809..f3d983584 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyCTables_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace 
SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_entropyCTables_t { - public struct ZSTD_entropyCTables_t - { - public ZSTD_hufCTables_t huf; - public ZSTD_fseCTables_t fse; - } + public ZSTD_hufCTables_t huf; + public ZSTD_fseCTables_t fse; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs index 638cbd7bc..84a487a7d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs @@ -1,22 +1,22 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_entropyDTables_t { - public unsafe struct ZSTD_entropyDTables_t - { - /* Note : Space reserved for FSE Tables */ - public _LLTable_e__FixedBuffer LLTable; + /* Note : Space reserved for FSE Tables */ + public _LLTable_e__FixedBuffer LLTable; - /* is also used as temporary workspace while building hufTable during DDict creation */ - public _OFTable_e__FixedBuffer OFTable; + /* is also used as temporary workspace while building hufTable during DDict creation */ + public _OFTable_e__FixedBuffer OFTable; - /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ - public _MLTable_e__FixedBuffer MLTable; + /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ + public _MLTable_e__FixedBuffer MLTable; - /* can accommodate HUF_decompress4X */ - public fixed uint hufTable[4097]; - public fixed uint rep[3]; - public fixed uint workspace[157]; + /* can accommodate HUF_decompress4X */ + public fixed uint hufTable[4097]; + public fixed uint rep[3]; + public fixed uint workspace[157]; #if NET8_0_OR_GREATER [InlineArray(513)] @@ -26,522 +26,522 @@ public unsafe struct _LLTable_e__FixedBuffer } #else - public unsafe struct _LLTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - 
public ZSTD_seqSymbol e1; - public ZSTD_seqSymbol e2; - public ZSTD_seqSymbol e3; - public ZSTD_seqSymbol e4; - public ZSTD_seqSymbol e5; - public ZSTD_seqSymbol e6; - public ZSTD_seqSymbol e7; - public ZSTD_seqSymbol e8; - public ZSTD_seqSymbol e9; - public ZSTD_seqSymbol e10; - public ZSTD_seqSymbol e11; - public ZSTD_seqSymbol e12; - public ZSTD_seqSymbol e13; - public ZSTD_seqSymbol e14; - public ZSTD_seqSymbol e15; - public ZSTD_seqSymbol e16; - public ZSTD_seqSymbol e17; - public ZSTD_seqSymbol e18; - public ZSTD_seqSymbol e19; - public ZSTD_seqSymbol e20; - public ZSTD_seqSymbol e21; - public ZSTD_seqSymbol e22; - public ZSTD_seqSymbol e23; - public ZSTD_seqSymbol e24; - public ZSTD_seqSymbol e25; - public ZSTD_seqSymbol e26; - public ZSTD_seqSymbol e27; - public ZSTD_seqSymbol e28; - public ZSTD_seqSymbol e29; - public ZSTD_seqSymbol e30; - public ZSTD_seqSymbol e31; - public ZSTD_seqSymbol e32; - public ZSTD_seqSymbol e33; - public ZSTD_seqSymbol e34; - public ZSTD_seqSymbol e35; - public ZSTD_seqSymbol e36; - public ZSTD_seqSymbol e37; - public ZSTD_seqSymbol e38; - public ZSTD_seqSymbol e39; - public ZSTD_seqSymbol e40; - public ZSTD_seqSymbol e41; - public ZSTD_seqSymbol e42; - public ZSTD_seqSymbol e43; - public ZSTD_seqSymbol e44; - public ZSTD_seqSymbol e45; - public ZSTD_seqSymbol e46; - public ZSTD_seqSymbol e47; - public ZSTD_seqSymbol e48; - public ZSTD_seqSymbol e49; - public ZSTD_seqSymbol e50; - public ZSTD_seqSymbol e51; - public ZSTD_seqSymbol e52; - public ZSTD_seqSymbol e53; - public ZSTD_seqSymbol e54; - public ZSTD_seqSymbol e55; - public ZSTD_seqSymbol e56; - public ZSTD_seqSymbol e57; - public ZSTD_seqSymbol e58; - public ZSTD_seqSymbol e59; - public ZSTD_seqSymbol e60; - public ZSTD_seqSymbol e61; - public ZSTD_seqSymbol e62; - public ZSTD_seqSymbol e63; - public ZSTD_seqSymbol e64; - public ZSTD_seqSymbol e65; - public ZSTD_seqSymbol e66; - public ZSTD_seqSymbol e67; - public ZSTD_seqSymbol e68; - public ZSTD_seqSymbol e69; - public 
ZSTD_seqSymbol e70; - public ZSTD_seqSymbol e71; - public ZSTD_seqSymbol e72; - public ZSTD_seqSymbol e73; - public ZSTD_seqSymbol e74; - public ZSTD_seqSymbol e75; - public ZSTD_seqSymbol e76; - public ZSTD_seqSymbol e77; - public ZSTD_seqSymbol e78; - public ZSTD_seqSymbol e79; - public ZSTD_seqSymbol e80; - public ZSTD_seqSymbol e81; - public ZSTD_seqSymbol e82; - public ZSTD_seqSymbol e83; - public ZSTD_seqSymbol e84; - public ZSTD_seqSymbol e85; - public ZSTD_seqSymbol e86; - public ZSTD_seqSymbol e87; - public ZSTD_seqSymbol e88; - public ZSTD_seqSymbol e89; - public ZSTD_seqSymbol e90; - public ZSTD_seqSymbol e91; - public ZSTD_seqSymbol e92; - public ZSTD_seqSymbol e93; - public ZSTD_seqSymbol e94; - public ZSTD_seqSymbol e95; - public ZSTD_seqSymbol e96; - public ZSTD_seqSymbol e97; - public ZSTD_seqSymbol e98; - public ZSTD_seqSymbol e99; - public ZSTD_seqSymbol e100; - public ZSTD_seqSymbol e101; - public ZSTD_seqSymbol e102; - public ZSTD_seqSymbol e103; - public ZSTD_seqSymbol e104; - public ZSTD_seqSymbol e105; - public ZSTD_seqSymbol e106; - public ZSTD_seqSymbol e107; - public ZSTD_seqSymbol e108; - public ZSTD_seqSymbol e109; - public ZSTD_seqSymbol e110; - public ZSTD_seqSymbol e111; - public ZSTD_seqSymbol e112; - public ZSTD_seqSymbol e113; - public ZSTD_seqSymbol e114; - public ZSTD_seqSymbol e115; - public ZSTD_seqSymbol e116; - public ZSTD_seqSymbol e117; - public ZSTD_seqSymbol e118; - public ZSTD_seqSymbol e119; - public ZSTD_seqSymbol e120; - public ZSTD_seqSymbol e121; - public ZSTD_seqSymbol e122; - public ZSTD_seqSymbol e123; - public ZSTD_seqSymbol e124; - public ZSTD_seqSymbol e125; - public ZSTD_seqSymbol e126; - public ZSTD_seqSymbol e127; - public ZSTD_seqSymbol e128; - public ZSTD_seqSymbol e129; - public ZSTD_seqSymbol e130; - public ZSTD_seqSymbol e131; - public ZSTD_seqSymbol e132; - public ZSTD_seqSymbol e133; - public ZSTD_seqSymbol e134; - public ZSTD_seqSymbol e135; - public ZSTD_seqSymbol e136; - public ZSTD_seqSymbol 
e137; - public ZSTD_seqSymbol e138; - public ZSTD_seqSymbol e139; - public ZSTD_seqSymbol e140; - public ZSTD_seqSymbol e141; - public ZSTD_seqSymbol e142; - public ZSTD_seqSymbol e143; - public ZSTD_seqSymbol e144; - public ZSTD_seqSymbol e145; - public ZSTD_seqSymbol e146; - public ZSTD_seqSymbol e147; - public ZSTD_seqSymbol e148; - public ZSTD_seqSymbol e149; - public ZSTD_seqSymbol e150; - public ZSTD_seqSymbol e151; - public ZSTD_seqSymbol e152; - public ZSTD_seqSymbol e153; - public ZSTD_seqSymbol e154; - public ZSTD_seqSymbol e155; - public ZSTD_seqSymbol e156; - public ZSTD_seqSymbol e157; - public ZSTD_seqSymbol e158; - public ZSTD_seqSymbol e159; - public ZSTD_seqSymbol e160; - public ZSTD_seqSymbol e161; - public ZSTD_seqSymbol e162; - public ZSTD_seqSymbol e163; - public ZSTD_seqSymbol e164; - public ZSTD_seqSymbol e165; - public ZSTD_seqSymbol e166; - public ZSTD_seqSymbol e167; - public ZSTD_seqSymbol e168; - public ZSTD_seqSymbol e169; - public ZSTD_seqSymbol e170; - public ZSTD_seqSymbol e171; - public ZSTD_seqSymbol e172; - public ZSTD_seqSymbol e173; - public ZSTD_seqSymbol e174; - public ZSTD_seqSymbol e175; - public ZSTD_seqSymbol e176; - public ZSTD_seqSymbol e177; - public ZSTD_seqSymbol e178; - public ZSTD_seqSymbol e179; - public ZSTD_seqSymbol e180; - public ZSTD_seqSymbol e181; - public ZSTD_seqSymbol e182; - public ZSTD_seqSymbol e183; - public ZSTD_seqSymbol e184; - public ZSTD_seqSymbol e185; - public ZSTD_seqSymbol e186; - public ZSTD_seqSymbol e187; - public ZSTD_seqSymbol e188; - public ZSTD_seqSymbol e189; - public ZSTD_seqSymbol e190; - public ZSTD_seqSymbol e191; - public ZSTD_seqSymbol e192; - public ZSTD_seqSymbol e193; - public ZSTD_seqSymbol e194; - public ZSTD_seqSymbol e195; - public ZSTD_seqSymbol e196; - public ZSTD_seqSymbol e197; - public ZSTD_seqSymbol e198; - public ZSTD_seqSymbol e199; - public ZSTD_seqSymbol e200; - public ZSTD_seqSymbol e201; - public ZSTD_seqSymbol e202; - public ZSTD_seqSymbol e203; - public 
ZSTD_seqSymbol e204; - public ZSTD_seqSymbol e205; - public ZSTD_seqSymbol e206; - public ZSTD_seqSymbol e207; - public ZSTD_seqSymbol e208; - public ZSTD_seqSymbol e209; - public ZSTD_seqSymbol e210; - public ZSTD_seqSymbol e211; - public ZSTD_seqSymbol e212; - public ZSTD_seqSymbol e213; - public ZSTD_seqSymbol e214; - public ZSTD_seqSymbol e215; - public ZSTD_seqSymbol e216; - public ZSTD_seqSymbol e217; - public ZSTD_seqSymbol e218; - public ZSTD_seqSymbol e219; - public ZSTD_seqSymbol e220; - public ZSTD_seqSymbol e221; - public ZSTD_seqSymbol e222; - public ZSTD_seqSymbol e223; - public ZSTD_seqSymbol e224; - public ZSTD_seqSymbol e225; - public ZSTD_seqSymbol e226; - public ZSTD_seqSymbol e227; - public ZSTD_seqSymbol e228; - public ZSTD_seqSymbol e229; - public ZSTD_seqSymbol e230; - public ZSTD_seqSymbol e231; - public ZSTD_seqSymbol e232; - public ZSTD_seqSymbol e233; - public ZSTD_seqSymbol e234; - public ZSTD_seqSymbol e235; - public ZSTD_seqSymbol e236; - public ZSTD_seqSymbol e237; - public ZSTD_seqSymbol e238; - public ZSTD_seqSymbol e239; - public ZSTD_seqSymbol e240; - public ZSTD_seqSymbol e241; - public ZSTD_seqSymbol e242; - public ZSTD_seqSymbol e243; - public ZSTD_seqSymbol e244; - public ZSTD_seqSymbol e245; - public ZSTD_seqSymbol e246; - public ZSTD_seqSymbol e247; - public ZSTD_seqSymbol e248; - public ZSTD_seqSymbol e249; - public ZSTD_seqSymbol e250; - public ZSTD_seqSymbol e251; - public ZSTD_seqSymbol e252; - public ZSTD_seqSymbol e253; - public ZSTD_seqSymbol e254; - public ZSTD_seqSymbol e255; - public ZSTD_seqSymbol e256; - public ZSTD_seqSymbol e257; - public ZSTD_seqSymbol e258; - public ZSTD_seqSymbol e259; - public ZSTD_seqSymbol e260; - public ZSTD_seqSymbol e261; - public ZSTD_seqSymbol e262; - public ZSTD_seqSymbol e263; - public ZSTD_seqSymbol e264; - public ZSTD_seqSymbol e265; - public ZSTD_seqSymbol e266; - public ZSTD_seqSymbol e267; - public ZSTD_seqSymbol e268; - public ZSTD_seqSymbol e269; - public ZSTD_seqSymbol 
e270; - public ZSTD_seqSymbol e271; - public ZSTD_seqSymbol e272; - public ZSTD_seqSymbol e273; - public ZSTD_seqSymbol e274; - public ZSTD_seqSymbol e275; - public ZSTD_seqSymbol e276; - public ZSTD_seqSymbol e277; - public ZSTD_seqSymbol e278; - public ZSTD_seqSymbol e279; - public ZSTD_seqSymbol e280; - public ZSTD_seqSymbol e281; - public ZSTD_seqSymbol e282; - public ZSTD_seqSymbol e283; - public ZSTD_seqSymbol e284; - public ZSTD_seqSymbol e285; - public ZSTD_seqSymbol e286; - public ZSTD_seqSymbol e287; - public ZSTD_seqSymbol e288; - public ZSTD_seqSymbol e289; - public ZSTD_seqSymbol e290; - public ZSTD_seqSymbol e291; - public ZSTD_seqSymbol e292; - public ZSTD_seqSymbol e293; - public ZSTD_seqSymbol e294; - public ZSTD_seqSymbol e295; - public ZSTD_seqSymbol e296; - public ZSTD_seqSymbol e297; - public ZSTD_seqSymbol e298; - public ZSTD_seqSymbol e299; - public ZSTD_seqSymbol e300; - public ZSTD_seqSymbol e301; - public ZSTD_seqSymbol e302; - public ZSTD_seqSymbol e303; - public ZSTD_seqSymbol e304; - public ZSTD_seqSymbol e305; - public ZSTD_seqSymbol e306; - public ZSTD_seqSymbol e307; - public ZSTD_seqSymbol e308; - public ZSTD_seqSymbol e309; - public ZSTD_seqSymbol e310; - public ZSTD_seqSymbol e311; - public ZSTD_seqSymbol e312; - public ZSTD_seqSymbol e313; - public ZSTD_seqSymbol e314; - public ZSTD_seqSymbol e315; - public ZSTD_seqSymbol e316; - public ZSTD_seqSymbol e317; - public ZSTD_seqSymbol e318; - public ZSTD_seqSymbol e319; - public ZSTD_seqSymbol e320; - public ZSTD_seqSymbol e321; - public ZSTD_seqSymbol e322; - public ZSTD_seqSymbol e323; - public ZSTD_seqSymbol e324; - public ZSTD_seqSymbol e325; - public ZSTD_seqSymbol e326; - public ZSTD_seqSymbol e327; - public ZSTD_seqSymbol e328; - public ZSTD_seqSymbol e329; - public ZSTD_seqSymbol e330; - public ZSTD_seqSymbol e331; - public ZSTD_seqSymbol e332; - public ZSTD_seqSymbol e333; - public ZSTD_seqSymbol e334; - public ZSTD_seqSymbol e335; - public ZSTD_seqSymbol e336; - public 
ZSTD_seqSymbol e337; - public ZSTD_seqSymbol e338; - public ZSTD_seqSymbol e339; - public ZSTD_seqSymbol e340; - public ZSTD_seqSymbol e341; - public ZSTD_seqSymbol e342; - public ZSTD_seqSymbol e343; - public ZSTD_seqSymbol e344; - public ZSTD_seqSymbol e345; - public ZSTD_seqSymbol e346; - public ZSTD_seqSymbol e347; - public ZSTD_seqSymbol e348; - public ZSTD_seqSymbol e349; - public ZSTD_seqSymbol e350; - public ZSTD_seqSymbol e351; - public ZSTD_seqSymbol e352; - public ZSTD_seqSymbol e353; - public ZSTD_seqSymbol e354; - public ZSTD_seqSymbol e355; - public ZSTD_seqSymbol e356; - public ZSTD_seqSymbol e357; - public ZSTD_seqSymbol e358; - public ZSTD_seqSymbol e359; - public ZSTD_seqSymbol e360; - public ZSTD_seqSymbol e361; - public ZSTD_seqSymbol e362; - public ZSTD_seqSymbol e363; - public ZSTD_seqSymbol e364; - public ZSTD_seqSymbol e365; - public ZSTD_seqSymbol e366; - public ZSTD_seqSymbol e367; - public ZSTD_seqSymbol e368; - public ZSTD_seqSymbol e369; - public ZSTD_seqSymbol e370; - public ZSTD_seqSymbol e371; - public ZSTD_seqSymbol e372; - public ZSTD_seqSymbol e373; - public ZSTD_seqSymbol e374; - public ZSTD_seqSymbol e375; - public ZSTD_seqSymbol e376; - public ZSTD_seqSymbol e377; - public ZSTD_seqSymbol e378; - public ZSTD_seqSymbol e379; - public ZSTD_seqSymbol e380; - public ZSTD_seqSymbol e381; - public ZSTD_seqSymbol e382; - public ZSTD_seqSymbol e383; - public ZSTD_seqSymbol e384; - public ZSTD_seqSymbol e385; - public ZSTD_seqSymbol e386; - public ZSTD_seqSymbol e387; - public ZSTD_seqSymbol e388; - public ZSTD_seqSymbol e389; - public ZSTD_seqSymbol e390; - public ZSTD_seqSymbol e391; - public ZSTD_seqSymbol e392; - public ZSTD_seqSymbol e393; - public ZSTD_seqSymbol e394; - public ZSTD_seqSymbol e395; - public ZSTD_seqSymbol e396; - public ZSTD_seqSymbol e397; - public ZSTD_seqSymbol e398; - public ZSTD_seqSymbol e399; - public ZSTD_seqSymbol e400; - public ZSTD_seqSymbol e401; - public ZSTD_seqSymbol e402; - public ZSTD_seqSymbol 
e403; - public ZSTD_seqSymbol e404; - public ZSTD_seqSymbol e405; - public ZSTD_seqSymbol e406; - public ZSTD_seqSymbol e407; - public ZSTD_seqSymbol e408; - public ZSTD_seqSymbol e409; - public ZSTD_seqSymbol e410; - public ZSTD_seqSymbol e411; - public ZSTD_seqSymbol e412; - public ZSTD_seqSymbol e413; - public ZSTD_seqSymbol e414; - public ZSTD_seqSymbol e415; - public ZSTD_seqSymbol e416; - public ZSTD_seqSymbol e417; - public ZSTD_seqSymbol e418; - public ZSTD_seqSymbol e419; - public ZSTD_seqSymbol e420; - public ZSTD_seqSymbol e421; - public ZSTD_seqSymbol e422; - public ZSTD_seqSymbol e423; - public ZSTD_seqSymbol e424; - public ZSTD_seqSymbol e425; - public ZSTD_seqSymbol e426; - public ZSTD_seqSymbol e427; - public ZSTD_seqSymbol e428; - public ZSTD_seqSymbol e429; - public ZSTD_seqSymbol e430; - public ZSTD_seqSymbol e431; - public ZSTD_seqSymbol e432; - public ZSTD_seqSymbol e433; - public ZSTD_seqSymbol e434; - public ZSTD_seqSymbol e435; - public ZSTD_seqSymbol e436; - public ZSTD_seqSymbol e437; - public ZSTD_seqSymbol e438; - public ZSTD_seqSymbol e439; - public ZSTD_seqSymbol e440; - public ZSTD_seqSymbol e441; - public ZSTD_seqSymbol e442; - public ZSTD_seqSymbol e443; - public ZSTD_seqSymbol e444; - public ZSTD_seqSymbol e445; - public ZSTD_seqSymbol e446; - public ZSTD_seqSymbol e447; - public ZSTD_seqSymbol e448; - public ZSTD_seqSymbol e449; - public ZSTD_seqSymbol e450; - public ZSTD_seqSymbol e451; - public ZSTD_seqSymbol e452; - public ZSTD_seqSymbol e453; - public ZSTD_seqSymbol e454; - public ZSTD_seqSymbol e455; - public ZSTD_seqSymbol e456; - public ZSTD_seqSymbol e457; - public ZSTD_seqSymbol e458; - public ZSTD_seqSymbol e459; - public ZSTD_seqSymbol e460; - public ZSTD_seqSymbol e461; - public ZSTD_seqSymbol e462; - public ZSTD_seqSymbol e463; - public ZSTD_seqSymbol e464; - public ZSTD_seqSymbol e465; - public ZSTD_seqSymbol e466; - public ZSTD_seqSymbol e467; - public ZSTD_seqSymbol e468; - public ZSTD_seqSymbol e469; - public 
ZSTD_seqSymbol e470; - public ZSTD_seqSymbol e471; - public ZSTD_seqSymbol e472; - public ZSTD_seqSymbol e473; - public ZSTD_seqSymbol e474; - public ZSTD_seqSymbol e475; - public ZSTD_seqSymbol e476; - public ZSTD_seqSymbol e477; - public ZSTD_seqSymbol e478; - public ZSTD_seqSymbol e479; - public ZSTD_seqSymbol e480; - public ZSTD_seqSymbol e481; - public ZSTD_seqSymbol e482; - public ZSTD_seqSymbol e483; - public ZSTD_seqSymbol e484; - public ZSTD_seqSymbol e485; - public ZSTD_seqSymbol e486; - public ZSTD_seqSymbol e487; - public ZSTD_seqSymbol e488; - public ZSTD_seqSymbol e489; - public ZSTD_seqSymbol e490; - public ZSTD_seqSymbol e491; - public ZSTD_seqSymbol e492; - public ZSTD_seqSymbol e493; - public ZSTD_seqSymbol e494; - public ZSTD_seqSymbol e495; - public ZSTD_seqSymbol e496; - public ZSTD_seqSymbol e497; - public ZSTD_seqSymbol e498; - public ZSTD_seqSymbol e499; - public ZSTD_seqSymbol e500; - public ZSTD_seqSymbol e501; - public ZSTD_seqSymbol e502; - public ZSTD_seqSymbol e503; - public ZSTD_seqSymbol e504; - public ZSTD_seqSymbol e505; - public ZSTD_seqSymbol e506; - public ZSTD_seqSymbol e507; - public ZSTD_seqSymbol e508; - public ZSTD_seqSymbol e509; - public ZSTD_seqSymbol e510; - public ZSTD_seqSymbol e511; - public ZSTD_seqSymbol e512; - } + public unsafe struct _LLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; + public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + 
public ZSTD_seqSymbol e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; 
+ public ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol 
e158; + public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public 
ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + public ZSTD_seqSymbol e257; + public ZSTD_seqSymbol e258; + public ZSTD_seqSymbol e259; + public ZSTD_seqSymbol e260; + public ZSTD_seqSymbol e261; + public ZSTD_seqSymbol e262; + public ZSTD_seqSymbol e263; + public ZSTD_seqSymbol e264; + public ZSTD_seqSymbol e265; + public ZSTD_seqSymbol e266; + public ZSTD_seqSymbol e267; + public ZSTD_seqSymbol e268; + public ZSTD_seqSymbol e269; + public ZSTD_seqSymbol e270; + public ZSTD_seqSymbol e271; + public ZSTD_seqSymbol e272; + public ZSTD_seqSymbol e273; + public ZSTD_seqSymbol e274; + public ZSTD_seqSymbol e275; + public ZSTD_seqSymbol e276; + public ZSTD_seqSymbol e277; + public ZSTD_seqSymbol e278; + public ZSTD_seqSymbol e279; + public ZSTD_seqSymbol e280; + public ZSTD_seqSymbol e281; + public ZSTD_seqSymbol e282; + public ZSTD_seqSymbol e283; + public ZSTD_seqSymbol e284; + public ZSTD_seqSymbol e285; + public ZSTD_seqSymbol e286; + public ZSTD_seqSymbol e287; + public ZSTD_seqSymbol e288; + public ZSTD_seqSymbol e289; + public ZSTD_seqSymbol e290; + public ZSTD_seqSymbol 
e291; + public ZSTD_seqSymbol e292; + public ZSTD_seqSymbol e293; + public ZSTD_seqSymbol e294; + public ZSTD_seqSymbol e295; + public ZSTD_seqSymbol e296; + public ZSTD_seqSymbol e297; + public ZSTD_seqSymbol e298; + public ZSTD_seqSymbol e299; + public ZSTD_seqSymbol e300; + public ZSTD_seqSymbol e301; + public ZSTD_seqSymbol e302; + public ZSTD_seqSymbol e303; + public ZSTD_seqSymbol e304; + public ZSTD_seqSymbol e305; + public ZSTD_seqSymbol e306; + public ZSTD_seqSymbol e307; + public ZSTD_seqSymbol e308; + public ZSTD_seqSymbol e309; + public ZSTD_seqSymbol e310; + public ZSTD_seqSymbol e311; + public ZSTD_seqSymbol e312; + public ZSTD_seqSymbol e313; + public ZSTD_seqSymbol e314; + public ZSTD_seqSymbol e315; + public ZSTD_seqSymbol e316; + public ZSTD_seqSymbol e317; + public ZSTD_seqSymbol e318; + public ZSTD_seqSymbol e319; + public ZSTD_seqSymbol e320; + public ZSTD_seqSymbol e321; + public ZSTD_seqSymbol e322; + public ZSTD_seqSymbol e323; + public ZSTD_seqSymbol e324; + public ZSTD_seqSymbol e325; + public ZSTD_seqSymbol e326; + public ZSTD_seqSymbol e327; + public ZSTD_seqSymbol e328; + public ZSTD_seqSymbol e329; + public ZSTD_seqSymbol e330; + public ZSTD_seqSymbol e331; + public ZSTD_seqSymbol e332; + public ZSTD_seqSymbol e333; + public ZSTD_seqSymbol e334; + public ZSTD_seqSymbol e335; + public ZSTD_seqSymbol e336; + public ZSTD_seqSymbol e337; + public ZSTD_seqSymbol e338; + public ZSTD_seqSymbol e339; + public ZSTD_seqSymbol e340; + public ZSTD_seqSymbol e341; + public ZSTD_seqSymbol e342; + public ZSTD_seqSymbol e343; + public ZSTD_seqSymbol e344; + public ZSTD_seqSymbol e345; + public ZSTD_seqSymbol e346; + public ZSTD_seqSymbol e347; + public ZSTD_seqSymbol e348; + public ZSTD_seqSymbol e349; + public ZSTD_seqSymbol e350; + public ZSTD_seqSymbol e351; + public ZSTD_seqSymbol e352; + public ZSTD_seqSymbol e353; + public ZSTD_seqSymbol e354; + public ZSTD_seqSymbol e355; + public ZSTD_seqSymbol e356; + public ZSTD_seqSymbol e357; + public 
ZSTD_seqSymbol e358; + public ZSTD_seqSymbol e359; + public ZSTD_seqSymbol e360; + public ZSTD_seqSymbol e361; + public ZSTD_seqSymbol e362; + public ZSTD_seqSymbol e363; + public ZSTD_seqSymbol e364; + public ZSTD_seqSymbol e365; + public ZSTD_seqSymbol e366; + public ZSTD_seqSymbol e367; + public ZSTD_seqSymbol e368; + public ZSTD_seqSymbol e369; + public ZSTD_seqSymbol e370; + public ZSTD_seqSymbol e371; + public ZSTD_seqSymbol e372; + public ZSTD_seqSymbol e373; + public ZSTD_seqSymbol e374; + public ZSTD_seqSymbol e375; + public ZSTD_seqSymbol e376; + public ZSTD_seqSymbol e377; + public ZSTD_seqSymbol e378; + public ZSTD_seqSymbol e379; + public ZSTD_seqSymbol e380; + public ZSTD_seqSymbol e381; + public ZSTD_seqSymbol e382; + public ZSTD_seqSymbol e383; + public ZSTD_seqSymbol e384; + public ZSTD_seqSymbol e385; + public ZSTD_seqSymbol e386; + public ZSTD_seqSymbol e387; + public ZSTD_seqSymbol e388; + public ZSTD_seqSymbol e389; + public ZSTD_seqSymbol e390; + public ZSTD_seqSymbol e391; + public ZSTD_seqSymbol e392; + public ZSTD_seqSymbol e393; + public ZSTD_seqSymbol e394; + public ZSTD_seqSymbol e395; + public ZSTD_seqSymbol e396; + public ZSTD_seqSymbol e397; + public ZSTD_seqSymbol e398; + public ZSTD_seqSymbol e399; + public ZSTD_seqSymbol e400; + public ZSTD_seqSymbol e401; + public ZSTD_seqSymbol e402; + public ZSTD_seqSymbol e403; + public ZSTD_seqSymbol e404; + public ZSTD_seqSymbol e405; + public ZSTD_seqSymbol e406; + public ZSTD_seqSymbol e407; + public ZSTD_seqSymbol e408; + public ZSTD_seqSymbol e409; + public ZSTD_seqSymbol e410; + public ZSTD_seqSymbol e411; + public ZSTD_seqSymbol e412; + public ZSTD_seqSymbol e413; + public ZSTD_seqSymbol e414; + public ZSTD_seqSymbol e415; + public ZSTD_seqSymbol e416; + public ZSTD_seqSymbol e417; + public ZSTD_seqSymbol e418; + public ZSTD_seqSymbol e419; + public ZSTD_seqSymbol e420; + public ZSTD_seqSymbol e421; + public ZSTD_seqSymbol e422; + public ZSTD_seqSymbol e423; + public ZSTD_seqSymbol 
e424; + public ZSTD_seqSymbol e425; + public ZSTD_seqSymbol e426; + public ZSTD_seqSymbol e427; + public ZSTD_seqSymbol e428; + public ZSTD_seqSymbol e429; + public ZSTD_seqSymbol e430; + public ZSTD_seqSymbol e431; + public ZSTD_seqSymbol e432; + public ZSTD_seqSymbol e433; + public ZSTD_seqSymbol e434; + public ZSTD_seqSymbol e435; + public ZSTD_seqSymbol e436; + public ZSTD_seqSymbol e437; + public ZSTD_seqSymbol e438; + public ZSTD_seqSymbol e439; + public ZSTD_seqSymbol e440; + public ZSTD_seqSymbol e441; + public ZSTD_seqSymbol e442; + public ZSTD_seqSymbol e443; + public ZSTD_seqSymbol e444; + public ZSTD_seqSymbol e445; + public ZSTD_seqSymbol e446; + public ZSTD_seqSymbol e447; + public ZSTD_seqSymbol e448; + public ZSTD_seqSymbol e449; + public ZSTD_seqSymbol e450; + public ZSTD_seqSymbol e451; + public ZSTD_seqSymbol e452; + public ZSTD_seqSymbol e453; + public ZSTD_seqSymbol e454; + public ZSTD_seqSymbol e455; + public ZSTD_seqSymbol e456; + public ZSTD_seqSymbol e457; + public ZSTD_seqSymbol e458; + public ZSTD_seqSymbol e459; + public ZSTD_seqSymbol e460; + public ZSTD_seqSymbol e461; + public ZSTD_seqSymbol e462; + public ZSTD_seqSymbol e463; + public ZSTD_seqSymbol e464; + public ZSTD_seqSymbol e465; + public ZSTD_seqSymbol e466; + public ZSTD_seqSymbol e467; + public ZSTD_seqSymbol e468; + public ZSTD_seqSymbol e469; + public ZSTD_seqSymbol e470; + public ZSTD_seqSymbol e471; + public ZSTD_seqSymbol e472; + public ZSTD_seqSymbol e473; + public ZSTD_seqSymbol e474; + public ZSTD_seqSymbol e475; + public ZSTD_seqSymbol e476; + public ZSTD_seqSymbol e477; + public ZSTD_seqSymbol e478; + public ZSTD_seqSymbol e479; + public ZSTD_seqSymbol e480; + public ZSTD_seqSymbol e481; + public ZSTD_seqSymbol e482; + public ZSTD_seqSymbol e483; + public ZSTD_seqSymbol e484; + public ZSTD_seqSymbol e485; + public ZSTD_seqSymbol e486; + public ZSTD_seqSymbol e487; + public ZSTD_seqSymbol e488; + public ZSTD_seqSymbol e489; + public ZSTD_seqSymbol e490; + public 
ZSTD_seqSymbol e491; + public ZSTD_seqSymbol e492; + public ZSTD_seqSymbol e493; + public ZSTD_seqSymbol e494; + public ZSTD_seqSymbol e495; + public ZSTD_seqSymbol e496; + public ZSTD_seqSymbol e497; + public ZSTD_seqSymbol e498; + public ZSTD_seqSymbol e499; + public ZSTD_seqSymbol e500; + public ZSTD_seqSymbol e501; + public ZSTD_seqSymbol e502; + public ZSTD_seqSymbol e503; + public ZSTD_seqSymbol e504; + public ZSTD_seqSymbol e505; + public ZSTD_seqSymbol e506; + public ZSTD_seqSymbol e507; + public ZSTD_seqSymbol e508; + public ZSTD_seqSymbol e509; + public ZSTD_seqSymbol e510; + public ZSTD_seqSymbol e511; + public ZSTD_seqSymbol e512; + } #endif #if NET8_0_OR_GREATER @@ -552,266 +552,266 @@ public unsafe struct _OFTable_e__FixedBuffer } #else - public unsafe struct _OFTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - public ZSTD_seqSymbol e1; - public ZSTD_seqSymbol e2; - public ZSTD_seqSymbol e3; - public ZSTD_seqSymbol e4; - public ZSTD_seqSymbol e5; - public ZSTD_seqSymbol e6; - public ZSTD_seqSymbol e7; - public ZSTD_seqSymbol e8; - public ZSTD_seqSymbol e9; - public ZSTD_seqSymbol e10; - public ZSTD_seqSymbol e11; - public ZSTD_seqSymbol e12; - public ZSTD_seqSymbol e13; - public ZSTD_seqSymbol e14; - public ZSTD_seqSymbol e15; - public ZSTD_seqSymbol e16; - public ZSTD_seqSymbol e17; - public ZSTD_seqSymbol e18; - public ZSTD_seqSymbol e19; - public ZSTD_seqSymbol e20; - public ZSTD_seqSymbol e21; - public ZSTD_seqSymbol e22; - public ZSTD_seqSymbol e23; - public ZSTD_seqSymbol e24; - public ZSTD_seqSymbol e25; - public ZSTD_seqSymbol e26; - public ZSTD_seqSymbol e27; - public ZSTD_seqSymbol e28; - public ZSTD_seqSymbol e29; - public ZSTD_seqSymbol e30; - public ZSTD_seqSymbol e31; - public ZSTD_seqSymbol e32; - public ZSTD_seqSymbol e33; - public ZSTD_seqSymbol e34; - public ZSTD_seqSymbol e35; - public ZSTD_seqSymbol e36; - public ZSTD_seqSymbol e37; - public ZSTD_seqSymbol e38; - public ZSTD_seqSymbol e39; - public ZSTD_seqSymbol e40; - public 
ZSTD_seqSymbol e41; - public ZSTD_seqSymbol e42; - public ZSTD_seqSymbol e43; - public ZSTD_seqSymbol e44; - public ZSTD_seqSymbol e45; - public ZSTD_seqSymbol e46; - public ZSTD_seqSymbol e47; - public ZSTD_seqSymbol e48; - public ZSTD_seqSymbol e49; - public ZSTD_seqSymbol e50; - public ZSTD_seqSymbol e51; - public ZSTD_seqSymbol e52; - public ZSTD_seqSymbol e53; - public ZSTD_seqSymbol e54; - public ZSTD_seqSymbol e55; - public ZSTD_seqSymbol e56; - public ZSTD_seqSymbol e57; - public ZSTD_seqSymbol e58; - public ZSTD_seqSymbol e59; - public ZSTD_seqSymbol e60; - public ZSTD_seqSymbol e61; - public ZSTD_seqSymbol e62; - public ZSTD_seqSymbol e63; - public ZSTD_seqSymbol e64; - public ZSTD_seqSymbol e65; - public ZSTD_seqSymbol e66; - public ZSTD_seqSymbol e67; - public ZSTD_seqSymbol e68; - public ZSTD_seqSymbol e69; - public ZSTD_seqSymbol e70; - public ZSTD_seqSymbol e71; - public ZSTD_seqSymbol e72; - public ZSTD_seqSymbol e73; - public ZSTD_seqSymbol e74; - public ZSTD_seqSymbol e75; - public ZSTD_seqSymbol e76; - public ZSTD_seqSymbol e77; - public ZSTD_seqSymbol e78; - public ZSTD_seqSymbol e79; - public ZSTD_seqSymbol e80; - public ZSTD_seqSymbol e81; - public ZSTD_seqSymbol e82; - public ZSTD_seqSymbol e83; - public ZSTD_seqSymbol e84; - public ZSTD_seqSymbol e85; - public ZSTD_seqSymbol e86; - public ZSTD_seqSymbol e87; - public ZSTD_seqSymbol e88; - public ZSTD_seqSymbol e89; - public ZSTD_seqSymbol e90; - public ZSTD_seqSymbol e91; - public ZSTD_seqSymbol e92; - public ZSTD_seqSymbol e93; - public ZSTD_seqSymbol e94; - public ZSTD_seqSymbol e95; - public ZSTD_seqSymbol e96; - public ZSTD_seqSymbol e97; - public ZSTD_seqSymbol e98; - public ZSTD_seqSymbol e99; - public ZSTD_seqSymbol e100; - public ZSTD_seqSymbol e101; - public ZSTD_seqSymbol e102; - public ZSTD_seqSymbol e103; - public ZSTD_seqSymbol e104; - public ZSTD_seqSymbol e105; - public ZSTD_seqSymbol e106; - public ZSTD_seqSymbol e107; - public ZSTD_seqSymbol e108; - public ZSTD_seqSymbol 
e109; - public ZSTD_seqSymbol e110; - public ZSTD_seqSymbol e111; - public ZSTD_seqSymbol e112; - public ZSTD_seqSymbol e113; - public ZSTD_seqSymbol e114; - public ZSTD_seqSymbol e115; - public ZSTD_seqSymbol e116; - public ZSTD_seqSymbol e117; - public ZSTD_seqSymbol e118; - public ZSTD_seqSymbol e119; - public ZSTD_seqSymbol e120; - public ZSTD_seqSymbol e121; - public ZSTD_seqSymbol e122; - public ZSTD_seqSymbol e123; - public ZSTD_seqSymbol e124; - public ZSTD_seqSymbol e125; - public ZSTD_seqSymbol e126; - public ZSTD_seqSymbol e127; - public ZSTD_seqSymbol e128; - public ZSTD_seqSymbol e129; - public ZSTD_seqSymbol e130; - public ZSTD_seqSymbol e131; - public ZSTD_seqSymbol e132; - public ZSTD_seqSymbol e133; - public ZSTD_seqSymbol e134; - public ZSTD_seqSymbol e135; - public ZSTD_seqSymbol e136; - public ZSTD_seqSymbol e137; - public ZSTD_seqSymbol e138; - public ZSTD_seqSymbol e139; - public ZSTD_seqSymbol e140; - public ZSTD_seqSymbol e141; - public ZSTD_seqSymbol e142; - public ZSTD_seqSymbol e143; - public ZSTD_seqSymbol e144; - public ZSTD_seqSymbol e145; - public ZSTD_seqSymbol e146; - public ZSTD_seqSymbol e147; - public ZSTD_seqSymbol e148; - public ZSTD_seqSymbol e149; - public ZSTD_seqSymbol e150; - public ZSTD_seqSymbol e151; - public ZSTD_seqSymbol e152; - public ZSTD_seqSymbol e153; - public ZSTD_seqSymbol e154; - public ZSTD_seqSymbol e155; - public ZSTD_seqSymbol e156; - public ZSTD_seqSymbol e157; - public ZSTD_seqSymbol e158; - public ZSTD_seqSymbol e159; - public ZSTD_seqSymbol e160; - public ZSTD_seqSymbol e161; - public ZSTD_seqSymbol e162; - public ZSTD_seqSymbol e163; - public ZSTD_seqSymbol e164; - public ZSTD_seqSymbol e165; - public ZSTD_seqSymbol e166; - public ZSTD_seqSymbol e167; - public ZSTD_seqSymbol e168; - public ZSTD_seqSymbol e169; - public ZSTD_seqSymbol e170; - public ZSTD_seqSymbol e171; - public ZSTD_seqSymbol e172; - public ZSTD_seqSymbol e173; - public ZSTD_seqSymbol e174; - public ZSTD_seqSymbol e175; - public 
ZSTD_seqSymbol e176; - public ZSTD_seqSymbol e177; - public ZSTD_seqSymbol e178; - public ZSTD_seqSymbol e179; - public ZSTD_seqSymbol e180; - public ZSTD_seqSymbol e181; - public ZSTD_seqSymbol e182; - public ZSTD_seqSymbol e183; - public ZSTD_seqSymbol e184; - public ZSTD_seqSymbol e185; - public ZSTD_seqSymbol e186; - public ZSTD_seqSymbol e187; - public ZSTD_seqSymbol e188; - public ZSTD_seqSymbol e189; - public ZSTD_seqSymbol e190; - public ZSTD_seqSymbol e191; - public ZSTD_seqSymbol e192; - public ZSTD_seqSymbol e193; - public ZSTD_seqSymbol e194; - public ZSTD_seqSymbol e195; - public ZSTD_seqSymbol e196; - public ZSTD_seqSymbol e197; - public ZSTD_seqSymbol e198; - public ZSTD_seqSymbol e199; - public ZSTD_seqSymbol e200; - public ZSTD_seqSymbol e201; - public ZSTD_seqSymbol e202; - public ZSTD_seqSymbol e203; - public ZSTD_seqSymbol e204; - public ZSTD_seqSymbol e205; - public ZSTD_seqSymbol e206; - public ZSTD_seqSymbol e207; - public ZSTD_seqSymbol e208; - public ZSTD_seqSymbol e209; - public ZSTD_seqSymbol e210; - public ZSTD_seqSymbol e211; - public ZSTD_seqSymbol e212; - public ZSTD_seqSymbol e213; - public ZSTD_seqSymbol e214; - public ZSTD_seqSymbol e215; - public ZSTD_seqSymbol e216; - public ZSTD_seqSymbol e217; - public ZSTD_seqSymbol e218; - public ZSTD_seqSymbol e219; - public ZSTD_seqSymbol e220; - public ZSTD_seqSymbol e221; - public ZSTD_seqSymbol e222; - public ZSTD_seqSymbol e223; - public ZSTD_seqSymbol e224; - public ZSTD_seqSymbol e225; - public ZSTD_seqSymbol e226; - public ZSTD_seqSymbol e227; - public ZSTD_seqSymbol e228; - public ZSTD_seqSymbol e229; - public ZSTD_seqSymbol e230; - public ZSTD_seqSymbol e231; - public ZSTD_seqSymbol e232; - public ZSTD_seqSymbol e233; - public ZSTD_seqSymbol e234; - public ZSTD_seqSymbol e235; - public ZSTD_seqSymbol e236; - public ZSTD_seqSymbol e237; - public ZSTD_seqSymbol e238; - public ZSTD_seqSymbol e239; - public ZSTD_seqSymbol e240; - public ZSTD_seqSymbol e241; - public ZSTD_seqSymbol 
e242; - public ZSTD_seqSymbol e243; - public ZSTD_seqSymbol e244; - public ZSTD_seqSymbol e245; - public ZSTD_seqSymbol e246; - public ZSTD_seqSymbol e247; - public ZSTD_seqSymbol e248; - public ZSTD_seqSymbol e249; - public ZSTD_seqSymbol e250; - public ZSTD_seqSymbol e251; - public ZSTD_seqSymbol e252; - public ZSTD_seqSymbol e253; - public ZSTD_seqSymbol e254; - public ZSTD_seqSymbol e255; - public ZSTD_seqSymbol e256; - } + public unsafe struct _OFTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; + public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + public ZSTD_seqSymbol e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public 
ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; + public ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public 
ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol e158; + public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol 
e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public 
ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + } #endif #if NET8_0_OR_GREATER @@ -822,522 +822,521 @@ public unsafe struct _MLTable_e__FixedBuffer } #else - public unsafe struct _MLTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - public ZSTD_seqSymbol e1; - public ZSTD_seqSymbol e2; - public ZSTD_seqSymbol e3; - public ZSTD_seqSymbol e4; - public ZSTD_seqSymbol e5; - public ZSTD_seqSymbol e6; - public ZSTD_seqSymbol e7; - public ZSTD_seqSymbol e8; - public ZSTD_seqSymbol e9; - public ZSTD_seqSymbol e10; - public ZSTD_seqSymbol e11; - public ZSTD_seqSymbol e12; - public ZSTD_seqSymbol e13; - public ZSTD_seqSymbol e14; - public ZSTD_seqSymbol e15; - public ZSTD_seqSymbol e16; - public ZSTD_seqSymbol e17; - public ZSTD_seqSymbol e18; - public ZSTD_seqSymbol e19; - public ZSTD_seqSymbol e20; - public ZSTD_seqSymbol e21; - public ZSTD_seqSymbol e22; - public ZSTD_seqSymbol e23; - public ZSTD_seqSymbol e24; - public ZSTD_seqSymbol e25; - public ZSTD_seqSymbol e26; - public ZSTD_seqSymbol e27; - public ZSTD_seqSymbol e28; - public ZSTD_seqSymbol e29; - public ZSTD_seqSymbol e30; - public ZSTD_seqSymbol e31; - public ZSTD_seqSymbol e32; - public ZSTD_seqSymbol e33; - public ZSTD_seqSymbol e34; - public ZSTD_seqSymbol e35; - public ZSTD_seqSymbol e36; - public ZSTD_seqSymbol e37; - public ZSTD_seqSymbol e38; - public ZSTD_seqSymbol e39; - public ZSTD_seqSymbol e40; - public ZSTD_seqSymbol e41; - public ZSTD_seqSymbol e42; - public ZSTD_seqSymbol e43; - public ZSTD_seqSymbol e44; - public ZSTD_seqSymbol e45; - public ZSTD_seqSymbol e46; - public ZSTD_seqSymbol e47; - public ZSTD_seqSymbol e48; - public ZSTD_seqSymbol e49; - public ZSTD_seqSymbol e50; - public ZSTD_seqSymbol e51; - public ZSTD_seqSymbol e52; - public ZSTD_seqSymbol e53; - public ZSTD_seqSymbol e54; - public ZSTD_seqSymbol e55; - public ZSTD_seqSymbol e56; - public ZSTD_seqSymbol e57; - public ZSTD_seqSymbol e58; - public ZSTD_seqSymbol 
e59; - public ZSTD_seqSymbol e60; - public ZSTD_seqSymbol e61; - public ZSTD_seqSymbol e62; - public ZSTD_seqSymbol e63; - public ZSTD_seqSymbol e64; - public ZSTD_seqSymbol e65; - public ZSTD_seqSymbol e66; - public ZSTD_seqSymbol e67; - public ZSTD_seqSymbol e68; - public ZSTD_seqSymbol e69; - public ZSTD_seqSymbol e70; - public ZSTD_seqSymbol e71; - public ZSTD_seqSymbol e72; - public ZSTD_seqSymbol e73; - public ZSTD_seqSymbol e74; - public ZSTD_seqSymbol e75; - public ZSTD_seqSymbol e76; - public ZSTD_seqSymbol e77; - public ZSTD_seqSymbol e78; - public ZSTD_seqSymbol e79; - public ZSTD_seqSymbol e80; - public ZSTD_seqSymbol e81; - public ZSTD_seqSymbol e82; - public ZSTD_seqSymbol e83; - public ZSTD_seqSymbol e84; - public ZSTD_seqSymbol e85; - public ZSTD_seqSymbol e86; - public ZSTD_seqSymbol e87; - public ZSTD_seqSymbol e88; - public ZSTD_seqSymbol e89; - public ZSTD_seqSymbol e90; - public ZSTD_seqSymbol e91; - public ZSTD_seqSymbol e92; - public ZSTD_seqSymbol e93; - public ZSTD_seqSymbol e94; - public ZSTD_seqSymbol e95; - public ZSTD_seqSymbol e96; - public ZSTD_seqSymbol e97; - public ZSTD_seqSymbol e98; - public ZSTD_seqSymbol e99; - public ZSTD_seqSymbol e100; - public ZSTD_seqSymbol e101; - public ZSTD_seqSymbol e102; - public ZSTD_seqSymbol e103; - public ZSTD_seqSymbol e104; - public ZSTD_seqSymbol e105; - public ZSTD_seqSymbol e106; - public ZSTD_seqSymbol e107; - public ZSTD_seqSymbol e108; - public ZSTD_seqSymbol e109; - public ZSTD_seqSymbol e110; - public ZSTD_seqSymbol e111; - public ZSTD_seqSymbol e112; - public ZSTD_seqSymbol e113; - public ZSTD_seqSymbol e114; - public ZSTD_seqSymbol e115; - public ZSTD_seqSymbol e116; - public ZSTD_seqSymbol e117; - public ZSTD_seqSymbol e118; - public ZSTD_seqSymbol e119; - public ZSTD_seqSymbol e120; - public ZSTD_seqSymbol e121; - public ZSTD_seqSymbol e122; - public ZSTD_seqSymbol e123; - public ZSTD_seqSymbol e124; - public ZSTD_seqSymbol e125; - public ZSTD_seqSymbol e126; - public ZSTD_seqSymbol 
e127; - public ZSTD_seqSymbol e128; - public ZSTD_seqSymbol e129; - public ZSTD_seqSymbol e130; - public ZSTD_seqSymbol e131; - public ZSTD_seqSymbol e132; - public ZSTD_seqSymbol e133; - public ZSTD_seqSymbol e134; - public ZSTD_seqSymbol e135; - public ZSTD_seqSymbol e136; - public ZSTD_seqSymbol e137; - public ZSTD_seqSymbol e138; - public ZSTD_seqSymbol e139; - public ZSTD_seqSymbol e140; - public ZSTD_seqSymbol e141; - public ZSTD_seqSymbol e142; - public ZSTD_seqSymbol e143; - public ZSTD_seqSymbol e144; - public ZSTD_seqSymbol e145; - public ZSTD_seqSymbol e146; - public ZSTD_seqSymbol e147; - public ZSTD_seqSymbol e148; - public ZSTD_seqSymbol e149; - public ZSTD_seqSymbol e150; - public ZSTD_seqSymbol e151; - public ZSTD_seqSymbol e152; - public ZSTD_seqSymbol e153; - public ZSTD_seqSymbol e154; - public ZSTD_seqSymbol e155; - public ZSTD_seqSymbol e156; - public ZSTD_seqSymbol e157; - public ZSTD_seqSymbol e158; - public ZSTD_seqSymbol e159; - public ZSTD_seqSymbol e160; - public ZSTD_seqSymbol e161; - public ZSTD_seqSymbol e162; - public ZSTD_seqSymbol e163; - public ZSTD_seqSymbol e164; - public ZSTD_seqSymbol e165; - public ZSTD_seqSymbol e166; - public ZSTD_seqSymbol e167; - public ZSTD_seqSymbol e168; - public ZSTD_seqSymbol e169; - public ZSTD_seqSymbol e170; - public ZSTD_seqSymbol e171; - public ZSTD_seqSymbol e172; - public ZSTD_seqSymbol e173; - public ZSTD_seqSymbol e174; - public ZSTD_seqSymbol e175; - public ZSTD_seqSymbol e176; - public ZSTD_seqSymbol e177; - public ZSTD_seqSymbol e178; - public ZSTD_seqSymbol e179; - public ZSTD_seqSymbol e180; - public ZSTD_seqSymbol e181; - public ZSTD_seqSymbol e182; - public ZSTD_seqSymbol e183; - public ZSTD_seqSymbol e184; - public ZSTD_seqSymbol e185; - public ZSTD_seqSymbol e186; - public ZSTD_seqSymbol e187; - public ZSTD_seqSymbol e188; - public ZSTD_seqSymbol e189; - public ZSTD_seqSymbol e190; - public ZSTD_seqSymbol e191; - public ZSTD_seqSymbol e192; - public ZSTD_seqSymbol e193; - public 
ZSTD_seqSymbol e194; - public ZSTD_seqSymbol e195; - public ZSTD_seqSymbol e196; - public ZSTD_seqSymbol e197; - public ZSTD_seqSymbol e198; - public ZSTD_seqSymbol e199; - public ZSTD_seqSymbol e200; - public ZSTD_seqSymbol e201; - public ZSTD_seqSymbol e202; - public ZSTD_seqSymbol e203; - public ZSTD_seqSymbol e204; - public ZSTD_seqSymbol e205; - public ZSTD_seqSymbol e206; - public ZSTD_seqSymbol e207; - public ZSTD_seqSymbol e208; - public ZSTD_seqSymbol e209; - public ZSTD_seqSymbol e210; - public ZSTD_seqSymbol e211; - public ZSTD_seqSymbol e212; - public ZSTD_seqSymbol e213; - public ZSTD_seqSymbol e214; - public ZSTD_seqSymbol e215; - public ZSTD_seqSymbol e216; - public ZSTD_seqSymbol e217; - public ZSTD_seqSymbol e218; - public ZSTD_seqSymbol e219; - public ZSTD_seqSymbol e220; - public ZSTD_seqSymbol e221; - public ZSTD_seqSymbol e222; - public ZSTD_seqSymbol e223; - public ZSTD_seqSymbol e224; - public ZSTD_seqSymbol e225; - public ZSTD_seqSymbol e226; - public ZSTD_seqSymbol e227; - public ZSTD_seqSymbol e228; - public ZSTD_seqSymbol e229; - public ZSTD_seqSymbol e230; - public ZSTD_seqSymbol e231; - public ZSTD_seqSymbol e232; - public ZSTD_seqSymbol e233; - public ZSTD_seqSymbol e234; - public ZSTD_seqSymbol e235; - public ZSTD_seqSymbol e236; - public ZSTD_seqSymbol e237; - public ZSTD_seqSymbol e238; - public ZSTD_seqSymbol e239; - public ZSTD_seqSymbol e240; - public ZSTD_seqSymbol e241; - public ZSTD_seqSymbol e242; - public ZSTD_seqSymbol e243; - public ZSTD_seqSymbol e244; - public ZSTD_seqSymbol e245; - public ZSTD_seqSymbol e246; - public ZSTD_seqSymbol e247; - public ZSTD_seqSymbol e248; - public ZSTD_seqSymbol e249; - public ZSTD_seqSymbol e250; - public ZSTD_seqSymbol e251; - public ZSTD_seqSymbol e252; - public ZSTD_seqSymbol e253; - public ZSTD_seqSymbol e254; - public ZSTD_seqSymbol e255; - public ZSTD_seqSymbol e256; - public ZSTD_seqSymbol e257; - public ZSTD_seqSymbol e258; - public ZSTD_seqSymbol e259; - public ZSTD_seqSymbol 
e260; - public ZSTD_seqSymbol e261; - public ZSTD_seqSymbol e262; - public ZSTD_seqSymbol e263; - public ZSTD_seqSymbol e264; - public ZSTD_seqSymbol e265; - public ZSTD_seqSymbol e266; - public ZSTD_seqSymbol e267; - public ZSTD_seqSymbol e268; - public ZSTD_seqSymbol e269; - public ZSTD_seqSymbol e270; - public ZSTD_seqSymbol e271; - public ZSTD_seqSymbol e272; - public ZSTD_seqSymbol e273; - public ZSTD_seqSymbol e274; - public ZSTD_seqSymbol e275; - public ZSTD_seqSymbol e276; - public ZSTD_seqSymbol e277; - public ZSTD_seqSymbol e278; - public ZSTD_seqSymbol e279; - public ZSTD_seqSymbol e280; - public ZSTD_seqSymbol e281; - public ZSTD_seqSymbol e282; - public ZSTD_seqSymbol e283; - public ZSTD_seqSymbol e284; - public ZSTD_seqSymbol e285; - public ZSTD_seqSymbol e286; - public ZSTD_seqSymbol e287; - public ZSTD_seqSymbol e288; - public ZSTD_seqSymbol e289; - public ZSTD_seqSymbol e290; - public ZSTD_seqSymbol e291; - public ZSTD_seqSymbol e292; - public ZSTD_seqSymbol e293; - public ZSTD_seqSymbol e294; - public ZSTD_seqSymbol e295; - public ZSTD_seqSymbol e296; - public ZSTD_seqSymbol e297; - public ZSTD_seqSymbol e298; - public ZSTD_seqSymbol e299; - public ZSTD_seqSymbol e300; - public ZSTD_seqSymbol e301; - public ZSTD_seqSymbol e302; - public ZSTD_seqSymbol e303; - public ZSTD_seqSymbol e304; - public ZSTD_seqSymbol e305; - public ZSTD_seqSymbol e306; - public ZSTD_seqSymbol e307; - public ZSTD_seqSymbol e308; - public ZSTD_seqSymbol e309; - public ZSTD_seqSymbol e310; - public ZSTD_seqSymbol e311; - public ZSTD_seqSymbol e312; - public ZSTD_seqSymbol e313; - public ZSTD_seqSymbol e314; - public ZSTD_seqSymbol e315; - public ZSTD_seqSymbol e316; - public ZSTD_seqSymbol e317; - public ZSTD_seqSymbol e318; - public ZSTD_seqSymbol e319; - public ZSTD_seqSymbol e320; - public ZSTD_seqSymbol e321; - public ZSTD_seqSymbol e322; - public ZSTD_seqSymbol e323; - public ZSTD_seqSymbol e324; - public ZSTD_seqSymbol e325; - public ZSTD_seqSymbol e326; - public 
ZSTD_seqSymbol e327; - public ZSTD_seqSymbol e328; - public ZSTD_seqSymbol e329; - public ZSTD_seqSymbol e330; - public ZSTD_seqSymbol e331; - public ZSTD_seqSymbol e332; - public ZSTD_seqSymbol e333; - public ZSTD_seqSymbol e334; - public ZSTD_seqSymbol e335; - public ZSTD_seqSymbol e336; - public ZSTD_seqSymbol e337; - public ZSTD_seqSymbol e338; - public ZSTD_seqSymbol e339; - public ZSTD_seqSymbol e340; - public ZSTD_seqSymbol e341; - public ZSTD_seqSymbol e342; - public ZSTD_seqSymbol e343; - public ZSTD_seqSymbol e344; - public ZSTD_seqSymbol e345; - public ZSTD_seqSymbol e346; - public ZSTD_seqSymbol e347; - public ZSTD_seqSymbol e348; - public ZSTD_seqSymbol e349; - public ZSTD_seqSymbol e350; - public ZSTD_seqSymbol e351; - public ZSTD_seqSymbol e352; - public ZSTD_seqSymbol e353; - public ZSTD_seqSymbol e354; - public ZSTD_seqSymbol e355; - public ZSTD_seqSymbol e356; - public ZSTD_seqSymbol e357; - public ZSTD_seqSymbol e358; - public ZSTD_seqSymbol e359; - public ZSTD_seqSymbol e360; - public ZSTD_seqSymbol e361; - public ZSTD_seqSymbol e362; - public ZSTD_seqSymbol e363; - public ZSTD_seqSymbol e364; - public ZSTD_seqSymbol e365; - public ZSTD_seqSymbol e366; - public ZSTD_seqSymbol e367; - public ZSTD_seqSymbol e368; - public ZSTD_seqSymbol e369; - public ZSTD_seqSymbol e370; - public ZSTD_seqSymbol e371; - public ZSTD_seqSymbol e372; - public ZSTD_seqSymbol e373; - public ZSTD_seqSymbol e374; - public ZSTD_seqSymbol e375; - public ZSTD_seqSymbol e376; - public ZSTD_seqSymbol e377; - public ZSTD_seqSymbol e378; - public ZSTD_seqSymbol e379; - public ZSTD_seqSymbol e380; - public ZSTD_seqSymbol e381; - public ZSTD_seqSymbol e382; - public ZSTD_seqSymbol e383; - public ZSTD_seqSymbol e384; - public ZSTD_seqSymbol e385; - public ZSTD_seqSymbol e386; - public ZSTD_seqSymbol e387; - public ZSTD_seqSymbol e388; - public ZSTD_seqSymbol e389; - public ZSTD_seqSymbol e390; - public ZSTD_seqSymbol e391; - public ZSTD_seqSymbol e392; - public ZSTD_seqSymbol 
e393; - public ZSTD_seqSymbol e394; - public ZSTD_seqSymbol e395; - public ZSTD_seqSymbol e396; - public ZSTD_seqSymbol e397; - public ZSTD_seqSymbol e398; - public ZSTD_seqSymbol e399; - public ZSTD_seqSymbol e400; - public ZSTD_seqSymbol e401; - public ZSTD_seqSymbol e402; - public ZSTD_seqSymbol e403; - public ZSTD_seqSymbol e404; - public ZSTD_seqSymbol e405; - public ZSTD_seqSymbol e406; - public ZSTD_seqSymbol e407; - public ZSTD_seqSymbol e408; - public ZSTD_seqSymbol e409; - public ZSTD_seqSymbol e410; - public ZSTD_seqSymbol e411; - public ZSTD_seqSymbol e412; - public ZSTD_seqSymbol e413; - public ZSTD_seqSymbol e414; - public ZSTD_seqSymbol e415; - public ZSTD_seqSymbol e416; - public ZSTD_seqSymbol e417; - public ZSTD_seqSymbol e418; - public ZSTD_seqSymbol e419; - public ZSTD_seqSymbol e420; - public ZSTD_seqSymbol e421; - public ZSTD_seqSymbol e422; - public ZSTD_seqSymbol e423; - public ZSTD_seqSymbol e424; - public ZSTD_seqSymbol e425; - public ZSTD_seqSymbol e426; - public ZSTD_seqSymbol e427; - public ZSTD_seqSymbol e428; - public ZSTD_seqSymbol e429; - public ZSTD_seqSymbol e430; - public ZSTD_seqSymbol e431; - public ZSTD_seqSymbol e432; - public ZSTD_seqSymbol e433; - public ZSTD_seqSymbol e434; - public ZSTD_seqSymbol e435; - public ZSTD_seqSymbol e436; - public ZSTD_seqSymbol e437; - public ZSTD_seqSymbol e438; - public ZSTD_seqSymbol e439; - public ZSTD_seqSymbol e440; - public ZSTD_seqSymbol e441; - public ZSTD_seqSymbol e442; - public ZSTD_seqSymbol e443; - public ZSTD_seqSymbol e444; - public ZSTD_seqSymbol e445; - public ZSTD_seqSymbol e446; - public ZSTD_seqSymbol e447; - public ZSTD_seqSymbol e448; - public ZSTD_seqSymbol e449; - public ZSTD_seqSymbol e450; - public ZSTD_seqSymbol e451; - public ZSTD_seqSymbol e452; - public ZSTD_seqSymbol e453; - public ZSTD_seqSymbol e454; - public ZSTD_seqSymbol e455; - public ZSTD_seqSymbol e456; - public ZSTD_seqSymbol e457; - public ZSTD_seqSymbol e458; - public ZSTD_seqSymbol e459; - public 
ZSTD_seqSymbol e460; - public ZSTD_seqSymbol e461; - public ZSTD_seqSymbol e462; - public ZSTD_seqSymbol e463; - public ZSTD_seqSymbol e464; - public ZSTD_seqSymbol e465; - public ZSTD_seqSymbol e466; - public ZSTD_seqSymbol e467; - public ZSTD_seqSymbol e468; - public ZSTD_seqSymbol e469; - public ZSTD_seqSymbol e470; - public ZSTD_seqSymbol e471; - public ZSTD_seqSymbol e472; - public ZSTD_seqSymbol e473; - public ZSTD_seqSymbol e474; - public ZSTD_seqSymbol e475; - public ZSTD_seqSymbol e476; - public ZSTD_seqSymbol e477; - public ZSTD_seqSymbol e478; - public ZSTD_seqSymbol e479; - public ZSTD_seqSymbol e480; - public ZSTD_seqSymbol e481; - public ZSTD_seqSymbol e482; - public ZSTD_seqSymbol e483; - public ZSTD_seqSymbol e484; - public ZSTD_seqSymbol e485; - public ZSTD_seqSymbol e486; - public ZSTD_seqSymbol e487; - public ZSTD_seqSymbol e488; - public ZSTD_seqSymbol e489; - public ZSTD_seqSymbol e490; - public ZSTD_seqSymbol e491; - public ZSTD_seqSymbol e492; - public ZSTD_seqSymbol e493; - public ZSTD_seqSymbol e494; - public ZSTD_seqSymbol e495; - public ZSTD_seqSymbol e496; - public ZSTD_seqSymbol e497; - public ZSTD_seqSymbol e498; - public ZSTD_seqSymbol e499; - public ZSTD_seqSymbol e500; - public ZSTD_seqSymbol e501; - public ZSTD_seqSymbol e502; - public ZSTD_seqSymbol e503; - public ZSTD_seqSymbol e504; - public ZSTD_seqSymbol e505; - public ZSTD_seqSymbol e506; - public ZSTD_seqSymbol e507; - public ZSTD_seqSymbol e508; - public ZSTD_seqSymbol e509; - public ZSTD_seqSymbol e510; - public ZSTD_seqSymbol e511; - public ZSTD_seqSymbol e512; - } -#endif + public unsafe struct _MLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + public ZSTD_seqSymbol e1; + public ZSTD_seqSymbol e2; + public ZSTD_seqSymbol e3; + public ZSTD_seqSymbol e4; + public ZSTD_seqSymbol e5; + public ZSTD_seqSymbol e6; + public ZSTD_seqSymbol e7; + public ZSTD_seqSymbol e8; + public ZSTD_seqSymbol e9; + public ZSTD_seqSymbol e10; + public ZSTD_seqSymbol e11; + public 
ZSTD_seqSymbol e12; + public ZSTD_seqSymbol e13; + public ZSTD_seqSymbol e14; + public ZSTD_seqSymbol e15; + public ZSTD_seqSymbol e16; + public ZSTD_seqSymbol e17; + public ZSTD_seqSymbol e18; + public ZSTD_seqSymbol e19; + public ZSTD_seqSymbol e20; + public ZSTD_seqSymbol e21; + public ZSTD_seqSymbol e22; + public ZSTD_seqSymbol e23; + public ZSTD_seqSymbol e24; + public ZSTD_seqSymbol e25; + public ZSTD_seqSymbol e26; + public ZSTD_seqSymbol e27; + public ZSTD_seqSymbol e28; + public ZSTD_seqSymbol e29; + public ZSTD_seqSymbol e30; + public ZSTD_seqSymbol e31; + public ZSTD_seqSymbol e32; + public ZSTD_seqSymbol e33; + public ZSTD_seqSymbol e34; + public ZSTD_seqSymbol e35; + public ZSTD_seqSymbol e36; + public ZSTD_seqSymbol e37; + public ZSTD_seqSymbol e38; + public ZSTD_seqSymbol e39; + public ZSTD_seqSymbol e40; + public ZSTD_seqSymbol e41; + public ZSTD_seqSymbol e42; + public ZSTD_seqSymbol e43; + public ZSTD_seqSymbol e44; + public ZSTD_seqSymbol e45; + public ZSTD_seqSymbol e46; + public ZSTD_seqSymbol e47; + public ZSTD_seqSymbol e48; + public ZSTD_seqSymbol e49; + public ZSTD_seqSymbol e50; + public ZSTD_seqSymbol e51; + public ZSTD_seqSymbol e52; + public ZSTD_seqSymbol e53; + public ZSTD_seqSymbol e54; + public ZSTD_seqSymbol e55; + public ZSTD_seqSymbol e56; + public ZSTD_seqSymbol e57; + public ZSTD_seqSymbol e58; + public ZSTD_seqSymbol e59; + public ZSTD_seqSymbol e60; + public ZSTD_seqSymbol e61; + public ZSTD_seqSymbol e62; + public ZSTD_seqSymbol e63; + public ZSTD_seqSymbol e64; + public ZSTD_seqSymbol e65; + public ZSTD_seqSymbol e66; + public ZSTD_seqSymbol e67; + public ZSTD_seqSymbol e68; + public ZSTD_seqSymbol e69; + public ZSTD_seqSymbol e70; + public ZSTD_seqSymbol e71; + public ZSTD_seqSymbol e72; + public ZSTD_seqSymbol e73; + public ZSTD_seqSymbol e74; + public ZSTD_seqSymbol e75; + public ZSTD_seqSymbol e76; + public ZSTD_seqSymbol e77; + public ZSTD_seqSymbol e78; + public ZSTD_seqSymbol e79; + public ZSTD_seqSymbol e80; + 
public ZSTD_seqSymbol e81; + public ZSTD_seqSymbol e82; + public ZSTD_seqSymbol e83; + public ZSTD_seqSymbol e84; + public ZSTD_seqSymbol e85; + public ZSTD_seqSymbol e86; + public ZSTD_seqSymbol e87; + public ZSTD_seqSymbol e88; + public ZSTD_seqSymbol e89; + public ZSTD_seqSymbol e90; + public ZSTD_seqSymbol e91; + public ZSTD_seqSymbol e92; + public ZSTD_seqSymbol e93; + public ZSTD_seqSymbol e94; + public ZSTD_seqSymbol e95; + public ZSTD_seqSymbol e96; + public ZSTD_seqSymbol e97; + public ZSTD_seqSymbol e98; + public ZSTD_seqSymbol e99; + public ZSTD_seqSymbol e100; + public ZSTD_seqSymbol e101; + public ZSTD_seqSymbol e102; + public ZSTD_seqSymbol e103; + public ZSTD_seqSymbol e104; + public ZSTD_seqSymbol e105; + public ZSTD_seqSymbol e106; + public ZSTD_seqSymbol e107; + public ZSTD_seqSymbol e108; + public ZSTD_seqSymbol e109; + public ZSTD_seqSymbol e110; + public ZSTD_seqSymbol e111; + public ZSTD_seqSymbol e112; + public ZSTD_seqSymbol e113; + public ZSTD_seqSymbol e114; + public ZSTD_seqSymbol e115; + public ZSTD_seqSymbol e116; + public ZSTD_seqSymbol e117; + public ZSTD_seqSymbol e118; + public ZSTD_seqSymbol e119; + public ZSTD_seqSymbol e120; + public ZSTD_seqSymbol e121; + public ZSTD_seqSymbol e122; + public ZSTD_seqSymbol e123; + public ZSTD_seqSymbol e124; + public ZSTD_seqSymbol e125; + public ZSTD_seqSymbol e126; + public ZSTD_seqSymbol e127; + public ZSTD_seqSymbol e128; + public ZSTD_seqSymbol e129; + public ZSTD_seqSymbol e130; + public ZSTD_seqSymbol e131; + public ZSTD_seqSymbol e132; + public ZSTD_seqSymbol e133; + public ZSTD_seqSymbol e134; + public ZSTD_seqSymbol e135; + public ZSTD_seqSymbol e136; + public ZSTD_seqSymbol e137; + public ZSTD_seqSymbol e138; + public ZSTD_seqSymbol e139; + public ZSTD_seqSymbol e140; + public ZSTD_seqSymbol e141; + public ZSTD_seqSymbol e142; + public ZSTD_seqSymbol e143; + public ZSTD_seqSymbol e144; + public ZSTD_seqSymbol e145; + public ZSTD_seqSymbol e146; + public ZSTD_seqSymbol e147; + public 
ZSTD_seqSymbol e148; + public ZSTD_seqSymbol e149; + public ZSTD_seqSymbol e150; + public ZSTD_seqSymbol e151; + public ZSTD_seqSymbol e152; + public ZSTD_seqSymbol e153; + public ZSTD_seqSymbol e154; + public ZSTD_seqSymbol e155; + public ZSTD_seqSymbol e156; + public ZSTD_seqSymbol e157; + public ZSTD_seqSymbol e158; + public ZSTD_seqSymbol e159; + public ZSTD_seqSymbol e160; + public ZSTD_seqSymbol e161; + public ZSTD_seqSymbol e162; + public ZSTD_seqSymbol e163; + public ZSTD_seqSymbol e164; + public ZSTD_seqSymbol e165; + public ZSTD_seqSymbol e166; + public ZSTD_seqSymbol e167; + public ZSTD_seqSymbol e168; + public ZSTD_seqSymbol e169; + public ZSTD_seqSymbol e170; + public ZSTD_seqSymbol e171; + public ZSTD_seqSymbol e172; + public ZSTD_seqSymbol e173; + public ZSTD_seqSymbol e174; + public ZSTD_seqSymbol e175; + public ZSTD_seqSymbol e176; + public ZSTD_seqSymbol e177; + public ZSTD_seqSymbol e178; + public ZSTD_seqSymbol e179; + public ZSTD_seqSymbol e180; + public ZSTD_seqSymbol e181; + public ZSTD_seqSymbol e182; + public ZSTD_seqSymbol e183; + public ZSTD_seqSymbol e184; + public ZSTD_seqSymbol e185; + public ZSTD_seqSymbol e186; + public ZSTD_seqSymbol e187; + public ZSTD_seqSymbol e188; + public ZSTD_seqSymbol e189; + public ZSTD_seqSymbol e190; + public ZSTD_seqSymbol e191; + public ZSTD_seqSymbol e192; + public ZSTD_seqSymbol e193; + public ZSTD_seqSymbol e194; + public ZSTD_seqSymbol e195; + public ZSTD_seqSymbol e196; + public ZSTD_seqSymbol e197; + public ZSTD_seqSymbol e198; + public ZSTD_seqSymbol e199; + public ZSTD_seqSymbol e200; + public ZSTD_seqSymbol e201; + public ZSTD_seqSymbol e202; + public ZSTD_seqSymbol e203; + public ZSTD_seqSymbol e204; + public ZSTD_seqSymbol e205; + public ZSTD_seqSymbol e206; + public ZSTD_seqSymbol e207; + public ZSTD_seqSymbol e208; + public ZSTD_seqSymbol e209; + public ZSTD_seqSymbol e210; + public ZSTD_seqSymbol e211; + public ZSTD_seqSymbol e212; + public ZSTD_seqSymbol e213; + public ZSTD_seqSymbol 
e214; + public ZSTD_seqSymbol e215; + public ZSTD_seqSymbol e216; + public ZSTD_seqSymbol e217; + public ZSTD_seqSymbol e218; + public ZSTD_seqSymbol e219; + public ZSTD_seqSymbol e220; + public ZSTD_seqSymbol e221; + public ZSTD_seqSymbol e222; + public ZSTD_seqSymbol e223; + public ZSTD_seqSymbol e224; + public ZSTD_seqSymbol e225; + public ZSTD_seqSymbol e226; + public ZSTD_seqSymbol e227; + public ZSTD_seqSymbol e228; + public ZSTD_seqSymbol e229; + public ZSTD_seqSymbol e230; + public ZSTD_seqSymbol e231; + public ZSTD_seqSymbol e232; + public ZSTD_seqSymbol e233; + public ZSTD_seqSymbol e234; + public ZSTD_seqSymbol e235; + public ZSTD_seqSymbol e236; + public ZSTD_seqSymbol e237; + public ZSTD_seqSymbol e238; + public ZSTD_seqSymbol e239; + public ZSTD_seqSymbol e240; + public ZSTD_seqSymbol e241; + public ZSTD_seqSymbol e242; + public ZSTD_seqSymbol e243; + public ZSTD_seqSymbol e244; + public ZSTD_seqSymbol e245; + public ZSTD_seqSymbol e246; + public ZSTD_seqSymbol e247; + public ZSTD_seqSymbol e248; + public ZSTD_seqSymbol e249; + public ZSTD_seqSymbol e250; + public ZSTD_seqSymbol e251; + public ZSTD_seqSymbol e252; + public ZSTD_seqSymbol e253; + public ZSTD_seqSymbol e254; + public ZSTD_seqSymbol e255; + public ZSTD_seqSymbol e256; + public ZSTD_seqSymbol e257; + public ZSTD_seqSymbol e258; + public ZSTD_seqSymbol e259; + public ZSTD_seqSymbol e260; + public ZSTD_seqSymbol e261; + public ZSTD_seqSymbol e262; + public ZSTD_seqSymbol e263; + public ZSTD_seqSymbol e264; + public ZSTD_seqSymbol e265; + public ZSTD_seqSymbol e266; + public ZSTD_seqSymbol e267; + public ZSTD_seqSymbol e268; + public ZSTD_seqSymbol e269; + public ZSTD_seqSymbol e270; + public ZSTD_seqSymbol e271; + public ZSTD_seqSymbol e272; + public ZSTD_seqSymbol e273; + public ZSTD_seqSymbol e274; + public ZSTD_seqSymbol e275; + public ZSTD_seqSymbol e276; + public ZSTD_seqSymbol e277; + public ZSTD_seqSymbol e278; + public ZSTD_seqSymbol e279; + public ZSTD_seqSymbol e280; + public 
ZSTD_seqSymbol e281; + public ZSTD_seqSymbol e282; + public ZSTD_seqSymbol e283; + public ZSTD_seqSymbol e284; + public ZSTD_seqSymbol e285; + public ZSTD_seqSymbol e286; + public ZSTD_seqSymbol e287; + public ZSTD_seqSymbol e288; + public ZSTD_seqSymbol e289; + public ZSTD_seqSymbol e290; + public ZSTD_seqSymbol e291; + public ZSTD_seqSymbol e292; + public ZSTD_seqSymbol e293; + public ZSTD_seqSymbol e294; + public ZSTD_seqSymbol e295; + public ZSTD_seqSymbol e296; + public ZSTD_seqSymbol e297; + public ZSTD_seqSymbol e298; + public ZSTD_seqSymbol e299; + public ZSTD_seqSymbol e300; + public ZSTD_seqSymbol e301; + public ZSTD_seqSymbol e302; + public ZSTD_seqSymbol e303; + public ZSTD_seqSymbol e304; + public ZSTD_seqSymbol e305; + public ZSTD_seqSymbol e306; + public ZSTD_seqSymbol e307; + public ZSTD_seqSymbol e308; + public ZSTD_seqSymbol e309; + public ZSTD_seqSymbol e310; + public ZSTD_seqSymbol e311; + public ZSTD_seqSymbol e312; + public ZSTD_seqSymbol e313; + public ZSTD_seqSymbol e314; + public ZSTD_seqSymbol e315; + public ZSTD_seqSymbol e316; + public ZSTD_seqSymbol e317; + public ZSTD_seqSymbol e318; + public ZSTD_seqSymbol e319; + public ZSTD_seqSymbol e320; + public ZSTD_seqSymbol e321; + public ZSTD_seqSymbol e322; + public ZSTD_seqSymbol e323; + public ZSTD_seqSymbol e324; + public ZSTD_seqSymbol e325; + public ZSTD_seqSymbol e326; + public ZSTD_seqSymbol e327; + public ZSTD_seqSymbol e328; + public ZSTD_seqSymbol e329; + public ZSTD_seqSymbol e330; + public ZSTD_seqSymbol e331; + public ZSTD_seqSymbol e332; + public ZSTD_seqSymbol e333; + public ZSTD_seqSymbol e334; + public ZSTD_seqSymbol e335; + public ZSTD_seqSymbol e336; + public ZSTD_seqSymbol e337; + public ZSTD_seqSymbol e338; + public ZSTD_seqSymbol e339; + public ZSTD_seqSymbol e340; + public ZSTD_seqSymbol e341; + public ZSTD_seqSymbol e342; + public ZSTD_seqSymbol e343; + public ZSTD_seqSymbol e344; + public ZSTD_seqSymbol e345; + public ZSTD_seqSymbol e346; + public ZSTD_seqSymbol 
e347; + public ZSTD_seqSymbol e348; + public ZSTD_seqSymbol e349; + public ZSTD_seqSymbol e350; + public ZSTD_seqSymbol e351; + public ZSTD_seqSymbol e352; + public ZSTD_seqSymbol e353; + public ZSTD_seqSymbol e354; + public ZSTD_seqSymbol e355; + public ZSTD_seqSymbol e356; + public ZSTD_seqSymbol e357; + public ZSTD_seqSymbol e358; + public ZSTD_seqSymbol e359; + public ZSTD_seqSymbol e360; + public ZSTD_seqSymbol e361; + public ZSTD_seqSymbol e362; + public ZSTD_seqSymbol e363; + public ZSTD_seqSymbol e364; + public ZSTD_seqSymbol e365; + public ZSTD_seqSymbol e366; + public ZSTD_seqSymbol e367; + public ZSTD_seqSymbol e368; + public ZSTD_seqSymbol e369; + public ZSTD_seqSymbol e370; + public ZSTD_seqSymbol e371; + public ZSTD_seqSymbol e372; + public ZSTD_seqSymbol e373; + public ZSTD_seqSymbol e374; + public ZSTD_seqSymbol e375; + public ZSTD_seqSymbol e376; + public ZSTD_seqSymbol e377; + public ZSTD_seqSymbol e378; + public ZSTD_seqSymbol e379; + public ZSTD_seqSymbol e380; + public ZSTD_seqSymbol e381; + public ZSTD_seqSymbol e382; + public ZSTD_seqSymbol e383; + public ZSTD_seqSymbol e384; + public ZSTD_seqSymbol e385; + public ZSTD_seqSymbol e386; + public ZSTD_seqSymbol e387; + public ZSTD_seqSymbol e388; + public ZSTD_seqSymbol e389; + public ZSTD_seqSymbol e390; + public ZSTD_seqSymbol e391; + public ZSTD_seqSymbol e392; + public ZSTD_seqSymbol e393; + public ZSTD_seqSymbol e394; + public ZSTD_seqSymbol e395; + public ZSTD_seqSymbol e396; + public ZSTD_seqSymbol e397; + public ZSTD_seqSymbol e398; + public ZSTD_seqSymbol e399; + public ZSTD_seqSymbol e400; + public ZSTD_seqSymbol e401; + public ZSTD_seqSymbol e402; + public ZSTD_seqSymbol e403; + public ZSTD_seqSymbol e404; + public ZSTD_seqSymbol e405; + public ZSTD_seqSymbol e406; + public ZSTD_seqSymbol e407; + public ZSTD_seqSymbol e408; + public ZSTD_seqSymbol e409; + public ZSTD_seqSymbol e410; + public ZSTD_seqSymbol e411; + public ZSTD_seqSymbol e412; + public ZSTD_seqSymbol e413; + public 
ZSTD_seqSymbol e414; + public ZSTD_seqSymbol e415; + public ZSTD_seqSymbol e416; + public ZSTD_seqSymbol e417; + public ZSTD_seqSymbol e418; + public ZSTD_seqSymbol e419; + public ZSTD_seqSymbol e420; + public ZSTD_seqSymbol e421; + public ZSTD_seqSymbol e422; + public ZSTD_seqSymbol e423; + public ZSTD_seqSymbol e424; + public ZSTD_seqSymbol e425; + public ZSTD_seqSymbol e426; + public ZSTD_seqSymbol e427; + public ZSTD_seqSymbol e428; + public ZSTD_seqSymbol e429; + public ZSTD_seqSymbol e430; + public ZSTD_seqSymbol e431; + public ZSTD_seqSymbol e432; + public ZSTD_seqSymbol e433; + public ZSTD_seqSymbol e434; + public ZSTD_seqSymbol e435; + public ZSTD_seqSymbol e436; + public ZSTD_seqSymbol e437; + public ZSTD_seqSymbol e438; + public ZSTD_seqSymbol e439; + public ZSTD_seqSymbol e440; + public ZSTD_seqSymbol e441; + public ZSTD_seqSymbol e442; + public ZSTD_seqSymbol e443; + public ZSTD_seqSymbol e444; + public ZSTD_seqSymbol e445; + public ZSTD_seqSymbol e446; + public ZSTD_seqSymbol e447; + public ZSTD_seqSymbol e448; + public ZSTD_seqSymbol e449; + public ZSTD_seqSymbol e450; + public ZSTD_seqSymbol e451; + public ZSTD_seqSymbol e452; + public ZSTD_seqSymbol e453; + public ZSTD_seqSymbol e454; + public ZSTD_seqSymbol e455; + public ZSTD_seqSymbol e456; + public ZSTD_seqSymbol e457; + public ZSTD_seqSymbol e458; + public ZSTD_seqSymbol e459; + public ZSTD_seqSymbol e460; + public ZSTD_seqSymbol e461; + public ZSTD_seqSymbol e462; + public ZSTD_seqSymbol e463; + public ZSTD_seqSymbol e464; + public ZSTD_seqSymbol e465; + public ZSTD_seqSymbol e466; + public ZSTD_seqSymbol e467; + public ZSTD_seqSymbol e468; + public ZSTD_seqSymbol e469; + public ZSTD_seqSymbol e470; + public ZSTD_seqSymbol e471; + public ZSTD_seqSymbol e472; + public ZSTD_seqSymbol e473; + public ZSTD_seqSymbol e474; + public ZSTD_seqSymbol e475; + public ZSTD_seqSymbol e476; + public ZSTD_seqSymbol e477; + public ZSTD_seqSymbol e478; + public ZSTD_seqSymbol e479; + public ZSTD_seqSymbol 
e480; + public ZSTD_seqSymbol e481; + public ZSTD_seqSymbol e482; + public ZSTD_seqSymbol e483; + public ZSTD_seqSymbol e484; + public ZSTD_seqSymbol e485; + public ZSTD_seqSymbol e486; + public ZSTD_seqSymbol e487; + public ZSTD_seqSymbol e488; + public ZSTD_seqSymbol e489; + public ZSTD_seqSymbol e490; + public ZSTD_seqSymbol e491; + public ZSTD_seqSymbol e492; + public ZSTD_seqSymbol e493; + public ZSTD_seqSymbol e494; + public ZSTD_seqSymbol e495; + public ZSTD_seqSymbol e496; + public ZSTD_seqSymbol e497; + public ZSTD_seqSymbol e498; + public ZSTD_seqSymbol e499; + public ZSTD_seqSymbol e500; + public ZSTD_seqSymbol e501; + public ZSTD_seqSymbol e502; + public ZSTD_seqSymbol e503; + public ZSTD_seqSymbol e504; + public ZSTD_seqSymbol e505; + public ZSTD_seqSymbol e506; + public ZSTD_seqSymbol e507; + public ZSTD_seqSymbol e508; + public ZSTD_seqSymbol e509; + public ZSTD_seqSymbol e510; + public ZSTD_seqSymbol e511; + public ZSTD_seqSymbol e512; } -} +#endif +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs index db2885ee7..f42886222 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_forceIgnoreChecksum_e { - public enum ZSTD_forceIgnoreChecksum_e - { - /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ - ZSTD_d_validateChecksum = 0, - ZSTD_d_ignoreChecksum = 1, - } -} + /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ + ZSTD_d_validateChecksum = 0, + ZSTD_d_ignoreChecksum = 1, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs index 
c36c2ac78..28f4de188 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_format_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_format_e { - public enum ZSTD_format_e - { - /* zstd frame format, specified in zstd_compression_format.md (default) */ - ZSTD_f_zstd1 = 0, + /* zstd frame format, specified in zstd_compression_format.md (default) */ + ZSTD_f_zstd1 = 0, - /* Variant of zstd frame format, without initial 4-bytes magic number. - * Useful to save 4 bytes per generated frame. - * Decoder cannot recognise automatically this format, requiring this instruction. */ - ZSTD_f_zstd1_magicless = 1, - } + /* Variant of zstd frame format, without initial 4-bytes magic number. + * Useful to save 4 bytes per generated frame. + * Decoder cannot recognise automatically this format, requiring this instruction. */ + ZSTD_f_zstd1_magicless = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs index 23fb31b18..afa709f0d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs @@ -1,22 +1,21 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_frameHeader { - public struct ZSTD_frameHeader - { - /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - public ulong frameContentSize; + /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 
0 means "empty" */ + public ulong frameContentSize; - /* can be very large, up to <= frameContentSize */ - public ulong windowSize; - public uint blockSizeMax; + /* can be very large, up to <= frameContentSize */ + public ulong windowSize; + public uint blockSizeMax; - /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - public ZSTD_frameType_e frameType; - public uint headerSize; + /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + public ZSTD_frameType_e frameType; + public uint headerSize; - /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ - public uint dictID; - public uint checksumFlag; - public uint _reserved1; - public uint _reserved2; - } -} + /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ + public uint dictID; + public uint checksumFlag; + public uint _reserved1; + public uint _reserved2; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs index 8f03e4576..137ccb375 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameParameters.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_frameParameters { - public struct ZSTD_frameParameters - { - /**< 1: content size will be in frame header (when known) */ - public int contentSizeFlag; + /**< 1: content size will be in frame header (when known) */ + public int contentSizeFlag; - /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ - public int checksumFlag; + /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ + public int checksumFlag; - /**< 1: no dictID will be saved into frame header (dictID is only 
useful for dictionary compression) */ - public int noDictIDFlag; - } + /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ + public int noDictIDFlag; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs index 8e9e46b06..68a380fc8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs @@ -1,23 +1,22 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_frameProgression { - public struct ZSTD_frameProgression - { - /* nb input bytes read and buffered */ - public ulong ingested; + /* nb input bytes read and buffered */ + public ulong ingested; - /* nb input bytes actually compressed */ - public ulong consumed; + /* nb input bytes actually compressed */ + public ulong consumed; - /* nb of compressed bytes generated and buffered */ - public ulong produced; + /* nb of compressed bytes generated and buffered */ + public ulong produced; - /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ - public ulong flushed; + /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ + public ulong flushed; - /* MT only : latest started job nb */ - public uint currentJobID; + /* MT only : latest started job nb */ + public uint currentJobID; - /* MT only : nb of workers actively compressing at probe time */ - public uint nbActiveWorkers; - } -} + /* MT only : nb of workers actively compressing at probe time */ + public uint nbActiveWorkers; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs index 8d60693f1..7f9b6b08f 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameSizeInfo.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Contains the compressed frame size and an upper-bound for the decompressed frame size. + * Note: before using `compressedSize`, check for errors using ZSTD_isError(). + * similarly, before using `decompressedBound`, check for errors using: + * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` + */ +public struct ZSTD_frameSizeInfo { - /** - * Contains the compressed frame size and an upper-bound for the decompressed frame size. - * Note: before using `compressedSize`, check for errors using ZSTD_isError(). - * similarly, before using `decompressedBound`, check for errors using: - * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` - */ - public struct ZSTD_frameSizeInfo - { - public nuint nbBlocks; - public nuint compressedSize; - public ulong decompressedBound; - } + public nuint nbBlocks; + public nuint compressedSize; + public ulong decompressedBound; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs index 4cb02e714..80e6e8391 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_frameType_e { - public enum ZSTD_frameType_e - { - ZSTD_frame, - ZSTD_skippableFrame, - } -} + ZSTD_frame, + ZSTD_skippableFrame, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs index 0e89337b3..c6aa0dc8c 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTablesMetadata_t.cs @@ -1,19 +1,18 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** ZSTD_fseCTablesMetadata_t : + * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and + * fse tables in fseTablesBuffer. + * fseTablesSize refers to the size of fse tables in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ +public unsafe struct ZSTD_fseCTablesMetadata_t { - /** ZSTD_fseCTablesMetadata_t : - * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and - * fse tables in fseTablesBuffer. - * fseTablesSize refers to the size of fse tables in bytes. - * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ - public unsafe struct ZSTD_fseCTablesMetadata_t - { - public SymbolEncodingType_e llType; - public SymbolEncodingType_e ofType; - public SymbolEncodingType_e mlType; - public fixed byte fseTablesBuffer[133]; - public nuint fseTablesSize; + public SymbolEncodingType_e llType; + public SymbolEncodingType_e ofType; + public SymbolEncodingType_e mlType; + public fixed byte fseTablesBuffer[133]; + public nuint fseTablesSize; - /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ - public nuint lastCountSize; - } + /* This is to account for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ + public nuint lastCountSize; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs index b716d40ee..0fca629fd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_fseCTables_t { - public unsafe struct ZSTD_fseCTables_t - { - public fixed uint offcodeCTable[193]; - public fixed uint matchlengthCTable[363]; - public fixed uint litlengthCTable[329]; - public FSE_repeat offcode_repeatMode; - public FSE_repeat matchlength_repeatMode; - public FSE_repeat litlength_repeatMode; - } -} + public fixed uint offcodeCTable[193]; + public fixed uint matchlengthCTable[363]; + public fixed uint litlengthCTable[329]; + public FSE_repeat offcode_repeatMode; + public FSE_repeat matchlength_repeatMode; + public FSE_repeat litlength_repeatMode; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs index f77f7116e..2e7b2a2c7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseState.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_fseState { - public unsafe struct ZSTD_fseState - { - public nuint state; - public ZSTD_seqSymbol* table; - } + public nuint state; + public ZSTD_seqSymbol* table; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs index b6e49f4ed..4f7ee54f3 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_getAllMatchesFn.cs @@ -1,16 +1,15 @@ using System.Runtime.InteropServices; -namespace ZstdSharp.Unsafe -{ - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - public unsafe delegate uint ZSTD_getAllMatchesFn( - ZSTD_match_t* param0, - ZSTD_MatchState_t* param1, - uint* param2, - byte* param3, - byte* param4, - uint* rep, - uint ll0, - uint lengthToBeat - ); -} +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +[UnmanagedFunctionPointer(CallingConvention.Cdecl)] +public unsafe delegate uint ZSTD_getAllMatchesFn( + ZSTD_match_t* param0, + ZSTD_MatchState_t* param1, + uint* param2, + byte* param3, + byte* param4, + uint* rep, + uint ll0, + uint lengthToBeat +); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs index d4b5d4da1..481bd6472 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTablesMetadata_t.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*********************************************** + * Entropy buffer statistics structs and funcs * + ***********************************************/ +/** ZSTD_hufCTablesMetadata_t : + * Stores Literals Block Type for a super-block in hType, and + * huffman tree description in hufDesBuffer. + * hufDesSize refers to the size of huffman tree description in bytes. 
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ +public unsafe struct ZSTD_hufCTablesMetadata_t { - /*********************************************** - * Entropy buffer statistics structs and funcs * - ***********************************************/ - /** ZSTD_hufCTablesMetadata_t : - * Stores Literals Block Type for a super-block in hType, and - * huffman tree description in hufDesBuffer. - * hufDesSize refers to the size of huffman tree description in bytes. - * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ - public unsafe struct ZSTD_hufCTablesMetadata_t - { - public SymbolEncodingType_e hType; - public fixed byte hufDesBuffer[128]; - public nuint hufDesSize; - } + public SymbolEncodingType_e hType; + public fixed byte hufDesBuffer[128]; + public nuint hufDesSize; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs index a697fa630..7ba93122f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs @@ -1,11 +1,11 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_hufCTables_t { - public unsafe struct ZSTD_hufCTables_t - { - public _CTable_e__FixedBuffer CTable; - public HUF_repeat repeatMode; + public _CTable_e__FixedBuffer CTable; + public HUF_repeat repeatMode; #if NET8_0_OR_GREATER [InlineArray(257)] @@ -15,266 +15,265 @@ public unsafe struct _CTable_e__FixedBuffer } #else - public unsafe struct _CTable_e__FixedBuffer - { - public nuint e0; - public nuint e1; - public nuint e2; - public nuint e3; - public nuint e4; - public nuint e5; - public nuint e6; - public nuint e7; - public nuint e8; - public nuint e9; - public nuint e10; - public nuint e11; - public nuint e12; - public nuint e13; - public nuint e14; - 
public nuint e15; - public nuint e16; - public nuint e17; - public nuint e18; - public nuint e19; - public nuint e20; - public nuint e21; - public nuint e22; - public nuint e23; - public nuint e24; - public nuint e25; - public nuint e26; - public nuint e27; - public nuint e28; - public nuint e29; - public nuint e30; - public nuint e31; - public nuint e32; - public nuint e33; - public nuint e34; - public nuint e35; - public nuint e36; - public nuint e37; - public nuint e38; - public nuint e39; - public nuint e40; - public nuint e41; - public nuint e42; - public nuint e43; - public nuint e44; - public nuint e45; - public nuint e46; - public nuint e47; - public nuint e48; - public nuint e49; - public nuint e50; - public nuint e51; - public nuint e52; - public nuint e53; - public nuint e54; - public nuint e55; - public nuint e56; - public nuint e57; - public nuint e58; - public nuint e59; - public nuint e60; - public nuint e61; - public nuint e62; - public nuint e63; - public nuint e64; - public nuint e65; - public nuint e66; - public nuint e67; - public nuint e68; - public nuint e69; - public nuint e70; - public nuint e71; - public nuint e72; - public nuint e73; - public nuint e74; - public nuint e75; - public nuint e76; - public nuint e77; - public nuint e78; - public nuint e79; - public nuint e80; - public nuint e81; - public nuint e82; - public nuint e83; - public nuint e84; - public nuint e85; - public nuint e86; - public nuint e87; - public nuint e88; - public nuint e89; - public nuint e90; - public nuint e91; - public nuint e92; - public nuint e93; - public nuint e94; - public nuint e95; - public nuint e96; - public nuint e97; - public nuint e98; - public nuint e99; - public nuint e100; - public nuint e101; - public nuint e102; - public nuint e103; - public nuint e104; - public nuint e105; - public nuint e106; - public nuint e107; - public nuint e108; - public nuint e109; - public nuint e110; - public nuint e111; - public nuint e112; - public nuint e113; - 
public nuint e114; - public nuint e115; - public nuint e116; - public nuint e117; - public nuint e118; - public nuint e119; - public nuint e120; - public nuint e121; - public nuint e122; - public nuint e123; - public nuint e124; - public nuint e125; - public nuint e126; - public nuint e127; - public nuint e128; - public nuint e129; - public nuint e130; - public nuint e131; - public nuint e132; - public nuint e133; - public nuint e134; - public nuint e135; - public nuint e136; - public nuint e137; - public nuint e138; - public nuint e139; - public nuint e140; - public nuint e141; - public nuint e142; - public nuint e143; - public nuint e144; - public nuint e145; - public nuint e146; - public nuint e147; - public nuint e148; - public nuint e149; - public nuint e150; - public nuint e151; - public nuint e152; - public nuint e153; - public nuint e154; - public nuint e155; - public nuint e156; - public nuint e157; - public nuint e158; - public nuint e159; - public nuint e160; - public nuint e161; - public nuint e162; - public nuint e163; - public nuint e164; - public nuint e165; - public nuint e166; - public nuint e167; - public nuint e168; - public nuint e169; - public nuint e170; - public nuint e171; - public nuint e172; - public nuint e173; - public nuint e174; - public nuint e175; - public nuint e176; - public nuint e177; - public nuint e178; - public nuint e179; - public nuint e180; - public nuint e181; - public nuint e182; - public nuint e183; - public nuint e184; - public nuint e185; - public nuint e186; - public nuint e187; - public nuint e188; - public nuint e189; - public nuint e190; - public nuint e191; - public nuint e192; - public nuint e193; - public nuint e194; - public nuint e195; - public nuint e196; - public nuint e197; - public nuint e198; - public nuint e199; - public nuint e200; - public nuint e201; - public nuint e202; - public nuint e203; - public nuint e204; - public nuint e205; - public nuint e206; - public nuint e207; - public nuint e208; - 
public nuint e209; - public nuint e210; - public nuint e211; - public nuint e212; - public nuint e213; - public nuint e214; - public nuint e215; - public nuint e216; - public nuint e217; - public nuint e218; - public nuint e219; - public nuint e220; - public nuint e221; - public nuint e222; - public nuint e223; - public nuint e224; - public nuint e225; - public nuint e226; - public nuint e227; - public nuint e228; - public nuint e229; - public nuint e230; - public nuint e231; - public nuint e232; - public nuint e233; - public nuint e234; - public nuint e235; - public nuint e236; - public nuint e237; - public nuint e238; - public nuint e239; - public nuint e240; - public nuint e241; - public nuint e242; - public nuint e243; - public nuint e244; - public nuint e245; - public nuint e246; - public nuint e247; - public nuint e248; - public nuint e249; - public nuint e250; - public nuint e251; - public nuint e252; - public nuint e253; - public nuint e254; - public nuint e255; - public nuint e256; - } -#endif + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; + public nuint e10; + public nuint e11; + public nuint e12; + public nuint e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + 
public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public nuint e62; + public nuint e63; + public nuint e64; + public nuint e65; + public nuint e66; + public nuint e67; + public nuint e68; + public nuint e69; + public nuint e70; + public nuint e71; + public nuint e72; + public nuint e73; + public nuint e74; + public nuint e75; + public nuint e76; + public nuint e77; + public nuint e78; + public nuint e79; + public nuint e80; + public nuint e81; + public nuint e82; + public nuint e83; + public nuint e84; + public nuint e85; + public nuint e86; + public nuint e87; + public nuint e88; + public nuint e89; + public nuint e90; + public nuint e91; + public nuint e92; + public nuint e93; + public nuint e94; + public nuint e95; + public nuint e96; + public nuint e97; + public nuint e98; + public nuint e99; + public nuint e100; + public nuint e101; + public nuint e102; + public nuint e103; + public nuint e104; + public nuint e105; + public nuint e106; + public nuint e107; + public nuint e108; + public nuint e109; + public nuint e110; + public nuint e111; + public nuint e112; + public nuint e113; + public nuint e114; + public nuint e115; + public nuint e116; + public nuint e117; + public nuint e118; + public nuint e119; + public nuint e120; + public nuint e121; + public nuint e122; + public nuint e123; + public nuint e124; + public nuint e125; + public nuint e126; + public nuint e127; + public nuint e128; + public nuint e129; + public nuint e130; + public nuint e131; + public nuint e132; + public nuint e133; + public nuint e134; + public nuint e135; + public nuint e136; + public nuint e137; + public nuint e138; + public nuint e139; + public nuint e140; + public nuint e141; + public nuint e142; + public nuint e143; + public nuint 
e144; + public nuint e145; + public nuint e146; + public nuint e147; + public nuint e148; + public nuint e149; + public nuint e150; + public nuint e151; + public nuint e152; + public nuint e153; + public nuint e154; + public nuint e155; + public nuint e156; + public nuint e157; + public nuint e158; + public nuint e159; + public nuint e160; + public nuint e161; + public nuint e162; + public nuint e163; + public nuint e164; + public nuint e165; + public nuint e166; + public nuint e167; + public nuint e168; + public nuint e169; + public nuint e170; + public nuint e171; + public nuint e172; + public nuint e173; + public nuint e174; + public nuint e175; + public nuint e176; + public nuint e177; + public nuint e178; + public nuint e179; + public nuint e180; + public nuint e181; + public nuint e182; + public nuint e183; + public nuint e184; + public nuint e185; + public nuint e186; + public nuint e187; + public nuint e188; + public nuint e189; + public nuint e190; + public nuint e191; + public nuint e192; + public nuint e193; + public nuint e194; + public nuint e195; + public nuint e196; + public nuint e197; + public nuint e198; + public nuint e199; + public nuint e200; + public nuint e201; + public nuint e202; + public nuint e203; + public nuint e204; + public nuint e205; + public nuint e206; + public nuint e207; + public nuint e208; + public nuint e209; + public nuint e210; + public nuint e211; + public nuint e212; + public nuint e213; + public nuint e214; + public nuint e215; + public nuint e216; + public nuint e217; + public nuint e218; + public nuint e219; + public nuint e220; + public nuint e221; + public nuint e222; + public nuint e223; + public nuint e224; + public nuint e225; + public nuint e226; + public nuint e227; + public nuint e228; + public nuint e229; + public nuint e230; + public nuint e231; + public nuint e232; + public nuint e233; + public nuint e234; + public nuint e235; + public nuint e236; + public nuint e237; + public nuint e238; + public nuint 
e239; + public nuint e240; + public nuint e241; + public nuint e242; + public nuint e243; + public nuint e244; + public nuint e245; + public nuint e246; + public nuint e247; + public nuint e248; + public nuint e249; + public nuint e250; + public nuint e251; + public nuint e252; + public nuint e253; + public nuint e254; + public nuint e255; + public nuint e256; } +#endif } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs index 7a92d4439..14c4f2c5a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/**************************** + * Streaming + ****************************/ +public unsafe struct ZSTD_inBuffer_s { - /**************************** - * Streaming - ****************************/ - public unsafe struct ZSTD_inBuffer_s - { - /**< start of input buffer */ - public void* src; + /**< start of input buffer */ + public void* src; - /**< size of input buffer */ - public nuint size; + /**< size of input buffer */ + public nuint size; - /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ - public nuint pos; - } -} + /**< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size */ + public nuint pos; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs index ed9ef92b7..f461d52df 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/** + * Controls, for this matchState reset, whether indexing can continue where it + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero + * (ZSTDirp_reset). + */ +public enum ZSTD_indexResetPolicy_e { - /** - * Controls, for this matchState reset, whether indexing can continue where it - * left off (ZSTDirp_continue), or whether it needs to be restarted from zero - * (ZSTDirp_reset). - */ - public enum ZSTD_indexResetPolicy_e - { - ZSTDirp_continue, - ZSTDirp_reset, - } -} + ZSTDirp_continue, + ZSTDirp_reset, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs index 69160e0b4..55d285621 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_litLocation_e.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_litLocation_e { - public enum ZSTD_litLocation_e - { - /* Stored entirely within litExtraBuffer */ - ZSTD_not_in_dst = 0, + /* Stored entirely within litExtraBuffer */ + ZSTD_not_in_dst = 0, - /* Stored entirely within dst (in memory after current output write) */ - ZSTD_in_dst = 1, + /* Stored entirely within dst (in memory after current output write) */ + ZSTD_in_dst = 1, - /* Split between litExtraBuffer and dst */ - 
ZSTD_split = 2, - } + /* Split between litExtraBuffer and dst */ + ZSTD_split = 2, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs index 63d35773f..2c309b06e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_literalCompressionMode_e.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_literalCompressionMode_e { - public enum ZSTD_literalCompressionMode_e - { - /**< Automatically determine the compression mode based on the compression level. - * Negative compression levels will be uncompressed, and positive compression - * levels will be compressed. */ - ZSTD_lcm_auto = 0, + /**< Automatically determine the compression mode based on the compression level. + * Negative compression levels will be uncompressed, and positive compression + * levels will be compressed. */ + ZSTD_lcm_auto = 0, - /**< Always attempt Huffman compression. Uncompressed literals will still be - * emitted if Huffman compression is not profitable. */ - ZSTD_lcm_huffman = 1, + /**< Always attempt Huffman compression. Uncompressed literals will still be + * emitted if Huffman compression is not profitable. */ + ZSTD_lcm_huffman = 1, - /**< Always emit uncompressed literals. */ - ZSTD_lcm_uncompressed = 2, - } + /**< Always emit uncompressed literals. 
*/ + ZSTD_lcm_uncompressed = 2, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs index 9cbcf7c7a..3ffa6739c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_localDict { - public unsafe struct ZSTD_localDict - { - public void* dictBuffer; - public void* dict; - public nuint dictSize; - public ZSTD_dictContentType_e dictContentType; - public ZSTD_CDict_s* cdict; - } -} + public void* dictBuffer; + public void* dict; + public nuint dictSize; + public ZSTD_dictContentType_e dictContentType; + public ZSTD_CDict_s* cdict; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs index 37c62011b..1994b34d1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs @@ -1,15 +1,14 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */ +public enum ZSTD_longLengthType_e { - /* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. 
*/ - public enum ZSTD_longLengthType_e - { - /* no longLengthType */ - ZSTD_llt_none = 0, + /* no longLengthType */ + ZSTD_llt_none = 0, - /* represents a long literal */ - ZSTD_llt_literalLength = 1, + /* represents a long literal */ + ZSTD_llt_literalLength = 1, - /* represents a long match */ - ZSTD_llt_matchLength = 2, - } -} + /* represents a long match */ + ZSTD_llt_matchLength = 2, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs index 3f67603de..86881bdd1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longOffset_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_longOffset_e { - public enum ZSTD_longOffset_e - { - ZSTD_lo_isRegularOffset, - ZSTD_lo_isLongOffset = 1, - } + ZSTD_lo_isRegularOffset, + ZSTD_lo_isLongOffset = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs index 5bf187cc7..bc953181f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/********************************* + * Compression internals structs * + *********************************/ +public struct ZSTD_match_t { - /********************************* - * Compression internals structs * - *********************************/ - public struct ZSTD_match_t - { - /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ - public uint off; + /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ + public uint off; - /* Raw length of match */ - public uint len; - } -} + /* Raw length of 
match */ + public uint len; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs index ee85055b7..5ebea51ea 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_nextInputType_e.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_nextInputType_e { - public enum ZSTD_nextInputType_e - { - ZSTDnit_frameHeader, - ZSTDnit_blockHeader, - ZSTDnit_block, - ZSTDnit_lastBlock, - ZSTDnit_checksum, - ZSTDnit_skippableFrame, - } + ZSTDnit_frameHeader, + ZSTDnit_blockHeader, + ZSTDnit_block, + ZSTDnit_lastBlock, + ZSTDnit_checksum, + ZSTDnit_skippableFrame, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs index 6ac86bf86..8319db5bf 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optLdm_t.cs @@ -1,18 +1,17 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Struct containing info needed to make decision about ldm inclusion */ +public struct ZSTD_optLdm_t { - /* Struct containing info needed to make decision about ldm inclusion */ - public struct ZSTD_optLdm_t - { - /* External match candidates store for this block */ - public RawSeqStore_t seqStore; + /* External match candidates store for this block */ + public RawSeqStore_t seqStore; - /* Start position of the current match candidate */ - public uint startPosInBlock; + /* Start position of the current match candidate */ + public uint startPosInBlock; - /* End position of the current match candidate */ - public uint endPosInBlock; + /* End position of the current match candidate */ + public uint endPosInBlock; - /* 
Offset of the match candidate */ - public uint offset; - } + /* Offset of the match candidate */ + public uint offset; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs index 426617786..848451875 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_optimal_t.cs @@ -1,20 +1,19 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_optimal_t { - public unsafe struct ZSTD_optimal_t - { - /* price from beginning of segment to this position */ - public int price; + /* price from beginning of segment to this position */ + public int price; - /* offset of previous match */ - public uint off; + /* offset of previous match */ + public uint off; - /* length of previous match */ - public uint mlen; + /* length of previous match */ + public uint mlen; - /* nb of literals since previous match */ - public uint litlen; + /* nb of literals since previous match */ + public uint litlen; - /* offset history after previous match */ - public fixed uint rep[3]; - } + /* offset history after previous match */ + public fixed uint rep[3]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs index 9411454d8..38dc21717 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_outBuffer_s { - public unsafe struct ZSTD_outBuffer_s - { - /**< start of output buffer */ - public void* dst; + /**< start of output buffer */ + public void* dst; - /**< size of output buffer */ - public nuint size; + /**< size of output buffer */ + public nuint 
size; - /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ - public nuint pos; - } -} + /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ + public nuint pos; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs index 79bafe5ba..bf5b85e1b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_overlap_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_overlap_e { - public enum ZSTD_overlap_e - { - ZSTD_no_overlap, - ZSTD_overlap_src_before_dst, - } + ZSTD_no_overlap, + ZSTD_overlap_src_before_dst, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs index c769bcbb0..3ff214929 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_paramSwitch_e.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_paramSwitch_e { - public enum ZSTD_paramSwitch_e - { - /* Let the library automatically determine whether the feature shall be enabled */ - ZSTD_ps_auto = 0, + /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_auto = 0, - /* Force-enable the feature */ - ZSTD_ps_enable = 1, + /* Force-enable the feature */ + ZSTD_ps_enable = 1, - /* Do not use the feature */ - ZSTD_ps_disable = 2, - } + /* Do not use the feature */ + ZSTD_ps_disable = 2, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs index 01441a695..eefcdcb47 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_parameters { - public struct ZSTD_parameters - { - public ZSTD_compressionParameters cParams; - public ZSTD_frameParameters fParams; - } -} + public ZSTD_compressionParameters cParams; + public ZSTD_frameParameters fParams; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs index 25eb04351..2a0db4729 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_prefixDict_s.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_prefixDict_s { - public unsafe struct ZSTD_prefixDict_s - { - public void* dict; - public nuint dictSize; - public ZSTD_dictContentType_e dictContentType; - } + public void* dict; + public nuint dictSize; + public ZSTD_dictContentType_e dictContentType; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs index d8dd23e16..c01fcefcf 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_refMultipleDDicts_e.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_refMultipleDDicts_e { - public enum ZSTD_refMultipleDDicts_e - { - /* Note: this enum controls ZSTD_d_refMultipleDDicts */ - ZSTD_rmd_refSingleDDict = 0, - ZSTD_rmd_refMultipleDDicts = 1, - } + /* Note: this enum controls ZSTD_d_refMultipleDDicts */ + ZSTD_rmd_refSingleDDict = 0, + 
ZSTD_rmd_refMultipleDDicts = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs index ebb3d5d9f..0da6810a7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_resetTarget_e { - public enum ZSTD_resetTarget_e - { - ZSTD_resetTarget_CDict, - ZSTD_resetTarget_CCtx, - } -} + ZSTD_resetTarget_CDict, + ZSTD_resetTarget_CCtx, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs index cf5d742a7..d088f2454 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs @@ -1,18 +1,17 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ZSTD_seqSymbol { - public struct ZSTD_seqSymbol - { - public ushort nextState; - public byte nbAdditionalBits; - public byte nbBits; - public uint baseValue; + public ushort nextState; + public byte nbAdditionalBits; + public byte nbBits; + public uint baseValue; - public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint baseValue) - { - this.nextState = nextState; - this.nbAdditionalBits = nbAdditionalBits; - this.nbBits = nbBits; - this.baseValue = baseValue; - } + public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint baseValue) + { + this.nextState = nextState; + this.nbAdditionalBits = nbAdditionalBits; + this.nbBits = nbBits; + this.baseValue = baseValue; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs index 573487311..eb87911ca 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol_header.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/*-******************************************************* + * Decompression types + *********************************************************/ +public struct ZSTD_seqSymbol_header { - /*-******************************************************* - * Decompression types - *********************************************************/ - public struct ZSTD_seqSymbol_header - { - public uint fastMode; - public uint tableLog; - } + public uint fastMode; + public uint tableLog; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs index 5fc478421..606e7be2c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_sequenceFormat_e { - public enum ZSTD_sequenceFormat_e - { - /* ZSTD_Sequence[] has no block delimiters, just sequences */ - ZSTD_sf_noBlockDelimiters = 0, + /* ZSTD_Sequence[] has no block delimiters, just sequences */ + ZSTD_sf_noBlockDelimiters = 0, - /* ZSTD_Sequence[] contains explicit block delimiters */ - ZSTD_sf_explicitBlockDelimiters = 1, - } -} + /* ZSTD_Sequence[] contains explicit block delimiters */ + ZSTD_sf_explicitBlockDelimiters = 1, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs index e6d8fb08a..49d8e23e7 100644 --- 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_strategy.cs @@ -1,16 +1,15 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Compression strategies, listed from fastest to strongest */ +public enum ZSTD_strategy { - /* Compression strategies, listed from fastest to strongest */ - public enum ZSTD_strategy - { - ZSTD_fast = 1, - ZSTD_dfast = 2, - ZSTD_greedy = 3, - ZSTD_lazy = 4, - ZSTD_lazy2 = 5, - ZSTD_btlazy2 = 6, - ZSTD_btopt = 7, - ZSTD_btultra = 8, - ZSTD_btultra2 = 9, - } + ZSTD_fast = 1, + ZSTD_dfast = 2, + ZSTD_greedy = 3, + ZSTD_lazy = 4, + ZSTD_lazy2 = 5, + ZSTD_btlazy2 = 6, + ZSTD_btopt = 7, + ZSTD_btultra = 8, + ZSTD_btultra2 = 9, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs index 143226b19..673f2b3df 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_symbolEncodingTypeStats_t.cs @@ -1,17 +1,16 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types + * and size of the sequences statistics + */ +public struct ZSTD_symbolEncodingTypeStats_t { - /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types - * and size of the sequences statistics - */ - public struct ZSTD_symbolEncodingTypeStats_t - { - public uint LLtype; - public uint Offtype; - public uint MLtype; - public nuint size; + public uint LLtype; + public uint Offtype; + public uint MLtype; + public nuint size; - /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ - public nuint lastCountSize; - public int longOffsets; - } + /* Accounts for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ + public nuint lastCountSize; + public int longOffsets; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs index 47a92f8f5..3f58f6ad6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum ZSTD_tableFillPurpose_e { - public enum ZSTD_tableFillPurpose_e - { - ZSTD_tfp_forCCtx, - ZSTD_tfp_forCDict, - } -} + ZSTD_tfp_forCCtx, + ZSTD_tfp_forCDict, +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs index 53d92b75d..84895dab8 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_window_t.cs @@ -1,26 +1,25 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ZSTD_window_t { - public unsafe struct ZSTD_window_t - { - /* next block here to continue on current prefix */ - public byte* nextSrc; + /* next block here to continue on current prefix */ + public byte* nextSrc; - /* All regular indexes relative to this position */ - public byte* @base; + /* All regular indexes relative to this position */ + public byte* @base; - /* extDict indexes relative to this position */ - public byte* dictBase; + /* extDict indexes relative to this position */ + public byte* dictBase; - /* below that point, need extDict */ - public uint dictLimit; + /* below that point, need extDict */ + public uint dictLimit; - /* below that point, no more valid data */ - public uint lowLimit; + /* below that point, no more valid data */ + public uint lowLimit; - 
/* Number of times overflow correction has run since - * ZSTD_window_init(). Useful for debugging coredumps - * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. - */ - public uint nbOverflowCorrections; - } + /* Number of times overflow correction has run since + * ZSTD_window_init(). Useful for debugging coredumps + * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY. + */ + public uint nbOverflowCorrections; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs index 0b50173da..77c11d81a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs @@ -1,251 +1,268 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*-******************************************************** + * Helper functions + **********************************************************/ + public static bool ZDICT_isError(nuint errorCode) + { + return ERR_isError(errorCode); + } + + public static string ZDICT_getErrorName(nuint errorCode) { - /*-******************************************************** - * Helper functions - **********************************************************/ - public static bool ZDICT_isError(nuint errorCode) + return ERR_getErrorName(errorCode); + } + + private static void ZDICT_countEStats( + EStats_ress_t esr, + ZSTD_parameters* @params, + uint* countLit, + uint* offsetcodeCount, + uint* matchlengthCount, + uint* litlengthCount, + uint* repOffsets, + void* src, + nuint srcSize, + uint notificationLevel + ) + { + nuint blockSizeMax = (nuint)( + 1 << 17 < 1 << (int)@params->cParams.windowLog + ? 
1 << 17 + : 1 << (int)@params->cParams.windowLog + ); + nuint cSize; + if (srcSize > blockSizeMax) + srcSize = blockSizeMax; { - return ERR_isError(errorCode); + nuint errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); + if (ERR_isError(errorCode)) + { + return; + } } - public static string ZDICT_getErrorName(nuint errorCode) + cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, 1 << 17, src, srcSize); + if (ERR_isError(cSize)) { - return ERR_getErrorName(errorCode); + return; } - private static void ZDICT_countEStats( - EStats_ress_t esr, - ZSTD_parameters* @params, - uint* countLit, - uint* offsetcodeCount, - uint* matchlengthCount, - uint* litlengthCount, - uint* repOffsets, - void* src, - nuint srcSize, - uint notificationLevel - ) + if (cSize != 0) { - nuint blockSizeMax = (nuint)( - 1 << 17 < 1 << (int)@params->cParams.windowLog - ? 1 << 17 - : 1 << (int)@params->cParams.windowLog - ); - nuint cSize; - if (srcSize > blockSizeMax) - srcSize = blockSizeMax; + SeqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc); { - nuint errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); - if (ERR_isError(errorCode)) - { - return; - } + byte* bytePtr; + for (bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) + countLit[*bytePtr]++; } - cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, 1 << 17, src, srcSize); - if (ERR_isError(cSize)) { - return; - } + uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + ZSTD_seqToCodes(seqStorePtr); + { + byte* codePtr = seqStorePtr->ofCode; + uint u; + for (u = 0; u < nbSeq; u++) + offsetcodeCount[codePtr[u]]++; + } + + { + byte* codePtr = seqStorePtr->mlCode; + uint u; + for (u = 0; u < nbSeq; u++) + matchlengthCount[codePtr[u]]++; + } - if (cSize != 0) - { - SeqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc); { - byte* bytePtr; - for (bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) - countLit[*bytePtr]++; + byte* 
codePtr = seqStorePtr->llCode; + uint u; + for (u = 0; u < nbSeq; u++) + litlengthCount[codePtr[u]]++; } + if (nbSeq >= 2) { - uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - ZSTD_seqToCodes(seqStorePtr); - { - byte* codePtr = seqStorePtr->ofCode; - uint u; - for (u = 0; u < nbSeq; u++) - offsetcodeCount[codePtr[u]]++; - } - - { - byte* codePtr = seqStorePtr->mlCode; - uint u; - for (u = 0; u < nbSeq; u++) - matchlengthCount[codePtr[u]]++; - } - - { - byte* codePtr = seqStorePtr->llCode; - uint u; - for (u = 0; u < nbSeq; u++) - litlengthCount[codePtr[u]]++; - } - - if (nbSeq >= 2) - { - SeqDef_s* seq = seqStorePtr->sequencesStart; - uint offset1 = seq[0].offBase - 3; - uint offset2 = seq[1].offBase - 3; - if (offset1 >= 1024) - offset1 = 0; - if (offset2 >= 1024) - offset2 = 0; - repOffsets[offset1] += 3; - repOffsets[offset2] += 1; - } + SeqDef_s* seq = seqStorePtr->sequencesStart; + uint offset1 = seq[0].offBase - 3; + uint offset2 = seq[1].offBase - 3; + if (offset1 >= 1024) + offset1 = 0; + if (offset2 >= 1024) + offset2 = 0; + repOffsets[offset1] += 3; + repOffsets[offset2] += 1; } } } + } + + private static nuint ZDICT_totalSampleSize(nuint* fileSizes, uint nbFiles) + { + nuint total = 0; + uint u; + for (u = 0; u < nbFiles; u++) + total += fileSizes[u]; + return total; + } - private static nuint ZDICT_totalSampleSize(nuint* fileSizes, uint nbFiles) + private static void ZDICT_insertSortCount(offsetCount_t* table, uint val, uint count) + { + uint u; + table[3].offset = val; + table[3].count = count; + for (u = 3; u > 0; u--) { - nuint total = 0; - uint u; - for (u = 0; u < nbFiles; u++) - total += fileSizes[u]; - return total; + offsetCount_t tmp; + if (table[u - 1].count >= table[u].count) + break; + tmp = table[u - 1]; + table[u - 1] = table[u]; + table[u] = tmp; } + } - private static void ZDICT_insertSortCount(offsetCount_t* table, uint val, uint count) + /* ZDICT_flatLit() : + * rewrite `countLit` to contain a mostly flat 
but still compressible distribution of literals. + * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. + */ + private static void ZDICT_flatLit(uint* countLit) + { + int u; + for (u = 1; u < 256; u++) + countLit[u] = 2; + countLit[0] = 4; + countLit[253] = 1; + countLit[254] = 1; + } + + private static nuint ZDICT_analyzeEntropy( + void* dstBuffer, + nuint maxDstSize, + int compressionLevel, + void* srcBuffer, + nuint* fileSizes, + uint nbFiles, + void* dictBuffer, + nuint dictBufferSize, + uint notificationLevel + ) + { + uint* countLit = stackalloc uint[256]; + /* no final ; */ + nuint* hufTable = stackalloc nuint[257]; + uint* offcodeCount = stackalloc uint[31]; + short* offcodeNCount = stackalloc short[31]; + uint offcodeMax = ZSTD_highbit32((uint)(dictBufferSize + 128 * (1 << 10))); + uint* matchLengthCount = stackalloc uint[53]; + short* matchLengthNCount = stackalloc short[53]; + uint* litLengthCount = stackalloc uint[36]; + short* litLengthNCount = stackalloc short[36]; + uint* repOffset = stackalloc uint[1024]; + offsetCount_t* bestRepOffset = stackalloc offsetCount_t[4]; + EStats_ress_t esr = new EStats_ress_t { - uint u; - table[3].offset = val; - table[3].count = count; - for (u = 3; u > 0; u--) - { - offsetCount_t tmp; - if (table[u - 1].count >= table[u].count) - break; - tmp = table[u - 1]; - table[u - 1] = table[u]; - table[u] = tmp; - } + dict = null, + zc = null, + workPlace = null, + }; + ZSTD_parameters @params; + uint u, + huffLog = 11, + Offlog = 8, + mlLog = 9, + llLog = 9, + total; + nuint pos = 0, + errorCode; + nuint eSize = 0; + nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); + nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 
1 : 0)); + byte* dstPtr = (byte*)dstBuffer; + uint* wksp = stackalloc uint[1216]; + if (offcodeMax > 30) + { + eSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed) + ); + goto _cleanup; } - /* ZDICT_flatLit() : - * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals. - * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. - */ - private static void ZDICT_flatLit(uint* countLit) + for (u = 0; u < 256; u++) + countLit[u] = 1; + for (u = 0; u <= offcodeMax; u++) + offcodeCount[u] = 1; + for (u = 0; u <= 52; u++) + matchLengthCount[u] = 1; + for (u = 0; u <= 35; u++) + litLengthCount[u] = 1; + memset(repOffset, 0, sizeof(uint) * 1024); + repOffset[1] = repOffset[4] = repOffset[8] = 1; + memset(bestRepOffset, 0, (uint)(sizeof(offsetCount_t) * 4)); + if (compressionLevel == 0) + compressionLevel = 3; + @params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); + esr.dict = ZSTD_createCDict_advanced( + dictBuffer, + dictBufferSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_rawContent, + @params.cParams, + ZSTD_defaultCMem + ); + esr.zc = ZSTD_createCCtx(); + esr.workPlace = malloc(1 << 17); + if (esr.dict == null || esr.zc == null || esr.workPlace == null) { - int u; - for (u = 1; u < 256; u++) - countLit[u] = 2; - countLit[0] = 4; - countLit[253] = 1; - countLit[254] = 1; + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + goto _cleanup; } - private static nuint ZDICT_analyzeEntropy( - void* dstBuffer, - nuint maxDstSize, - int compressionLevel, - void* srcBuffer, - nuint* fileSizes, - uint nbFiles, - void* dictBuffer, - nuint dictBufferSize, - uint notificationLevel - ) + for (u = 0; u < nbFiles; u++) { - uint* countLit = stackalloc uint[256]; - /* no final ; */ - nuint* hufTable = stackalloc nuint[257]; - uint* offcodeCount = stackalloc uint[31]; - short* 
offcodeNCount = stackalloc short[31]; - uint offcodeMax = ZSTD_highbit32((uint)(dictBufferSize + 128 * (1 << 10))); - uint* matchLengthCount = stackalloc uint[53]; - short* matchLengthNCount = stackalloc short[53]; - uint* litLengthCount = stackalloc uint[36]; - short* litLengthNCount = stackalloc short[36]; - uint* repOffset = stackalloc uint[1024]; - offsetCount_t* bestRepOffset = stackalloc offsetCount_t[4]; - EStats_ress_t esr = new EStats_ress_t - { - dict = null, - zc = null, - workPlace = null, - }; - ZSTD_parameters @params; - uint u, - huffLog = 11, - Offlog = 8, - mlLog = 9, - llLog = 9, - total; - nuint pos = 0, - errorCode; - nuint eSize = 0; - nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); - nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 1 : 0)); - byte* dstPtr = (byte*)dstBuffer; - uint* wksp = stackalloc uint[1216]; - if (offcodeMax > 30) - { - eSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed) - ); - goto _cleanup; - } - - for (u = 0; u < 256; u++) - countLit[u] = 1; - for (u = 0; u <= offcodeMax; u++) - offcodeCount[u] = 1; - for (u = 0; u <= 52; u++) - matchLengthCount[u] = 1; - for (u = 0; u <= 35; u++) - litLengthCount[u] = 1; - memset(repOffset, 0, sizeof(uint) * 1024); - repOffset[1] = repOffset[4] = repOffset[8] = 1; - memset(bestRepOffset, 0, (uint)(sizeof(offsetCount_t) * 4)); - if (compressionLevel == 0) - compressionLevel = 3; - @params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); - esr.dict = ZSTD_createCDict_advanced( - dictBuffer, - dictBufferSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - ZSTD_dictContentType_e.ZSTD_dct_rawContent, - @params.cParams, - ZSTD_defaultCMem + ZDICT_countEStats( + esr, + &@params, + countLit, + offcodeCount, + matchLengthCount, + litLengthCount, + repOffset, + (sbyte*)srcBuffer + pos, + fileSizes[u], + notificationLevel ); - esr.zc = ZSTD_createCCtx(); - esr.workPlace = malloc(1 << 17); - if (esr.dict 
== null || esr.zc == null || esr.workPlace == null) - { - eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - goto _cleanup; - } + pos += fileSizes[u]; + } - for (u = 0; u < nbFiles; u++) - { - ZDICT_countEStats( - esr, - &@params, - countLit, - offcodeCount, - matchLengthCount, - litLengthCount, - repOffset, - (sbyte*)srcBuffer + pos, - fileSizes[u], - notificationLevel - ); - pos += fileSizes[u]; - } + if (notificationLevel >= 4) + { + for (u = 0; u <= offcodeMax; u++) { } + } - if (notificationLevel >= 4) + { + nuint maxNbBits = HUF_buildCTable_wksp( + hufTable, + countLit, + 255, + huffLog, + wksp, + sizeof(uint) * 1216 + ); + if (ERR_isError(maxNbBits)) { - for (u = 0; u <= offcodeMax; u++) { } + eSize = maxNbBits; + goto _cleanup; } + if (maxNbBits == 8) { - nuint maxNbBits = HUF_buildCTable_wksp( + ZDICT_flatLit(countLit); + maxNbBits = HUF_buildCTable_wksp( hufTable, countLit, 255, @@ -253,405 +270,387 @@ uint notificationLevel wksp, sizeof(uint) * 1216 ); - if (ERR_isError(maxNbBits)) - { - eSize = maxNbBits; - goto _cleanup; - } + assert(maxNbBits == 9); + } - if (maxNbBits == 8) - { - ZDICT_flatLit(countLit); - maxNbBits = HUF_buildCTable_wksp( - hufTable, - countLit, - 255, - huffLog, - wksp, - sizeof(uint) * 1216 - ); - assert(maxNbBits == 9); - } + huffLog = (uint)maxNbBits; + } - huffLog = (uint)maxNbBits; - } + { + uint offset; + for (offset = 1; offset < 1024; offset++) + ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]); + } - { - uint offset; - for (offset = 1; offset < 1024; offset++) - ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]); - } + total = 0; + for (u = 0; u <= offcodeMax; u++) + total += offcodeCount[u]; + errorCode = FSE_normalizeCount( + offcodeNCount, + Offlog, + offcodeCount, + total, + offcodeMax, + 1 + ); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } - total = 0; - for (u = 0; u <= offcodeMax; u++) - total += offcodeCount[u]; - errorCode = 
FSE_normalizeCount( - offcodeNCount, - Offlog, - offcodeCount, - total, - offcodeMax, - 1 - ); - if (ERR_isError(errorCode)) - { - eSize = errorCode; - goto _cleanup; - } + Offlog = (uint)errorCode; + total = 0; + for (u = 0; u <= 52; u++) + total += matchLengthCount[u]; + errorCode = FSE_normalizeCount( + matchLengthNCount, + mlLog, + matchLengthCount, + total, + 52, + 1 + ); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } - Offlog = (uint)errorCode; - total = 0; - for (u = 0; u <= 52; u++) - total += matchLengthCount[u]; - errorCode = FSE_normalizeCount( - matchLengthNCount, - mlLog, - matchLengthCount, - total, - 52, - 1 + mlLog = (uint)errorCode; + total = 0; + for (u = 0; u <= 35; u++) + total += litLengthCount[u]; + errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, 35, 1); + if (ERR_isError(errorCode)) + { + eSize = errorCode; + goto _cleanup; + } + + llLog = (uint)errorCode; + { + nuint hhSize = HUF_writeCTable_wksp( + dstPtr, + maxDstSize, + hufTable, + 255, + huffLog, + wksp, + sizeof(uint) * 1216 ); - if (ERR_isError(errorCode)) + if (ERR_isError(hhSize)) { - eSize = errorCode; + eSize = hhSize; goto _cleanup; } - mlLog = (uint)errorCode; - total = 0; - for (u = 0; u <= 35; u++) - total += litLengthCount[u]; - errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, 35, 1); - if (ERR_isError(errorCode)) - { - eSize = errorCode; - goto _cleanup; - } + dstPtr += hhSize; + maxDstSize -= hhSize; + eSize += hhSize; + } - llLog = (uint)errorCode; + { + nuint ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, 30, Offlog); + if (ERR_isError(ohSize)) { - nuint hhSize = HUF_writeCTable_wksp( - dstPtr, - maxDstSize, - hufTable, - 255, - huffLog, - wksp, - sizeof(uint) * 1216 - ); - if (ERR_isError(hhSize)) - { - eSize = hhSize; - goto _cleanup; - } - - dstPtr += hhSize; - maxDstSize -= hhSize; - eSize += hhSize; + eSize = ohSize; + goto _cleanup; } - { - nuint ohSize = 
FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, 30, Offlog); - if (ERR_isError(ohSize)) - { - eSize = ohSize; - goto _cleanup; - } - - dstPtr += ohSize; - maxDstSize -= ohSize; - eSize += ohSize; - } + dstPtr += ohSize; + maxDstSize -= ohSize; + eSize += ohSize; + } + { + nuint mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, 52, mlLog); + if (ERR_isError(mhSize)) { - nuint mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, 52, mlLog); - if (ERR_isError(mhSize)) - { - eSize = mhSize; - goto _cleanup; - } - - dstPtr += mhSize; - maxDstSize -= mhSize; - eSize += mhSize; + eSize = mhSize; + goto _cleanup; } - { - nuint lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, 35, llLog); - if (ERR_isError(lhSize)) - { - eSize = lhSize; - goto _cleanup; - } - - dstPtr += lhSize; - maxDstSize -= lhSize; - eSize += lhSize; - } + dstPtr += mhSize; + maxDstSize -= mhSize; + eSize += mhSize; + } - if (maxDstSize < 12) + { + nuint lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, 35, llLog); + if (ERR_isError(lhSize)) { - eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + eSize = lhSize; goto _cleanup; } - MEM_writeLE32(dstPtr + 0, repStartValue[0]); - MEM_writeLE32(dstPtr + 4, repStartValue[1]); - MEM_writeLE32(dstPtr + 8, repStartValue[2]); - eSize += 12; - _cleanup: - ZSTD_freeCDict(esr.dict); - ZSTD_freeCCtx(esr.zc); - free(esr.workPlace); - return eSize; + dstPtr += lhSize; + maxDstSize -= lhSize; + eSize += lhSize; } - /** - * @returns the maximum repcode value - */ - private static uint ZDICT_maxRep(uint* reps) + if (maxDstSize < 12) { - uint maxRep = reps[0]; - int r; - for (r = 1; r < 3; ++r) - maxRep = maxRep > reps[r] ? maxRep : reps[r]; - return maxRep; + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + goto _cleanup; } - /*! 
ZDICT_finalizeDictionary(): - * Given a custom content as a basis for dictionary, and a set of samples, - * finalize dictionary by adding headers and statistics according to the zstd - * dictionary format. - * - * Samples must be stored concatenated in a flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each - * sample in order. The samples are used to construct the statistics, so they - * should be representative of what you will compress with this dictionary. - * - * The compression level can be set in `parameters`. You should pass the - * compression level you expect to use in production. The statistics for each - * compression level differ, so tuning the dictionary for the compression level - * can help quite a bit. - * - * You can set an explicit dictionary ID in `parameters`, or allow us to pick - * a random dictionary ID for you, but we can't guarantee no collisions. - * - * The dstDictBuffer and the dictContent may overlap, and the content will be - * appended to the end of the header. If the header + the content doesn't fit in - * maxDictSize the beginning of the content is truncated to make room, since it - * is presumed that the most profitable content is at the end of the dictionary, - * since that is the cheapest to reference. - * - * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). - * - * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), - * or an error code, which can be tested by ZDICT_isError(). - * Note: ZDICT_finalizeDictionary() will push notifications into stderr if - * instructed to, using notificationLevel>0. 
- * NOTE: This function currently may fail in several edge cases including: - * * Not enough samples - * * Samples are uncompressible - * * Samples are all exactly the same - */ - public static nuint ZDICT_finalizeDictionary( - void* dictBuffer, - nuint dictBufferCapacity, - void* customDictContent, - nuint dictContentSize, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples, - ZDICT_params_t @params - ) - { - nuint hSize; - byte* header = stackalloc byte[256]; - int compressionLevel = @params.compressionLevel == 0 ? 3 : @params.compressionLevel; - uint notificationLevel = @params.notificationLevel; - /* The final dictionary content must be at least as large as the largest repcode */ - nuint minContentSize = ZDICT_maxRep(repStartValue); - nuint paddingSize; - if (dictBufferCapacity < dictContentSize) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - if (dictBufferCapacity < 256) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - MEM_writeLE32(header, 0xEC30A437); - { - ulong randomID = ZSTD_XXH64(customDictContent, dictContentSize, 0); - uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); - uint dictID = @params.dictID != 0 ? 
@params.dictID : compliantID; - MEM_writeLE32(header + 4, dictID); - } - - hSize = 8; - { - nuint eSize = ZDICT_analyzeEntropy( - header + hSize, - 256 - hSize, - compressionLevel, - samplesBuffer, - samplesSizes, - nbSamples, - customDictContent, - dictContentSize, - notificationLevel - ); - if (ZDICT_isError(eSize)) - return eSize; - hSize += eSize; - } + MEM_writeLE32(dstPtr + 0, repStartValue[0]); + MEM_writeLE32(dstPtr + 4, repStartValue[1]); + MEM_writeLE32(dstPtr + 8, repStartValue[2]); + eSize += 12; + _cleanup: + ZSTD_freeCDict(esr.dict); + ZSTD_freeCCtx(esr.zc); + free(esr.workPlace); + return eSize; + } - if (hSize + dictContentSize > dictBufferCapacity) - { - dictContentSize = dictBufferCapacity - hSize; - } + /** + * @returns the maximum repcode value + */ + private static uint ZDICT_maxRep(uint* reps) + { + uint maxRep = reps[0]; + int r; + for (r = 1; r < 3; ++r) + maxRep = maxRep > reps[r] ? maxRep : reps[r]; + return maxRep; + } - if (dictContentSize < minContentSize) - { - if (hSize + minContentSize > dictBufferCapacity) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + /*! ZDICT_finalizeDictionary(): + * Given a custom content as a basis for dictionary, and a set of samples, + * finalize dictionary by adding headers and statistics according to the zstd + * dictionary format. + * + * Samples must be stored concatenated in a flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each + * sample in order. The samples are used to construct the statistics, so they + * should be representative of what you will compress with this dictionary. + * + * The compression level can be set in `parameters`. You should pass the + * compression level you expect to use in production. The statistics for each + * compression level differ, so tuning the dictionary for the compression level + * can help quite a bit. 
+ * + * You can set an explicit dictionary ID in `parameters`, or allow us to pick + * a random dictionary ID for you, but we can't guarantee no collisions. + * + * The dstDictBuffer and the dictContent may overlap, and the content will be + * appended to the end of the header. If the header + the content doesn't fit in + * maxDictSize the beginning of the content is truncated to make room, since it + * is presumed that the most profitable content is at the end of the dictionary, + * since that is the cheapest to reference. + * + * `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN). + * + * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), + * or an error code, which can be tested by ZDICT_isError(). + * Note: ZDICT_finalizeDictionary() will push notifications into stderr if + * instructed to, using notificationLevel>0. + * NOTE: This function currently may fail in several edge cases including: + * * Not enough samples + * * Samples are uncompressible + * * Samples are all exactly the same + */ + public static nuint ZDICT_finalizeDictionary( + void* dictBuffer, + nuint dictBufferCapacity, + void* customDictContent, + nuint dictContentSize, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_params_t @params + ) + { + nuint hSize; + byte* header = stackalloc byte[256]; + int compressionLevel = @params.compressionLevel == 0 ? 
3 : @params.compressionLevel; + uint notificationLevel = @params.notificationLevel; + /* The final dictionary content must be at least as large as the largest repcode */ + nuint minContentSize = ZDICT_maxRep(repStartValue); + nuint paddingSize; + if (dictBufferCapacity < dictContentSize) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + if (dictBufferCapacity < 256) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + MEM_writeLE32(header, 0xEC30A437); + { + ulong randomID = ZSTD_XXH64(customDictContent, dictContentSize, 0); + uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); + uint dictID = @params.dictID != 0 ? @params.dictID : compliantID; + MEM_writeLE32(header + 4, dictID); + } - paddingSize = minContentSize - dictContentSize; - } - else - { - paddingSize = 0; - } + hSize = 8; + { + nuint eSize = ZDICT_analyzeEntropy( + header + hSize, + 256 - hSize, + compressionLevel, + samplesBuffer, + samplesSizes, + nbSamples, + customDictContent, + dictContentSize, + notificationLevel + ); + if (ZDICT_isError(eSize)) + return eSize; + hSize += eSize; + } - { - nuint dictSize = hSize + paddingSize + dictContentSize; - /* The dictionary consists of the header, optional padding, and the content. - * The padding comes before the content because the "best" position in the - * dictionary is the last byte. 
- */ - byte* outDictHeader = (byte*)dictBuffer; - byte* outDictPadding = outDictHeader + hSize; - byte* outDictContent = outDictPadding + paddingSize; - assert(dictSize <= dictBufferCapacity); - assert(outDictContent + dictContentSize == (byte*)dictBuffer + dictSize); - memmove(outDictContent, customDictContent, dictContentSize); - memcpy(outDictHeader, header, (uint)hSize); - memset(outDictPadding, 0, (uint)paddingSize); - return dictSize; - } + if (hSize + dictContentSize > dictBufferCapacity) + { + dictContentSize = dictBufferCapacity - hSize; } - private static nuint ZDICT_addEntropyTablesFromBuffer_advanced( - void* dictBuffer, - nuint dictContentSize, - nuint dictBufferCapacity, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples, - ZDICT_params_t @params - ) + if (dictContentSize < minContentSize) { - int compressionLevel = @params.compressionLevel == 0 ? 3 : @params.compressionLevel; - uint notificationLevel = @params.notificationLevel; - nuint hSize = 8; + if (hSize + minContentSize > dictBufferCapacity) { - nuint eSize = ZDICT_analyzeEntropy( - (sbyte*)dictBuffer + hSize, - dictBufferCapacity - hSize, - compressionLevel, - samplesBuffer, - samplesSizes, - nbSamples, - (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, - dictContentSize, - notificationLevel - ); - if (ZDICT_isError(eSize)) - return eSize; - hSize += eSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - MEM_writeLE32(dictBuffer, 0xEC30A437); - { - ulong randomID = ZSTD_XXH64( - (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, - dictContentSize, - 0 - ); - uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); - uint dictID = @params.dictID != 0 ? 
@params.dictID : compliantID; - MEM_writeLE32((sbyte*)dictBuffer + 4, dictID); - } + paddingSize = minContentSize - dictContentSize; + } + else + { + paddingSize = 0; + } - if (hSize + dictContentSize < dictBufferCapacity) - memmove( - (sbyte*)dictBuffer + hSize, - (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, - dictContentSize - ); - return dictBufferCapacity < hSize + dictContentSize - ? dictBufferCapacity - : hSize + dictContentSize; + { + nuint dictSize = hSize + paddingSize + dictContentSize; + /* The dictionary consists of the header, optional padding, and the content. + * The padding comes before the content because the "best" position in the + * dictionary is the last byte. + */ + byte* outDictHeader = (byte*)dictBuffer; + byte* outDictPadding = outDictHeader + hSize; + byte* outDictContent = outDictPadding + paddingSize; + assert(dictSize <= dictBufferCapacity); + assert(outDictContent + dictContentSize == (byte*)dictBuffer + dictSize); + memmove(outDictContent, customDictContent, dictContentSize); + memcpy(outDictHeader, header, (uint)hSize); + memset(outDictPadding, 0, (uint)paddingSize); + return dictSize; } + } - /*! ZDICT_trainFromBuffer(): - * Train a dictionary from an array of samples. - * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, - * f=20, and accel=1. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * Note: Dictionary training will fail if there are not enough samples to construct a - * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). 
- * If dictionary training fails, you should use zstd without a dictionary, as the dictionary - * would've been ineffective anyways. If you believe your samples would benefit from a dictionary - * please open an issue with details, and we can look into it. - * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ - public static nuint ZDICT_trainFromBuffer( - void* dictBuffer, - nuint dictBufferCapacity, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples - ) + private static nuint ZDICT_addEntropyTablesFromBuffer_advanced( + void* dictBuffer, + nuint dictContentSize, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples, + ZDICT_params_t @params + ) + { + int compressionLevel = @params.compressionLevel == 0 ? 
3 : @params.compressionLevel; + uint notificationLevel = @params.notificationLevel; + nuint hSize = 8; { - ZDICT_fastCover_params_t @params; - @params = new ZDICT_fastCover_params_t { d = 8, steps = 4 }; - @params.zParams.compressionLevel = 3; - return ZDICT_optimizeTrainFromBuffer_fastCover( - dictBuffer, - dictBufferCapacity, + nuint eSize = ZDICT_analyzeEntropy( + (sbyte*)dictBuffer + hSize, + dictBufferCapacity - hSize, + compressionLevel, samplesBuffer, samplesSizes, nbSamples, - &@params + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, + dictContentSize, + notificationLevel ); + if (ZDICT_isError(eSize)) + return eSize; + hSize += eSize; } - public static nuint ZDICT_addEntropyTablesFromBuffer( - void* dictBuffer, - nuint dictContentSize, - nuint dictBufferCapacity, - void* samplesBuffer, - nuint* samplesSizes, - uint nbSamples - ) + MEM_writeLE32(dictBuffer, 0xEC30A437); { - ZDICT_params_t @params; - @params = new ZDICT_params_t(); - return ZDICT_addEntropyTablesFromBuffer_advanced( - dictBuffer, + ulong randomID = ZSTD_XXH64( + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, - dictBufferCapacity, - samplesBuffer, - samplesSizes, - nbSamples, - @params + 0 ); + uint compliantID = (uint)(randomID % ((1U << 31) - 32768) + 32768); + uint dictID = @params.dictID != 0 ? @params.dictID : compliantID; + MEM_writeLE32((sbyte*)dictBuffer + 4, dictID); } + + if (hSize + dictContentSize < dictBufferCapacity) + memmove( + (sbyte*)dictBuffer + hSize, + (sbyte*)dictBuffer + dictBufferCapacity - dictContentSize, + dictContentSize + ); + return dictBufferCapacity < hSize + dictContentSize + ? dictBufferCapacity + : hSize + dictContentSize; + } + + /*! ZDICT_trainFromBuffer(): + * Train a dictionary from an array of samples. + * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, + * f=20, and accel=1. 
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Note: Dictionary training will fail if there are not enough samples to construct a + * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). + * If dictionary training fails, you should use zstd without a dictionary, as the dictionary + * would've been ineffective anyways. If you believe your samples would benefit from a dictionary + * please open an issue with details, and we can look into it. + * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. 
+ */ + public static nuint ZDICT_trainFromBuffer( + void* dictBuffer, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples + ) + { + ZDICT_fastCover_params_t @params; + @params = new ZDICT_fastCover_params_t { d = 8, steps = 4 }; + @params.zParams.compressionLevel = 3; + return ZDICT_optimizeTrainFromBuffer_fastCover( + dictBuffer, + dictBufferCapacity, + samplesBuffer, + samplesSizes, + nbSamples, + &@params + ); + } + + public static nuint ZDICT_addEntropyTablesFromBuffer( + void* dictBuffer, + nuint dictContentSize, + nuint dictBufferCapacity, + void* samplesBuffer, + nuint* samplesSizes, + uint nbSamples + ) + { + ZDICT_params_t @params; + @params = new ZDICT_params_t(); + return ZDICT_addEntropyTablesFromBuffer_advanced( + dictBuffer, + dictContentSize, + dictBufferCapacity, + samplesBuffer, + samplesSizes, + nbSamples, + @params + ); } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs index f3a0972d0..ce1018b0d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zstd.cs @@ -1,11 +1,10 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { - private static readonly ZSTD_customMem ZSTD_defaultCMem = new ZSTD_customMem( - customAlloc: null, - customFree: null, - opaque: null - ); - } + private static readonly ZSTD_customMem ZSTD_defaultCMem = new ZSTD_customMem( + customAlloc: null, + customFree: null, + opaque: null + ); } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs index 92312bf30..629a435c6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCommon.cs 
@@ -1,49 +1,48 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*-**************************************** + * Version + ******************************************/ + public static uint ZSTD_versionNumber() { - /*-**************************************** - * Version - ******************************************/ - public static uint ZSTD_versionNumber() - { - return 1 * 100 * 100 + 5 * 100 + 7; - } + return 1 * 100 * 100 + 5 * 100 + 7; + } - /*! ZSTD_versionString() : - * Return runtime library version, like "1.4.5". Requires v1.3.0+. */ - public static string ZSTD_versionString() - { - return "1.5.7"; - } + /*! ZSTD_versionString() : + * Return runtime library version, like "1.4.5". Requires v1.3.0+. */ + public static string ZSTD_versionString() + { + return "1.5.7"; + } - /*! ZSTD_isError() : - * tells if a return value is an error code - * symbol is required for external callers */ - public static bool ZSTD_isError(nuint code) - { - return ERR_isError(code); - } + /*! ZSTD_isError() : + * tells if a return value is an error code + * symbol is required for external callers */ + public static bool ZSTD_isError(nuint code) + { + return ERR_isError(code); + } - /*! ZSTD_getErrorName() : - * provides error code string from function result (useful for debugging) */ - public static string ZSTD_getErrorName(nuint code) - { - return ERR_getErrorName(code); - } + /*! ZSTD_getErrorName() : + * provides error code string from function result (useful for debugging) */ + public static string ZSTD_getErrorName(nuint code) + { + return ERR_getErrorName(code); + } - /*! ZSTD_getError() : - * convert a `size_t` function result into a proper ZSTD_errorCode enum */ - public static ZSTD_ErrorCode ZSTD_getErrorCode(nuint code) - { - return ERR_getErrorCode(code); - } + /*! 
ZSTD_getError() : + * convert a `size_t` function result into a proper ZSTD_errorCode enum */ + public static ZSTD_ErrorCode ZSTD_getErrorCode(nuint code) + { + return ERR_getErrorCode(code); + } - /*! ZSTD_getErrorString() : - * provides error code string from enum */ - public static string ZSTD_getErrorString(ZSTD_ErrorCode code) - { - return ERR_getErrorString(code); - } + /*! ZSTD_getErrorString() : + * provides error code string from enum */ + public static string ZSTD_getErrorString(ZSTD_ErrorCode code) + { + return ERR_getErrorString(code); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs index 46b98f3ad..a10b181e2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -3,1240 +3,1153 @@ using System.Numerics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { - /*-************************************* - * Helper functions - ***************************************/ - /* ZSTD_compressBound() - * Note that the result from this function is only valid for - * the one-pass compression functions. - * When employing the streaming mode, - * if flushes are frequently altering the size of blocks, - * the overhead from block headers can make the compressed data larger - * than the return value of ZSTD_compressBound(). - */ - public static nuint ZSTD_compressBound(nuint srcSize) - { - nuint r = - srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) - ? 0 - : srcSize - + (srcSize >> 8) - + (srcSize < 128 << 10 ? 
(128 << 10) - srcSize >> 11 : 0); - if (r == 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - return r; - } + /*-************************************* + * Helper functions + ***************************************/ + /* ZSTD_compressBound() + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). + */ + public static nuint ZSTD_compressBound(nuint srcSize) + { + nuint r = + srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) + ? 0 + : srcSize + + (srcSize >> 8) + + (srcSize < 128 << 10 ? (128 << 10) - srcSize >> 11 : 0); + if (r == 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return r; + } - public static ZSTD_CCtx_s* ZSTD_createCCtx() - { - return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); - } + public static ZSTD_CCtx_s* ZSTD_createCCtx() + { + return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); + } - private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) + private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) + { + assert(cctx != null); + *cctx = new ZSTD_CCtx_s { customMem = memManager, bmi2 = 0 }; { - assert(cctx != null); - *cctx = new ZSTD_CCtx_s { customMem = memManager, bmi2 = 0 }; - { - nuint err = ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_parameters); - assert(!ERR_isError(err)); - } + nuint err = ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_parameters); + assert(!ERR_isError(err)); } + } - public static ZSTD_CCtx_s* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) - { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) - != 0 - ) - return null; - { - ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc( - (nuint)sizeof(ZSTD_CCtx_s), - customMem - ); - if (cctx == null) - return null; - ZSTD_initCCtx(cctx, customMem); - return cctx; - } - } - - /*! ZSTD_initStatic*() : - * Initialize an object using a pre-allocated fixed-size buffer. - * workspace: The memory area to emplace the object into. - * Provided pointer *must be 8-bytes aligned*. - * Buffer must outlive object. - * workspaceSize: Use ZSTD_estimate*Size() to determine - * how large workspace must be to support target scenario. - * @return : pointer to object (same address as workspace, just different type), - * or NULL if error (size too small, incorrect alignment, etc.) - * Note : zstd will never resize nor malloc() when using a static buffer. - * If the object requires more memory than available, - * zstd will just error out (typically ZSTD_error_memory_allocation). - * Note 2 : there is no corresponding "free" function. - * Since workspace is allocated externally, it must be freed externally too. - * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level - * into its associated cParams. - * Limitation 1 : currently not compatible with internal dictionary creation, triggered by - * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). - * Limitation 2 : static cctx currently not compatible with multi-threading. - * Limitation 3 : static dctx is incompatible with legacy support. - */ - public static ZSTD_CCtx_s* ZSTD_initStaticCCtx(void* workspace, nuint workspaceSize) + public static ZSTD_CCtx_s* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) + { + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) + != 0 + ) + return null; { - ZSTD_cwksp ws; - ZSTD_CCtx_s* cctx; - if (workspaceSize <= (nuint)sizeof(ZSTD_CCtx_s)) - return null; - if (((nuint)workspace & 7) != 0) - return null; - ZSTD_cwksp_init( - &ws, - workspace, - workspaceSize, - ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc + ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_CCtx_s), + customMem ); - cctx = (ZSTD_CCtx_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CCtx_s)); if (cctx == null) return null; - *cctx = new ZSTD_CCtx_s(); - ZSTD_cwksp_move(&cctx->workspace, &ws); - cctx->staticSize = workspaceSize; - if ( - ZSTD_cwksp_check_available( - &cctx->workspace, - (nuint)( - ( - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208 - ) - + 2 * sizeof(ZSTD_compressedBlockState_t) - ) - ) == 0 - ) - return null; - cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( - &cctx->workspace, - (nuint)sizeof(ZSTD_compressedBlockState_t) - ); - cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( - &cctx->workspace, - (nuint)sizeof(ZSTD_compressedBlockState_t) - ); - cctx->tmpWorkspace = ZSTD_cwksp_reserve_object( - &cctx->workspace, - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208 - ); - cctx->tmpWkspSize = - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208; - cctx->bmi2 = 0; + ZSTD_initCCtx(cctx, customMem); return cctx; } + } - /** - * Clears and frees all of the dictionaries in the CCtx. 
- */ - private static void ZSTD_clearAllDicts(ZSTD_CCtx_s* cctx) - { - ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); - ZSTD_freeCDict(cctx->localDict.cdict); - cctx->localDict = new ZSTD_localDict(); - cctx->prefixDict = new ZSTD_prefixDict_s(); - cctx->cdict = null; - } - - private static nuint ZSTD_sizeof_localDict(ZSTD_localDict dict) - { - nuint bufferSize = dict.dictBuffer != null ? dict.dictSize : 0; - nuint cdictSize = ZSTD_sizeof_CDict(dict.cdict); - return bufferSize + cdictSize; - } + /*! ZSTD_initStatic*() : + * Initialize an object using a pre-allocated fixed-size buffer. + * workspace: The memory area to emplace the object into. + * Provided pointer *must be 8-bytes aligned*. + * Buffer must outlive object. + * workspaceSize: Use ZSTD_estimate*Size() to determine + * how large workspace must be to support target scenario. + * @return : pointer to object (same address as workspace, just different type), + * or NULL if error (size too small, incorrect alignment, etc.) + * Note : zstd will never resize nor malloc() when using a static buffer. + * If the object requires more memory than available, + * zstd will just error out (typically ZSTD_error_memory_allocation). + * Note 2 : there is no corresponding "free" function. + * Since workspace is allocated externally, it must be freed externally too. + * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level + * into its associated cParams. + * Limitation 1 : currently not compatible with internal dictionary creation, triggered by + * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). + * Limitation 2 : static cctx currently not compatible with multi-threading. + * Limitation 3 : static dctx is incompatible with legacy support. 
+ */ + public static ZSTD_CCtx_s* ZSTD_initStaticCCtx(void* workspace, nuint workspaceSize) + { + ZSTD_cwksp ws; + ZSTD_CCtx_s* cctx; + if (workspaceSize <= (nuint)sizeof(ZSTD_CCtx_s)) + return null; + if (((nuint)workspace & 7) != 0) + return null; + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc + ); + cctx = (ZSTD_CCtx_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CCtx_s)); + if (cctx == null) + return null; + *cctx = new ZSTD_CCtx_s(); + ZSTD_cwksp_move(&cctx->workspace, &ws); + cctx->staticSize = workspaceSize; + if ( + ZSTD_cwksp_check_available( + &cctx->workspace, + (nuint)( + ( + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ) + + 2 * sizeof(ZSTD_compressedBlockState_t) + ) + ) == 0 + ) + return null; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + &cctx->workspace, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + &cctx->workspace, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + cctx->tmpWorkspace = ZSTD_cwksp_reserve_object( + &cctx->workspace, + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); + cctx->tmpWkspSize = + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208; + cctx->bmi2 = 0; + return cctx; + } - private static void ZSTD_freeCCtxContent(ZSTD_CCtx_s* cctx) - { - assert(cctx != null); - assert(cctx->staticSize == 0); - ZSTD_clearAllDicts(cctx); - ZSTDMT_freeCCtx(cctx->mtctx); - cctx->mtctx = null; - ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); - } + /** + * Clears and frees all of the dictionaries in the CCtx. 
+ */ + private static void ZSTD_clearAllDicts(ZSTD_CCtx_s* cctx) + { + ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem); + ZSTD_freeCDict(cctx->localDict.cdict); + cctx->localDict = new ZSTD_localDict(); + cctx->prefixDict = new ZSTD_prefixDict_s(); + cctx->cdict = null; + } - public static nuint ZSTD_freeCCtx(ZSTD_CCtx_s* cctx) - { - if (cctx == null) - return 0; - if (cctx->staticSize != 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + private static nuint ZSTD_sizeof_localDict(ZSTD_localDict dict) + { + nuint bufferSize = dict.dictBuffer != null ? dict.dictSize : 0; + nuint cdictSize = ZSTD_sizeof_CDict(dict.cdict); + return bufferSize + cdictSize; + } - { - int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); - ZSTD_freeCCtxContent(cctx); - if (cctxInWorkspace == 0) - ZSTD_customFree(cctx, cctx->customMem); - } + private static void ZSTD_freeCCtxContent(ZSTD_CCtx_s* cctx) + { + assert(cctx != null); + assert(cctx->staticSize == 0); + ZSTD_clearAllDicts(cctx); + ZSTDMT_freeCCtx(cctx->mtctx); + cctx->mtctx = null; + ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); + } + public static nuint ZSTD_freeCCtx(ZSTD_CCtx_s* cctx) + { + if (cctx == null) return 0; - } - - private static nuint ZSTD_sizeof_mtctx(ZSTD_CCtx_s* cctx) + if (cctx->staticSize != 0) { - return ZSTDMT_sizeof_CCtx(cctx->mtctx); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - /*! ZSTD_sizeof_*() : Requires v1.4.0+ - * These functions give the _current_ memory usage of selected object. - * Note that object memory usage can evolve (increase or decrease) over time. */ - public static nuint ZSTD_sizeof_CCtx(ZSTD_CCtx_s* cctx) { - if (cctx == null) - return 0; - return (nuint)(cctx->workspace.workspace == cctx ? 
0 : sizeof(ZSTD_CCtx_s)) - + ZSTD_cwksp_sizeof(&cctx->workspace) - + ZSTD_sizeof_localDict(cctx->localDict) - + ZSTD_sizeof_mtctx(cctx); + int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + ZSTD_freeCCtxContent(cctx); + if (cctxInWorkspace == 0) + ZSTD_customFree(cctx, cctx->customMem); } - public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) - { - return ZSTD_sizeof_CCtx(zcs); - } + return 0; + } - /* private API call, for dictBuilder only */ - private static SeqStore_t* ZSTD_getSeqStore(ZSTD_CCtx_s* ctx) - { - return &ctx->seqStore; - } + private static nuint ZSTD_sizeof_mtctx(ZSTD_CCtx_s* cctx) + { + return ZSTDMT_sizeof_CCtx(cctx->mtctx); + } - /* Returns true if the strategy supports using a row based matchfinder */ - private static int ZSTD_rowMatchFinderSupported(ZSTD_strategy strategy) - { - return strategy >= ZSTD_strategy.ZSTD_greedy && strategy <= ZSTD_strategy.ZSTD_lazy2 - ? 1 - : 0; - } + /*! ZSTD_sizeof_*() : Requires v1.4.0+ + * These functions give the _current_ memory usage of selected object. + * Note that object memory usage can evolve (increase or decrease) over time. */ + public static nuint ZSTD_sizeof_CCtx(ZSTD_CCtx_s* cctx) + { + if (cctx == null) + return 0; + return (nuint)(cctx->workspace.workspace == cctx ? 0 : sizeof(ZSTD_CCtx_s)) + + ZSTD_cwksp_sizeof(&cctx->workspace) + + ZSTD_sizeof_localDict(cctx->localDict) + + ZSTD_sizeof_mtctx(cctx); + } - /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder - * for this compression. 
- */ - private static int ZSTD_rowMatchFinderUsed(ZSTD_strategy strategy, ZSTD_paramSwitch_e mode) - { - assert(mode != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return - ZSTD_rowMatchFinderSupported(strategy) != 0 - && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable + public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) + { + return ZSTD_sizeof_CCtx(zcs); + } + + /* private API call, for dictBuilder only */ + private static SeqStore_t* ZSTD_getSeqStore(ZSTD_CCtx_s* ctx) + { + return &ctx->seqStore; + } + + /* Returns true if the strategy supports using a row based matchfinder */ + private static int ZSTD_rowMatchFinderSupported(ZSTD_strategy strategy) + { + return strategy >= ZSTD_strategy.ZSTD_greedy && strategy <= ZSTD_strategy.ZSTD_lazy2 + ? 1 + : 0; + } + + /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder + * for this compression. + */ + private static int ZSTD_rowMatchFinderUsed(ZSTD_strategy strategy, ZSTD_paramSwitch_e mode) + { + assert(mode != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return + ZSTD_rowMatchFinderSupported(strategy) != 0 + && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 
1 : 0; - } + } - /* Returns row matchfinder usage given an initial mode and cParams */ - private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode( - ZSTD_paramSwitch_e mode, - ZSTD_compressionParameters* cParams - ) - { - if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) - return mode; - mode = ZSTD_paramSwitch_e.ZSTD_ps_disable; - if (ZSTD_rowMatchFinderSupported(cParams->strategy) == 0) - return mode; - if (cParams->windowLog > 14) - mode = ZSTD_paramSwitch_e.ZSTD_ps_enable; + /* Returns row matchfinder usage given an initial mode and cParams */ + private static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) return mode; - } + mode = ZSTD_paramSwitch_e.ZSTD_ps_disable; + if (ZSTD_rowMatchFinderSupported(cParams->strategy) == 0) + return mode; + if (cParams->windowLog > 14) + mode = ZSTD_paramSwitch_e.ZSTD_ps_enable; + return mode; + } - /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ - private static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode( - ZSTD_paramSwitch_e mode, - ZSTD_compressionParameters* cParams - ) - { - if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) - return mode; - return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 17 - ? ZSTD_paramSwitch_e.ZSTD_ps_enable - : ZSTD_paramSwitch_e.ZSTD_ps_disable; - } + /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ + private static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return mode; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 17 + ? 
ZSTD_paramSwitch_e.ZSTD_ps_enable + : ZSTD_paramSwitch_e.ZSTD_ps_disable; + } - /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ - private static int ZSTD_allocateChainTable( - ZSTD_strategy strategy, - ZSTD_paramSwitch_e useRowMatchFinder, - uint forDDSDict - ) - { - assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return - forDDSDict != 0 - || strategy != ZSTD_strategy.ZSTD_fast - && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 + /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ + private static int ZSTD_allocateChainTable( + ZSTD_strategy strategy, + ZSTD_paramSwitch_e useRowMatchFinder, + uint forDDSDict + ) + { + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return + forDDSDict != 0 + || strategy != ZSTD_strategy.ZSTD_fast + && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 ? 1 : 0; - } + } - /* Returns ZSTD_ps_enable if compression parameters are such that we should - * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns ZSTD_ps_disable otherwise. - */ - private static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm( - ZSTD_paramSwitch_e mode, - ZSTD_compressionParameters* cParams - ) + /* Returns ZSTD_ps_enable if compression parameters are such that we should + * enable long distance matching (wlog >= 27, strategy >= btopt). + * Returns ZSTD_ps_disable otherwise. + */ + private static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm( + ZSTD_paramSwitch_e mode, + ZSTD_compressionParameters* cParams + ) + { + if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return mode; + return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 27 + ? ZSTD_paramSwitch_e.ZSTD_ps_enable + : ZSTD_paramSwitch_e.ZSTD_ps_disable; + } + + private static int ZSTD_resolveExternalSequenceValidation(int mode) + { + return mode; + } + + /* Resolves maxBlockSize to the default if no value is present. 
*/ + private static nuint ZSTD_resolveMaxBlockSize(nuint maxBlockSize) + { + if (maxBlockSize == 0) { - if (mode != ZSTD_paramSwitch_e.ZSTD_ps_auto) - return mode; - return cParams->strategy >= ZSTD_strategy.ZSTD_btopt && cParams->windowLog >= 27 - ? ZSTD_paramSwitch_e.ZSTD_ps_enable - : ZSTD_paramSwitch_e.ZSTD_ps_disable; + return 1 << 17; } - - private static int ZSTD_resolveExternalSequenceValidation(int mode) + else { - return mode; + return maxBlockSize; } + } - /* Resolves maxBlockSize to the default if no value is present. */ - private static nuint ZSTD_resolveMaxBlockSize(nuint maxBlockSize) + private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch( + ZSTD_paramSwitch_e value, + int cLevel + ) + { + if (value != ZSTD_paramSwitch_e.ZSTD_ps_auto) + return value; + if (cLevel < 10) { - if (maxBlockSize == 0) - { - return 1 << 17; - } - else - { - return maxBlockSize; - } + return ZSTD_paramSwitch_e.ZSTD_ps_disable; } - - private static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch( - ZSTD_paramSwitch_e value, - int cLevel - ) + else { - if (value != ZSTD_paramSwitch_e.ZSTD_ps_auto) - return value; - if (cLevel < 10) - { - return ZSTD_paramSwitch_e.ZSTD_ps_disable; - } - else - { - return ZSTD_paramSwitch_e.ZSTD_ps_enable; - } + return ZSTD_paramSwitch_e.ZSTD_ps_enable; } + } - /* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. - * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */ - private static int ZSTD_CDictIndicesAreTagged(ZSTD_compressionParameters* cParams) - { - return - cParams->strategy == ZSTD_strategy.ZSTD_fast - || cParams->strategy == ZSTD_strategy.ZSTD_dfast + /* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. 
*/ + private static int ZSTD_CDictIndicesAreTagged(ZSTD_compressionParameters* cParams) + { + return + cParams->strategy == ZSTD_strategy.ZSTD_fast + || cParams->strategy == ZSTD_strategy.ZSTD_dfast ? 1 : 0; - } + } - private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams( - ZSTD_compressionParameters cParams - ) + private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams( + ZSTD_compressionParameters cParams + ) + { + ZSTD_CCtx_params_s cctxParams; + ZSTD_CCtxParams_init(&cctxParams, 3); + cctxParams.cParams = cParams; + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm( + cctxParams.ldmParams.enableLdm, + &cParams + ); + if (cctxParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - ZSTD_CCtx_params_s cctxParams; - ZSTD_CCtxParams_init(&cctxParams, 3); - cctxParams.cParams = cParams; - cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm( - cctxParams.ldmParams.enableLdm, - &cParams - ); - if (cctxParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); - assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); - assert(cctxParams.ldmParams.hashRateLog < 32); - } - - cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode( - cctxParams.postBlockSplitter, - &cParams - ); - cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - cctxParams.useRowMatchFinder, - &cParams - ); - cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation( - cctxParams.validateSequences - ); - cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); - cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( - cctxParams.searchForExternalRepcodes, - cctxParams.compressionLevel - ); - assert(ZSTD_checkCParams(cParams) == 0); - return cctxParams; + ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); + assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); + 
assert(cctxParams.ldmParams.hashRateLog < 32); } - private static ZSTD_CCtx_params_s* ZSTD_createCCtxParams_advanced(ZSTD_customMem customMem) - { - ZSTD_CCtx_params_s* @params; - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) - return null; - @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc( - (nuint)sizeof(ZSTD_CCtx_params_s), - customMem - ); - if (@params == null) - { - return null; - } + cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode( + cctxParams.postBlockSplitter, + &cParams + ); + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams.useRowMatchFinder, + &cParams + ); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation( + cctxParams.validateSequences + ); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel + ); + assert(ZSTD_checkCParams(cParams) == 0); + return cctxParams; + } - ZSTD_CCtxParams_init(@params, 3); - @params->customMem = customMem; - return @params; - } - - /*! ZSTD_CCtx_params : - * Quick howto : - * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure - * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into - * an existing ZSTD_CCtx_params structure. - * This is similar to - * ZSTD_CCtx_setParameter(). - * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to - * an existing CCtx. - * These parameters will be applied to - * all subsequent frames. - * - ZSTD_compressStream2() : Do compression using the CCtx. - * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer. - * - * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() - * for static allocation of CCtx for single-threaded compression. 
- */ - public static ZSTD_CCtx_params_s* ZSTD_createCCtxParams() + private static ZSTD_CCtx_params_s* ZSTD_createCCtxParams_advanced(ZSTD_customMem customMem) + { + ZSTD_CCtx_params_s* @params; + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) + return null; + @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc( + (nuint)sizeof(ZSTD_CCtx_params_s), + customMem + ); + if (@params == null) { - return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); + return null; } - public static nuint ZSTD_freeCCtxParams(ZSTD_CCtx_params_s* @params) - { - if (@params == null) - { - return 0; - } + ZSTD_CCtxParams_init(@params, 3); + @params->customMem = customMem; + return @params; + } + + /*! ZSTD_CCtx_params : + * Quick howto : + * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure + * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into + * an existing ZSTD_CCtx_params structure. + * This is similar to + * ZSTD_CCtx_setParameter(). + * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to + * an existing CCtx. + * These parameters will be applied to + * all subsequent frames. + * - ZSTD_compressStream2() : Do compression using the CCtx. + * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer. + * + * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() + * for static allocation of CCtx for single-threaded compression. + */ + public static ZSTD_CCtx_params_s* ZSTD_createCCtxParams() + { + return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); + } - ZSTD_customFree(@params, @params->customMem); + public static nuint ZSTD_freeCCtxParams(ZSTD_CCtx_params_s* @params) + { + if (@params == null) + { return 0; } - /*! ZSTD_CCtxParams_reset() : - * Reset params to default values. - */ - public static nuint ZSTD_CCtxParams_reset(ZSTD_CCtx_params_s* @params) + ZSTD_customFree(@params, @params->customMem); + return 0; + } + + /*! 
ZSTD_CCtxParams_reset() : + * Reset params to default values. + */ + public static nuint ZSTD_CCtxParams_reset(ZSTD_CCtx_params_s* @params) + { + return ZSTD_CCtxParams_init(@params, 3); + } + + /*! ZSTD_CCtxParams_init() : + * Initializes the compression parameters of cctxParams according to + * compression level. All other parameters are reset to their default values. + */ + public static nuint ZSTD_CCtxParams_init( + ZSTD_CCtx_params_s* cctxParams, + int compressionLevel + ) + { + if (cctxParams == null) { - return ZSTD_CCtxParams_init(@params, 3); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - /*! ZSTD_CCtxParams_init() : - * Initializes the compression parameters of cctxParams according to - * compression level. All other parameters are reset to their default values. - */ - public static nuint ZSTD_CCtxParams_init( - ZSTD_CCtx_params_s* cctxParams, - int compressionLevel - ) - { - if (cctxParams == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } + *cctxParams = new ZSTD_CCtx_params_s { compressionLevel = compressionLevel }; + cctxParams->fParams.contentSizeFlag = 1; + return 0; + } - *cctxParams = new ZSTD_CCtx_params_s { compressionLevel = compressionLevel }; - cctxParams->fParams.contentSizeFlag = 1; - return 0; - } + /** + * Initializes `cctxParams` from `params` and `compressionLevel`. + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. 
+ */ + private static void ZSTD_CCtxParams_init_internal( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters* @params, + int compressionLevel + ) + { + assert(ZSTD_checkCParams(@params->cParams) == 0); + *cctxParams = new ZSTD_CCtx_params_s + { + cParams = @params->cParams, + fParams = @params->fParams, + compressionLevel = compressionLevel, + useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams->useRowMatchFinder, + &@params->cParams + ), + postBlockSplitter = ZSTD_resolveBlockSplitterMode( + cctxParams->postBlockSplitter, + &@params->cParams + ), + }; + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm( + cctxParams->ldmParams.enableLdm, + &@params->cParams + ); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation( + cctxParams->validateSequences + ); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + cctxParams->searchForExternalRepcodes, + compressionLevel + ); + } - /** - * Initializes `cctxParams` from `params` and `compressionLevel`. - * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. - */ - private static void ZSTD_CCtxParams_init_internal( - ZSTD_CCtx_params_s* cctxParams, - ZSTD_parameters* @params, - int compressionLevel - ) + /*! ZSTD_CCtxParams_init_advanced() : + * Initializes the compression and frame parameters of cctxParams according to + * params. All other parameters are reset to their default values. 
+ */ + public static nuint ZSTD_CCtxParams_init_advanced( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters @params + ) + { + if (cctxParams == null) { - assert(ZSTD_checkCParams(@params->cParams) == 0); - *cctxParams = new ZSTD_CCtx_params_s - { - cParams = @params->cParams, - fParams = @params->fParams, - compressionLevel = compressionLevel, - useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - cctxParams->useRowMatchFinder, - &@params->cParams - ), - postBlockSplitter = ZSTD_resolveBlockSplitterMode( - cctxParams->postBlockSplitter, - &@params->cParams - ), - }; - cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm( - cctxParams->ldmParams.enableLdm, - &@params->cParams - ); - cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation( - cctxParams->validateSequences - ); - cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); - cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( - cctxParams->searchForExternalRepcodes, - compressionLevel - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - /*! ZSTD_CCtxParams_init_advanced() : - * Initializes the compression and frame parameters of cctxParams according to - * params. All other parameters are reset to their default values. - */ - public static nuint ZSTD_CCtxParams_init_advanced( - ZSTD_CCtx_params_s* cctxParams, - ZSTD_parameters @params - ) { - if (cctxParams == null) + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + return err_code; } + } - { - nuint err_code = ZSTD_checkCParams(@params.cParams); - if (ERR_isError(err_code)) - { - return err_code; - } - } + ZSTD_CCtxParams_init_internal(cctxParams, &@params, 0); + return 0; + } - ZSTD_CCtxParams_init_internal(cctxParams, &@params, 0); - return 0; - } + /** + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. 
+ * @param params Validated zstd parameters. + */ + private static void ZSTD_CCtxParams_setZstdParams( + ZSTD_CCtx_params_s* cctxParams, + ZSTD_parameters* @params + ) + { + assert(ZSTD_checkCParams(@params->cParams) == 0); + cctxParams->cParams = @params->cParams; + cctxParams->fParams = @params->fParams; + cctxParams->compressionLevel = 0; + } - /** - * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. - * @param params Validated zstd parameters. - */ - private static void ZSTD_CCtxParams_setZstdParams( - ZSTD_CCtx_params_s* cctxParams, - ZSTD_parameters* @params - ) - { - assert(ZSTD_checkCParams(@params->cParams) == 0); - cctxParams->cParams = @params->cParams; - cctxParams->fParams = @params->fParams; - cctxParams->compressionLevel = 0; + /*! ZSTD_cParam_getBounds() : + * All parameters must belong to an interval with lower and upper bounds, + * otherwise they will either trigger an error or be automatically clamped. + * @return : a structure, ZSTD_bounds, which contains + * - an error status field, which must be tested using ZSTD_isError() + * - lower and upper bounds, both inclusive + */ + public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) + { + ZSTD_bounds bounds = new ZSTD_bounds + { + error = 0, + lowerBound = 0, + upperBound = 0, + }; + switch (param) + { + case ZSTD_cParameter.ZSTD_c_compressionLevel: + bounds.lowerBound = ZSTD_minCLevel(); + bounds.upperBound = ZSTD_maxCLevel(); + return bounds; + case ZSTD_cParameter.ZSTD_c_windowLog: + bounds.lowerBound = 10; + bounds.upperBound = sizeof(nuint) == 4 ? 30 : 31; + return bounds; + case ZSTD_cParameter.ZSTD_c_hashLog: + bounds.lowerBound = 6; + bounds.upperBound = + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_chainLog: + bounds.lowerBound = 6; + bounds.upperBound = sizeof(nuint) == 4 ? 
29 : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_searchLog: + bounds.lowerBound = 1; + bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) - 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_minMatch: + bounds.lowerBound = 3; + bounds.upperBound = 7; + return bounds; + case ZSTD_cParameter.ZSTD_c_targetLength: + bounds.lowerBound = 0; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_strategy: + bounds.lowerBound = (int)ZSTD_strategy.ZSTD_fast; + bounds.upperBound = (int)ZSTD_strategy.ZSTD_btultra2; + return bounds; + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_checksumFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_nbWorkers: + bounds.lowerBound = 0; + bounds.upperBound = sizeof(void*) == 4 ? 64 : 256; + return bounds; + case ZSTD_cParameter.ZSTD_c_jobSize: + bounds.lowerBound = 0; + bounds.upperBound = MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20); + return bounds; + case ZSTD_cParameter.ZSTD_c_overlapLog: + bounds.lowerBound = 0; + bounds.upperBound = 9; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + bounds.lowerBound = 6; + bounds.upperBound = + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 
30 + : 31 + : 30; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + bounds.lowerBound = 4; + bounds.upperBound = 4096; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + bounds.lowerBound = 1; + bounds.upperBound = 8; + return bounds; + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + bounds.lowerBound = 0; + bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) - 6; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; + bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + bounds.lowerBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictDefaultAttach; + bounds.upperBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictForceLoad; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + bounds.lowerBound = 1340; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + bounds.lowerBound = 0; + bounds.upperBound = 2147483647; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + bounds.lowerBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters; + bounds.upperBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + 
bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + bounds.lowerBound = 0; + bounds.upperBound = 6; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam14: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + bounds.lowerBound = 1 << 10; + bounds.upperBound = 1 << 17; + return bounds; + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; + return bounds; + default: + bounds.error = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); + return bounds; } + } - /*! ZSTD_cParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. 
- * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - lower and upper bounds, both inclusive - */ - public static ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) - { - ZSTD_bounds bounds = new ZSTD_bounds - { - error = 0, - lowerBound = 0, - upperBound = 0, - }; - switch (param) - { - case ZSTD_cParameter.ZSTD_c_compressionLevel: - bounds.lowerBound = ZSTD_minCLevel(); - bounds.upperBound = ZSTD_maxCLevel(); - return bounds; - case ZSTD_cParameter.ZSTD_c_windowLog: - bounds.lowerBound = 10; - bounds.upperBound = sizeof(nuint) == 4 ? 30 : 31; - return bounds; - case ZSTD_cParameter.ZSTD_c_hashLog: - bounds.lowerBound = 6; - bounds.upperBound = - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30; - return bounds; - case ZSTD_cParameter.ZSTD_c_chainLog: - bounds.lowerBound = 6; - bounds.upperBound = sizeof(nuint) == 4 ? 29 : 30; - return bounds; - case ZSTD_cParameter.ZSTD_c_searchLog: - bounds.lowerBound = 1; - bounds.upperBound = (sizeof(nuint) == 4 ? 30 : 31) - 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_minMatch: - bounds.lowerBound = 3; - bounds.upperBound = 7; - return bounds; - case ZSTD_cParameter.ZSTD_c_targetLength: - bounds.lowerBound = 0; - bounds.upperBound = 1 << 17; - return bounds; - case ZSTD_cParameter.ZSTD_c_strategy: - bounds.lowerBound = (int)ZSTD_strategy.ZSTD_fast; - bounds.upperBound = (int)ZSTD_strategy.ZSTD_btultra2; - return bounds; - case ZSTD_cParameter.ZSTD_c_contentSizeFlag: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_checksumFlag: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_dictIDFlag: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_nbWorkers: - bounds.lowerBound = 0; - bounds.upperBound = sizeof(void*) == 4 ? 
64 : 256; - return bounds; - case ZSTD_cParameter.ZSTD_c_jobSize: - bounds.lowerBound = 0; - bounds.upperBound = MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20); - return bounds; - case ZSTD_cParameter.ZSTD_c_overlapLog: - bounds.lowerBound = 0; - bounds.upperBound = 9; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam8: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - case ZSTD_cParameter.ZSTD_c_ldmHashLog: - bounds.lowerBound = 6; - bounds.upperBound = - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30; - return bounds; - case ZSTD_cParameter.ZSTD_c_ldmMinMatch: - bounds.lowerBound = 4; - bounds.upperBound = 4096; - return bounds; - case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: - bounds.lowerBound = 1; - bounds.upperBound = 8; - return bounds; - case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: - bounds.lowerBound = 0; - bounds.upperBound = (sizeof(nuint) == 4 ? 
30 : 31) - 6; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam1: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam3: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam2: - bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; - bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam4: - bounds.lowerBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictDefaultAttach; - bounds.upperBound = (int)ZSTD_dictAttachPref_e.ZSTD_dictForceLoad; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam5: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - case ZSTD_cParameter.ZSTD_c_targetCBlockSize: - bounds.lowerBound = 1340; - bounds.upperBound = 1 << 17; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam7: - bounds.lowerBound = 0; - bounds.upperBound = 2147483647; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam9: - case ZSTD_cParameter.ZSTD_c_experimentalParam10: - bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; - bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam11: - bounds.lowerBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters; - bounds.upperBound = (int)ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam12: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam13: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam20: - bounds.lowerBound = 0; - bounds.upperBound = 6; - return bounds; - 
case ZSTD_cParameter.ZSTD_c_experimentalParam14: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam15: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam16: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam17: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam18: - bounds.lowerBound = 1 << 10; - bounds.upperBound = 1 << 17; - return bounds; - case ZSTD_cParameter.ZSTD_c_experimentalParam19: - bounds.lowerBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_auto; - bounds.upperBound = (int)ZSTD_paramSwitch_e.ZSTD_ps_disable; - return bounds; - default: - bounds.error = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - return bounds; - } - } - - /* ZSTD_cParam_clampBounds: - * Clamps the value into the bounded range. - */ - private static nuint ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) - { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); - if (ERR_isError(bounds.error)) - return bounds.error; - if (*value < bounds.lowerBound) - *value = bounds.lowerBound; - if (*value > bounds.upperBound) - *value = bounds.upperBound; - return 0; - } + /* ZSTD_cParam_clampBounds: + * Clamps the value into the bounded range. 
+ */ + private static nuint ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); + if (ERR_isError(bounds.error)) + return bounds.error; + if (*value < bounds.lowerBound) + *value = bounds.lowerBound; + if (*value > bounds.upperBound) + *value = bounds.upperBound; + return 0; + } - private static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) - { - switch (param) - { - case ZSTD_cParameter.ZSTD_c_compressionLevel: - case ZSTD_cParameter.ZSTD_c_hashLog: - case ZSTD_cParameter.ZSTD_c_chainLog: - case ZSTD_cParameter.ZSTD_c_searchLog: - case ZSTD_cParameter.ZSTD_c_minMatch: - case ZSTD_cParameter.ZSTD_c_targetLength: - case ZSTD_cParameter.ZSTD_c_strategy: - case ZSTD_cParameter.ZSTD_c_experimentalParam20: - return 1; - case ZSTD_cParameter.ZSTD_c_experimentalParam2: - case ZSTD_cParameter.ZSTD_c_windowLog: - case ZSTD_cParameter.ZSTD_c_contentSizeFlag: - case ZSTD_cParameter.ZSTD_c_checksumFlag: - case ZSTD_cParameter.ZSTD_c_dictIDFlag: - case ZSTD_cParameter.ZSTD_c_experimentalParam3: - case ZSTD_cParameter.ZSTD_c_nbWorkers: - case ZSTD_cParameter.ZSTD_c_jobSize: - case ZSTD_cParameter.ZSTD_c_overlapLog: - case ZSTD_cParameter.ZSTD_c_experimentalParam1: - case ZSTD_cParameter.ZSTD_c_experimentalParam8: - case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - case ZSTD_cParameter.ZSTD_c_ldmHashLog: - case ZSTD_cParameter.ZSTD_c_ldmMinMatch: - case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: - case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: - case ZSTD_cParameter.ZSTD_c_experimentalParam4: - case ZSTD_cParameter.ZSTD_c_experimentalParam5: - case ZSTD_cParameter.ZSTD_c_targetCBlockSize: - case ZSTD_cParameter.ZSTD_c_experimentalParam7: - case ZSTD_cParameter.ZSTD_c_experimentalParam9: - case ZSTD_cParameter.ZSTD_c_experimentalParam10: - case ZSTD_cParameter.ZSTD_c_experimentalParam11: - case ZSTD_cParameter.ZSTD_c_experimentalParam12: - case ZSTD_cParameter.ZSTD_c_experimentalParam13: - case 
ZSTD_cParameter.ZSTD_c_experimentalParam14: - case ZSTD_cParameter.ZSTD_c_experimentalParam15: - case ZSTD_cParameter.ZSTD_c_experimentalParam16: - case ZSTD_cParameter.ZSTD_c_experimentalParam17: - case ZSTD_cParameter.ZSTD_c_experimentalParam18: - case ZSTD_cParameter.ZSTD_c_experimentalParam19: - default: - return 0; - } + private static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) + { + switch (param) + { + case ZSTD_cParameter.ZSTD_c_compressionLevel: + case ZSTD_cParameter.ZSTD_c_hashLog: + case ZSTD_cParameter.ZSTD_c_chainLog: + case ZSTD_cParameter.ZSTD_c_searchLog: + case ZSTD_cParameter.ZSTD_c_minMatch: + case ZSTD_cParameter.ZSTD_c_targetLength: + case ZSTD_cParameter.ZSTD_c_strategy: + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + return 1; + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + case ZSTD_cParameter.ZSTD_c_windowLog: + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + case ZSTD_cParameter.ZSTD_c_checksumFlag: + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + case ZSTD_cParameter.ZSTD_c_nbWorkers: + case ZSTD_cParameter.ZSTD_c_jobSize: + case ZSTD_cParameter.ZSTD_c_overlapLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + case 
ZSTD_cParameter.ZSTD_c_experimentalParam14: + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + default: + return 0; } + } - /*! ZSTD_CCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_cParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is generally only possible during frame initialization (before starting compression). - * Exception : when using multi-threading mode (nbWorkers >= 1), - * the following parameters can be updated _during_ compression (within same frame): - * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. - * new parameters will be active for next job only (after a flush()). - * @return : an error code (which can be tested using ZSTD_isError()). - */ - public static nuint ZSTD_CCtx_setParameter( - ZSTD_CCtx_s* cctx, - ZSTD_cParameter param, - int value - ) + /*! ZSTD_CCtx_setParameter() : + * Set one compression parameter, selected by enum ZSTD_cParameter. + * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). + * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). + * Setting a parameter is generally only possible during frame initialization (before starting compression). + * Exception : when using multi-threading mode (nbWorkers >= 1), + * the following parameters can be updated _during_ compression (within same frame): + * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. + * new parameters will be active for next job only (after a flush()). 
+ * @return : an error code (which can be tested using ZSTD_isError()). + */ + public static nuint ZSTD_CCtx_setParameter( + ZSTD_CCtx_s* cctx, + ZSTD_cParameter param, + int value + ) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + if (ZSTD_isUpdateAuthorized(param) != 0) { - if (ZSTD_isUpdateAuthorized(param) != 0) - { - cctx->cParamsChanged = 1; - } - else - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + cctx->cParamsChanged = 1; } - - switch (param) + else { - case ZSTD_cParameter.ZSTD_c_nbWorkers: - if (value != 0 && cctx->staticSize != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + } - break; - case ZSTD_cParameter.ZSTD_c_compressionLevel: - case ZSTD_cParameter.ZSTD_c_windowLog: - case ZSTD_cParameter.ZSTD_c_hashLog: - case ZSTD_cParameter.ZSTD_c_chainLog: - case ZSTD_cParameter.ZSTD_c_searchLog: - case ZSTD_cParameter.ZSTD_c_minMatch: - case ZSTD_cParameter.ZSTD_c_targetLength: - case ZSTD_cParameter.ZSTD_c_strategy: - case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: - case ZSTD_cParameter.ZSTD_c_experimentalParam2: - case ZSTD_cParameter.ZSTD_c_contentSizeFlag: - case ZSTD_cParameter.ZSTD_c_checksumFlag: - case ZSTD_cParameter.ZSTD_c_dictIDFlag: - case ZSTD_cParameter.ZSTD_c_experimentalParam3: - case ZSTD_cParameter.ZSTD_c_experimentalParam4: - case ZSTD_cParameter.ZSTD_c_experimentalParam5: - case ZSTD_cParameter.ZSTD_c_jobSize: - case ZSTD_cParameter.ZSTD_c_overlapLog: - case ZSTD_cParameter.ZSTD_c_experimentalParam1: - case ZSTD_cParameter.ZSTD_c_experimentalParam8: - case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - case ZSTD_cParameter.ZSTD_c_ldmHashLog: - case ZSTD_cParameter.ZSTD_c_ldmMinMatch: - case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: - case ZSTD_cParameter.ZSTD_c_targetCBlockSize: - case 
ZSTD_cParameter.ZSTD_c_experimentalParam7: - case ZSTD_cParameter.ZSTD_c_experimentalParam9: - case ZSTD_cParameter.ZSTD_c_experimentalParam10: - case ZSTD_cParameter.ZSTD_c_experimentalParam11: - case ZSTD_cParameter.ZSTD_c_experimentalParam12: - case ZSTD_cParameter.ZSTD_c_experimentalParam13: - case ZSTD_cParameter.ZSTD_c_experimentalParam20: - case ZSTD_cParameter.ZSTD_c_experimentalParam14: - case ZSTD_cParameter.ZSTD_c_experimentalParam15: - case ZSTD_cParameter.ZSTD_c_experimentalParam16: - case ZSTD_cParameter.ZSTD_c_experimentalParam17: - case ZSTD_cParameter.ZSTD_c_experimentalParam18: - case ZSTD_cParameter.ZSTD_c_experimentalParam19: - break; - default: + switch (param) + { + case ZSTD_cParameter.ZSTD_c_nbWorkers: + if (value != 0 && cctx->staticSize != 0) + { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) ); - } + } - return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); + break; + case ZSTD_cParameter.ZSTD_c_compressionLevel: + case ZSTD_cParameter.ZSTD_c_windowLog: + case ZSTD_cParameter.ZSTD_c_hashLog: + case ZSTD_cParameter.ZSTD_c_chainLog: + case ZSTD_cParameter.ZSTD_c_searchLog: + case ZSTD_cParameter.ZSTD_c_minMatch: + case ZSTD_cParameter.ZSTD_c_targetLength: + case ZSTD_cParameter.ZSTD_c_strategy: + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + case ZSTD_cParameter.ZSTD_c_checksumFlag: + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + case ZSTD_cParameter.ZSTD_c_jobSize: + case ZSTD_cParameter.ZSTD_c_overlapLog: + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + 
case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + case ZSTD_cParameter.ZSTD_c_experimentalParam14: + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + break; + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } - /*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ - * Similar to ZSTD_CCtx_setParameter. - * Set one compression parameter, selected by enum ZSTD_cParameter. - * Parameters must be applied to a ZSTD_CCtx using - * ZSTD_CCtx_setParametersUsingCCtxParams(). - * @result : a code representing success or failure (which can be tested with - * ZSTD_isError()). - */ - public static nuint ZSTD_CCtxParams_setParameter( - ZSTD_CCtx_params_s* CCtxParams, - ZSTD_cParameter param, - int value - ) + return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); + } + + /*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ + * Similar to ZSTD_CCtx_setParameter. + * Set one compression parameter, selected by enum ZSTD_cParameter. + * Parameters must be applied to a ZSTD_CCtx using + * ZSTD_CCtx_setParametersUsingCCtxParams(). + * @result : a code representing success or failure (which can be tested with + * ZSTD_isError()). 
+ */ + public static nuint ZSTD_CCtxParams_setParameter( + ZSTD_CCtx_params_s* CCtxParams, + ZSTD_cParameter param, + int value + ) + { + switch (param) { - switch (param) - { - case ZSTD_cParameter.ZSTD_c_experimentalParam2: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->format = (ZSTD_format_e)value; - return (nuint)CCtxParams->format; - case ZSTD_cParameter.ZSTD_c_compressionLevel: + CCtxParams->format = (ZSTD_format_e)value; + return (nuint)CCtxParams->format; + case ZSTD_cParameter.ZSTD_c_compressionLevel: + { { + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - if (value == 0) - CCtxParams->compressionLevel = 3; - else - CCtxParams->compressionLevel = value; - if (CCtxParams->compressionLevel >= 0) - return (nuint)CCtxParams->compressionLevel; - return 0; } - case ZSTD_cParameter.ZSTD_c_windowLog: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - - CCtxParams->cParams.windowLog = (uint)value; - return CCtxParams->cParams.windowLog; - case ZSTD_cParameter.ZSTD_c_hashLog: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - - CCtxParams->cParams.hashLog = (uint)value; - return CCtxParams->cParams.hashLog; - case 
ZSTD_cParameter.ZSTD_c_chainLog: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - - CCtxParams->cParams.chainLog = (uint)value; - return CCtxParams->cParams.chainLog; - case ZSTD_cParameter.ZSTD_c_searchLog: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - - CCtxParams->cParams.searchLog = (uint)value; - return (nuint)value; - case ZSTD_cParameter.ZSTD_c_minMatch: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + if (value == 0) + CCtxParams->compressionLevel = 3; + else + CCtxParams->compressionLevel = value; + if (CCtxParams->compressionLevel >= 0) + return (nuint)CCtxParams->compressionLevel; + return 0; + } - CCtxParams->cParams.minMatch = (uint)value; - return CCtxParams->cParams.minMatch; - case ZSTD_cParameter.ZSTD_c_targetLength: - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, value) == 0) + case ZSTD_cParameter.ZSTD_c_windowLog: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } - CCtxParams->cParams.targetLength = (uint)value; - return CCtxParams->cParams.targetLength; - case ZSTD_cParameter.ZSTD_c_strategy: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - - CCtxParams->cParams.strategy = (ZSTD_strategy)value; - return (nuint)CCtxParams->cParams.strategy; - case ZSTD_cParameter.ZSTD_c_contentSizeFlag: - 
CCtxParams->fParams.contentSizeFlag = value != 0 ? 1 : 0; - return (nuint)CCtxParams->fParams.contentSizeFlag; - case ZSTD_cParameter.ZSTD_c_checksumFlag: - CCtxParams->fParams.checksumFlag = value != 0 ? 1 : 0; - return (nuint)CCtxParams->fParams.checksumFlag; - case ZSTD_cParameter.ZSTD_c_dictIDFlag: - CCtxParams->fParams.noDictIDFlag = value == 0 ? 1 : 0; - return CCtxParams->fParams.noDictIDFlag == 0 ? 1U : 0U; - case ZSTD_cParameter.ZSTD_c_experimentalParam3: - CCtxParams->forceWindow = value != 0 ? 1 : 0; - return (nuint)CCtxParams->forceWindow; - case ZSTD_cParameter.ZSTD_c_experimentalParam4: - { - ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam4, - (int)pref - ) == 0 - ) + CCtxParams->cParams.windowLog = (uint)value; + return CCtxParams->cParams.windowLog; + case ZSTD_cParameter.ZSTD_c_hashLog: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } - CCtxParams->attachDictPref = pref; - return (nuint)CCtxParams->attachDictPref; - } - - case ZSTD_cParameter.ZSTD_c_experimentalParam5: - { - ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam5, - (int)lcm - ) == 0 - ) + CCtxParams->cParams.hashLog = (uint)value; + return CCtxParams->cParams.hashLog; + case ZSTD_cParameter.ZSTD_c_chainLog: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } - CCtxParams->literalCompressionMode = lcm; - return (nuint)CCtxParams->literalCompressionMode; - } - - case ZSTD_cParameter.ZSTD_c_nbWorkers: - { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - CCtxParams->nbWorkers = value; - return 
(nuint)CCtxParams->nbWorkers; - case ZSTD_cParameter.ZSTD_c_jobSize: - if (value != 0 && value < 512 * (1 << 10)) - value = 512 * (1 << 10); - - { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(value >= 0); - CCtxParams->jobSize = (nuint)value; - return CCtxParams->jobSize; - case ZSTD_cParameter.ZSTD_c_overlapLog: + CCtxParams->cParams.chainLog = (uint)value; + return CCtxParams->cParams.chainLog; + case ZSTD_cParameter.ZSTD_c_searchLog: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, value) == 0) { - nuint err_code = ZSTD_cParam_clampBounds( - ZSTD_cParameter.ZSTD_c_overlapLog, - &value + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); - if (ERR_isError(err_code)) - { - return err_code; - } } - CCtxParams->overlapLog = value; - return (nuint)CCtxParams->overlapLog; - case ZSTD_cParameter.ZSTD_c_experimentalParam1: + CCtxParams->cParams.searchLog = (uint)value; + return (nuint)value; + case ZSTD_cParameter.ZSTD_c_minMatch: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, value) == 0) { - nuint err_code = ZSTD_cParam_clampBounds( - ZSTD_cParameter.ZSTD_c_overlapLog, - &value + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); - if (ERR_isError(err_code)) - { - return err_code; - } } - CCtxParams->rsyncable = value; - return (nuint)CCtxParams->rsyncable; - case ZSTD_cParameter.ZSTD_c_experimentalParam8: - CCtxParams->enableDedicatedDictSearch = value != 0 ? 
1 : 0; - return (nuint)CCtxParams->enableDedicatedDictSearch; - case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching, - value - ) == 0 - ) + CCtxParams->cParams.minMatch = (uint)value; + return CCtxParams->cParams.minMatch; + case ZSTD_cParameter.ZSTD_c_targetLength: + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, value) == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->cParams.targetLength = (uint)value; + return CCtxParams->cParams.targetLength; + case ZSTD_cParameter.ZSTD_c_strategy: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } - CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; - return (nuint)CCtxParams->ldmParams.enableLdm; - case ZSTD_cParameter.ZSTD_c_ldmHashLog: - if (value != 0) - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashLog, value) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->cParams.strategy = (ZSTD_strategy)value; + return (nuint)CCtxParams->cParams.strategy; + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + CCtxParams->fParams.contentSizeFlag = value != 0 ? 1 : 0; + return (nuint)CCtxParams->fParams.contentSizeFlag; + case ZSTD_cParameter.ZSTD_c_checksumFlag: + CCtxParams->fParams.checksumFlag = value != 0 ? 1 : 0; + return (nuint)CCtxParams->fParams.checksumFlag; + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + CCtxParams->fParams.noDictIDFlag = value == 0 ? 1 : 0; + return CCtxParams->fParams.noDictIDFlag == 0 ? 1U : 0U; + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + CCtxParams->forceWindow = value != 0 ? 
1 : 0; + return (nuint)CCtxParams->forceWindow; + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + { + ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam4, + (int)pref + ) == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->ldmParams.hashLog = (uint)value; - return CCtxParams->ldmParams.hashLog; - case ZSTD_cParameter.ZSTD_c_ldmMinMatch: - if (value != 0) - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->attachDictPref = pref; + return (nuint)CCtxParams->attachDictPref; + } - CCtxParams->ldmParams.minMatchLength = (uint)value; - return CCtxParams->ldmParams.minMatchLength; - case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: - if (value != 0) - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + { + ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam5, + (int)lcm + ) == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->ldmParams.bucketSizeLog = (uint)value; - return CCtxParams->ldmParams.bucketSizeLog; - case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: - if (value != 0) - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->literalCompressionMode = lcm; + return (nuint)CCtxParams->literalCompressionMode; + } - CCtxParams->ldmParams.hashRateLog = (uint)value; - return CCtxParams->ldmParams.hashRateLog; - case 
ZSTD_cParameter.ZSTD_c_targetCBlockSize: - if (value != 0) - { - value = value > 1340 ? value : 1340; - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetCBlockSize, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + case ZSTD_cParameter.ZSTD_c_nbWorkers: + { + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) + { + return err_code; + } + } - CCtxParams->targetCBlockSize = (uint)value; - return CCtxParams->targetCBlockSize; - case ZSTD_cParameter.ZSTD_c_experimentalParam7: - if (value != 0) - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam7, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->nbWorkers = value; + return (nuint)CCtxParams->nbWorkers; + case ZSTD_cParameter.ZSTD_c_jobSize: + if (value != 0 && value < 512 * (1 << 10)) + value = 512 * (1 << 10); - CCtxParams->srcSizeHint = value; - return (nuint)CCtxParams->srcSizeHint; - case ZSTD_cParameter.ZSTD_c_experimentalParam9: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) - == 0 - ) + { + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(value >= 0); + CCtxParams->jobSize = (nuint)value; + return CCtxParams->jobSize; + case ZSTD_cParameter.ZSTD_c_overlapLog: + { + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + CCtxParams->overlapLog = value; + return (nuint)CCtxParams->overlapLog; + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + { + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + CCtxParams->rsyncable = value; + return (nuint)CCtxParams->rsyncable; + case 
ZSTD_cParameter.ZSTD_c_experimentalParam8: + CCtxParams->enableDedicatedDictSearch = value != 0 ? 1 : 0; + return (nuint)CCtxParams->enableDedicatedDictSearch; + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching, + value + ) == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; + return (nuint)CCtxParams->ldmParams.enableLdm; + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + if (value != 0) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashLog, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } - CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; - return (nuint)CCtxParams->inBufferMode; - case ZSTD_cParameter.ZSTD_c_experimentalParam10: + CCtxParams->ldmParams.hashLog = (uint)value; + return CCtxParams->ldmParams.hashLog; + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + if (value != 0) if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0 ) { return unchecked( @@ -1244,11 +1157,12 @@ int value ); } - CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; - return (nuint)CCtxParams->outBufferMode; - case ZSTD_cParameter.ZSTD_c_experimentalParam11: + CCtxParams->ldmParams.minMatchLength = (uint)value; + return CCtxParams->ldmParams.minMatchLength; + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + if (value != 0) if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog, value) == 0 ) { @@ -1257,11 +1171,12 @@ int value ); } - CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; - return (nuint)CCtxParams->blockDelimiters; - case ZSTD_cParameter.ZSTD_c_experimentalParam12: + 
CCtxParams->ldmParams.bucketSizeLog = (uint)value; + return CCtxParams->ldmParams.bucketSizeLog; + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + if (value != 0) if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) == 0 ) { @@ -1270,11 +1185,14 @@ int value ); } - CCtxParams->validateSequences = value; - return (nuint)CCtxParams->validateSequences; - case ZSTD_cParameter.ZSTD_c_experimentalParam13: + CCtxParams->ldmParams.hashRateLog = (uint)value; + return CCtxParams->ldmParams.hashRateLog; + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + if (value != 0) + { + value = value > 1340 ? value : 1340; if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetCBlockSize, value) == 0 ) { @@ -1282,13 +1200,17 @@ int value (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); } + } - CCtxParams->postBlockSplitter = (ZSTD_paramSwitch_e)value; - return (nuint)CCtxParams->postBlockSplitter; - case ZSTD_cParameter.ZSTD_c_experimentalParam20: + CCtxParams->targetCBlockSize = (uint)value; + return CCtxParams->targetCBlockSize; + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + if (value != 0) if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) - == 0 + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam7, + value + ) == 0 ) { return unchecked( @@ -1296,81 +1218,145 @@ int value ); } - CCtxParams->preBlockSplitter_level = value; - return (nuint)CCtxParams->preBlockSplitter_level; - case ZSTD_cParameter.ZSTD_c_experimentalParam14: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->srcSizeHint = value; + return (nuint)CCtxParams->srcSizeHint; + case 
ZSTD_cParameter.ZSTD_c_experimentalParam9: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; - return (nuint)CCtxParams->useRowMatchFinder; - case ZSTD_cParameter.ZSTD_c_experimentalParam15: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; + return (nuint)CCtxParams->inBufferMode; + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->deterministicRefPrefix = !(value == 0) ? 1 : 0; - return (nuint)CCtxParams->deterministicRefPrefix; - case ZSTD_cParameter.ZSTD_c_experimentalParam16: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; + return (nuint)CCtxParams->outBufferMode; + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; - return (nuint)CCtxParams->prefetchCDictTables; - case ZSTD_cParameter.ZSTD_c_experimentalParam17: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + 
CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; + return (nuint)CCtxParams->blockDelimiters; + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - CCtxParams->enableMatchFinderFallback = value; - return (nuint)CCtxParams->enableMatchFinderFallback; - case ZSTD_cParameter.ZSTD_c_experimentalParam18: - if (value != 0) - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam18, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } + CCtxParams->validateSequences = value; + return (nuint)CCtxParams->validateSequences; + case ZSTD_cParameter.ZSTD_c_experimentalParam13: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->postBlockSplitter = (ZSTD_paramSwitch_e)value; + return (nuint)CCtxParams->postBlockSplitter; + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->preBlockSplitter_level = value; + return (nuint)CCtxParams->preBlockSplitter_level; + case ZSTD_cParameter.ZSTD_c_experimentalParam14: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; + return (nuint)CCtxParams->useRowMatchFinder; + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) + 
== 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->deterministicRefPrefix = !(value == 0) ? 1 : 0; + return (nuint)CCtxParams->deterministicRefPrefix; + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } - assert(value >= 0); - CCtxParams->maxBlockSize = (nuint)value; - return CCtxParams->maxBlockSize; - case ZSTD_cParameter.ZSTD_c_experimentalParam19: + CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; + return (nuint)CCtxParams->prefetchCDictTables; + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + + CCtxParams->enableMatchFinderFallback = value; + return (nuint)CCtxParams->enableMatchFinderFallback; + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + if (value != 0) if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) - == 0 + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_experimentalParam18, + value + ) == 0 ) { return unchecked( @@ -1378,565 +1364,673 @@ int value ); } - CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; - return (nuint)CCtxParams->searchForExternalRepcodes; - default: + assert(value >= 0); + CCtxParams->maxBlockSize = (nuint)value; + return CCtxParams->maxBlockSize; + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) + == 0 + ) + { return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) ); - } + } + + CCtxParams->searchForExternalRepcodes = 
(ZSTD_paramSwitch_e)value; + return (nuint)CCtxParams->searchForExternalRepcodes; + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } + } - /*! ZSTD_CCtx_getParameter() : - * Get the requested compression parameter value, selected by enum ZSTD_cParameter, - * and store it into int* value. - * @return : 0, or an error code (which can be tested with ZSTD_isError()). - */ - public static nuint ZSTD_CCtx_getParameter( - ZSTD_CCtx_s* cctx, - ZSTD_cParameter param, - int* value - ) - { - return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); + /*! ZSTD_CCtx_getParameter() : + * Get the requested compression parameter value, selected by enum ZSTD_cParameter, + * and store it into int* value. + * @return : 0, or an error code (which can be tested with ZSTD_isError()). + */ + public static nuint ZSTD_CCtx_getParameter( + ZSTD_CCtx_s* cctx, + ZSTD_cParameter param, + int* value + ) + { + return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); + } + + /*! ZSTD_CCtxParams_getParameter() : + * Similar to ZSTD_CCtx_getParameter. + * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). 
+ */ + public static nuint ZSTD_CCtxParams_getParameter( + ZSTD_CCtx_params_s* CCtxParams, + ZSTD_cParameter param, + int* value + ) + { + switch (param) + { + case ZSTD_cParameter.ZSTD_c_experimentalParam2: + *value = (int)CCtxParams->format; + break; + case ZSTD_cParameter.ZSTD_c_compressionLevel: + *value = CCtxParams->compressionLevel; + break; + case ZSTD_cParameter.ZSTD_c_windowLog: + *value = (int)CCtxParams->cParams.windowLog; + break; + case ZSTD_cParameter.ZSTD_c_hashLog: + *value = (int)CCtxParams->cParams.hashLog; + break; + case ZSTD_cParameter.ZSTD_c_chainLog: + *value = (int)CCtxParams->cParams.chainLog; + break; + case ZSTD_cParameter.ZSTD_c_searchLog: + *value = (int)CCtxParams->cParams.searchLog; + break; + case ZSTD_cParameter.ZSTD_c_minMatch: + *value = (int)CCtxParams->cParams.minMatch; + break; + case ZSTD_cParameter.ZSTD_c_targetLength: + *value = (int)CCtxParams->cParams.targetLength; + break; + case ZSTD_cParameter.ZSTD_c_strategy: + *value = (int)CCtxParams->cParams.strategy; + break; + case ZSTD_cParameter.ZSTD_c_contentSizeFlag: + *value = CCtxParams->fParams.contentSizeFlag; + break; + case ZSTD_cParameter.ZSTD_c_checksumFlag: + *value = CCtxParams->fParams.checksumFlag; + break; + case ZSTD_cParameter.ZSTD_c_dictIDFlag: + *value = CCtxParams->fParams.noDictIDFlag == 0 ? 
1 : 0; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam3: + *value = CCtxParams->forceWindow; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam4: + *value = (int)CCtxParams->attachDictPref; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam5: + *value = (int)CCtxParams->literalCompressionMode; + break; + case ZSTD_cParameter.ZSTD_c_nbWorkers: + *value = CCtxParams->nbWorkers; + break; + case ZSTD_cParameter.ZSTD_c_jobSize: + assert(CCtxParams->jobSize <= 2147483647); + *value = (int)CCtxParams->jobSize; + break; + case ZSTD_cParameter.ZSTD_c_overlapLog: + *value = CCtxParams->overlapLog; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam1: + *value = CCtxParams->rsyncable; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam8: + *value = CCtxParams->enableDedicatedDictSearch; + break; + case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: + *value = (int)CCtxParams->ldmParams.enableLdm; + break; + case ZSTD_cParameter.ZSTD_c_ldmHashLog: + *value = (int)CCtxParams->ldmParams.hashLog; + break; + case ZSTD_cParameter.ZSTD_c_ldmMinMatch: + *value = (int)CCtxParams->ldmParams.minMatchLength; + break; + case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: + *value = (int)CCtxParams->ldmParams.bucketSizeLog; + break; + case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: + *value = (int)CCtxParams->ldmParams.hashRateLog; + break; + case ZSTD_cParameter.ZSTD_c_targetCBlockSize: + *value = (int)CCtxParams->targetCBlockSize; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam7: + *value = CCtxParams->srcSizeHint; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam9: + *value = (int)CCtxParams->inBufferMode; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam10: + *value = (int)CCtxParams->outBufferMode; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam11: + *value = (int)CCtxParams->blockDelimiters; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam12: + *value = CCtxParams->validateSequences; + break; + case 
ZSTD_cParameter.ZSTD_c_experimentalParam13: + *value = (int)CCtxParams->postBlockSplitter; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam20: + *value = CCtxParams->preBlockSplitter_level; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam14: + *value = (int)CCtxParams->useRowMatchFinder; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam15: + *value = CCtxParams->deterministicRefPrefix; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam16: + *value = (int)CCtxParams->prefetchCDictTables; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam17: + *value = CCtxParams->enableMatchFinderFallback; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam18: + *value = (int)CCtxParams->maxBlockSize; + break; + case ZSTD_cParameter.ZSTD_c_experimentalParam19: + *value = (int)CCtxParams->searchForExternalRepcodes; + break; + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } - /*! ZSTD_CCtxParams_getParameter() : - * Similar to ZSTD_CCtx_getParameter. - * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - */ - public static nuint ZSTD_CCtxParams_getParameter( - ZSTD_CCtx_params_s* CCtxParams, - ZSTD_cParameter param, - int* value - ) + return 0; + } + + /** ZSTD_CCtx_setParametersUsingCCtxParams() : + * just applies `params` into `cctx` + * no action is performed, parameters are merely stored. + * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. + * This is possible even if a compression is ongoing. + * In which case, new parameters will be applied on the fly, starting with next compression job. 
+ */ + public static nuint ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx_s* cctx, + ZSTD_CCtx_params_s* @params + ) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { - switch (param) - { - case ZSTD_cParameter.ZSTD_c_experimentalParam2: - *value = (int)CCtxParams->format; - break; - case ZSTD_cParameter.ZSTD_c_compressionLevel: - *value = CCtxParams->compressionLevel; - break; - case ZSTD_cParameter.ZSTD_c_windowLog: - *value = (int)CCtxParams->cParams.windowLog; - break; - case ZSTD_cParameter.ZSTD_c_hashLog: - *value = (int)CCtxParams->cParams.hashLog; - break; - case ZSTD_cParameter.ZSTD_c_chainLog: - *value = (int)CCtxParams->cParams.chainLog; - break; - case ZSTD_cParameter.ZSTD_c_searchLog: - *value = (int)CCtxParams->cParams.searchLog; - break; - case ZSTD_cParameter.ZSTD_c_minMatch: - *value = (int)CCtxParams->cParams.minMatch; - break; - case ZSTD_cParameter.ZSTD_c_targetLength: - *value = (int)CCtxParams->cParams.targetLength; - break; - case ZSTD_cParameter.ZSTD_c_strategy: - *value = (int)CCtxParams->cParams.strategy; - break; - case ZSTD_cParameter.ZSTD_c_contentSizeFlag: - *value = CCtxParams->fParams.contentSizeFlag; - break; - case ZSTD_cParameter.ZSTD_c_checksumFlag: - *value = CCtxParams->fParams.checksumFlag; - break; - case ZSTD_cParameter.ZSTD_c_dictIDFlag: - *value = CCtxParams->fParams.noDictIDFlag == 0 ? 
1 : 0; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam3: - *value = CCtxParams->forceWindow; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam4: - *value = (int)CCtxParams->attachDictPref; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam5: - *value = (int)CCtxParams->literalCompressionMode; - break; - case ZSTD_cParameter.ZSTD_c_nbWorkers: - *value = CCtxParams->nbWorkers; - break; - case ZSTD_cParameter.ZSTD_c_jobSize: - assert(CCtxParams->jobSize <= 2147483647); - *value = (int)CCtxParams->jobSize; - break; - case ZSTD_cParameter.ZSTD_c_overlapLog: - *value = CCtxParams->overlapLog; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam1: - *value = CCtxParams->rsyncable; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam8: - *value = CCtxParams->enableDedicatedDictSearch; - break; - case ZSTD_cParameter.ZSTD_c_enableLongDistanceMatching: - *value = (int)CCtxParams->ldmParams.enableLdm; - break; - case ZSTD_cParameter.ZSTD_c_ldmHashLog: - *value = (int)CCtxParams->ldmParams.hashLog; - break; - case ZSTD_cParameter.ZSTD_c_ldmMinMatch: - *value = (int)CCtxParams->ldmParams.minMatchLength; - break; - case ZSTD_cParameter.ZSTD_c_ldmBucketSizeLog: - *value = (int)CCtxParams->ldmParams.bucketSizeLog; - break; - case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: - *value = (int)CCtxParams->ldmParams.hashRateLog; - break; - case ZSTD_cParameter.ZSTD_c_targetCBlockSize: - *value = (int)CCtxParams->targetCBlockSize; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam7: - *value = CCtxParams->srcSizeHint; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam9: - *value = (int)CCtxParams->inBufferMode; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam10: - *value = (int)CCtxParams->outBufferMode; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam11: - *value = (int)CCtxParams->blockDelimiters; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam12: - *value = CCtxParams->validateSequences; - break; - case 
ZSTD_cParameter.ZSTD_c_experimentalParam13: - *value = (int)CCtxParams->postBlockSplitter; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam20: - *value = CCtxParams->preBlockSplitter_level; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam14: - *value = (int)CCtxParams->useRowMatchFinder; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam15: - *value = CCtxParams->deterministicRefPrefix; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam16: - *value = (int)CCtxParams->prefetchCDictTables; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam17: - *value = CCtxParams->enableMatchFinderFallback; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam18: - *value = (int)CCtxParams->maxBlockSize; - break; - case ZSTD_cParameter.ZSTD_c_experimentalParam19: - *value = (int)CCtxParams->searchForExternalRepcodes; - break; - default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } - return 0; + if (cctx->cdict != null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /** ZSTD_CCtx_setParametersUsingCCtxParams() : - * just applies `params` into `cctx` - * no action is performed, parameters are merely stored. - * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. - * This is possible even if a compression is ongoing. - * In which case, new parameters will be applied on the fly, starting with next compression job. - */ - public static nuint ZSTD_CCtx_setParametersUsingCCtxParams( - ZSTD_CCtx_s* cctx, - ZSTD_CCtx_params_s* @params - ) + cctx->requestedParams = *@params; + return 0; + } + + /*! ZSTD_CCtx_setCParams() : + * Set all parameters provided within @p cparams into the working @p cctx. + * Note : if modifying parameters during compression (MT mode only), + * note that changes to the .windowLog parameter will be ignored. 
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + * On failure, no parameters are updated. + */ + public static nuint ZSTD_CCtx_setCParams( + ZSTD_CCtx_s* cctx, + ZSTD_compressionParameters cparams + ) + { { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + /* only update if all parameters are valid */ + nuint err_code = ZSTD_checkCParams(cparams); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + return err_code; } + } - if (cctx->cdict != null) + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_windowLog, + (int)cparams.windowLog + ); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + return err_code; } - - cctx->requestedParams = *@params; - return 0; } - /*! ZSTD_CCtx_setCParams() : - * Set all parameters provided within @p cparams into the working @p cctx. - * Note : if modifying parameters during compression (MT mode only), - * note that changes to the .windowLog parameter will be ignored. - * @return 0 on success, or an error code (can be checked with ZSTD_isError()). - * On failure, no parameters are updated. 
- */ - public static nuint ZSTD_CCtx_setCParams( - ZSTD_CCtx_s* cctx, - ZSTD_compressionParameters cparams - ) { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_chainLog, + (int)cparams.chainLog + ); + if (ERR_isError(err_code)) { - /* only update if all parameters are valid */ - nuint err_code = ZSTD_checkCParams(cparams); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_hashLog, + (int)cparams.hashLog + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_windowLog, - (int)cparams.windowLog - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_searchLog, + (int)cparams.searchLog + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_chainLog, - (int)cparams.chainLog - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_minMatch, + (int)cparams.minMatch + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_hashLog, - (int)cparams.hashLog - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_targetLength, + (int)cparams.targetLength + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_searchLog, - (int)cparams.searchLog - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_strategy, + (int)cparams.strategy + ); + if (ERR_isError(err_code)) { - nuint err_code = 
ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_minMatch, - (int)cparams.minMatch - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_targetLength, - (int)cparams.targetLength - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 0; + } + /*! ZSTD_CCtx_setFParams() : + * Set all parameters provided within @p fparams into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ + public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters fparams) + { + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_contentSizeFlag, + fparams.contentSizeFlag != 0 ? 1 : 0 + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_strategy, - (int)cparams.strategy - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - return 0; } - /*! ZSTD_CCtx_setFParams() : - * Set all parameters provided within @p fparams into the working @p cctx. - * @return 0 on success, or an error code (can be checked with ZSTD_isError()). - */ - public static nuint ZSTD_CCtx_setFParams(ZSTD_CCtx_s* cctx, ZSTD_frameParameters fparams) { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_checksumFlag, + fparams.checksumFlag != 0 ? 1 : 0 + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_contentSizeFlag, - fparams.contentSizeFlag != 0 ? 1 : 0 - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + nuint err_code = ZSTD_CCtx_setParameter( + cctx, + ZSTD_cParameter.ZSTD_c_dictIDFlag, + fparams.noDictIDFlag == 0 ? 1 : 0 + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_checksumFlag, - fparams.checksumFlag != 0 ? 
1 : 0 - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + + return 0; + } + /*! ZSTD_CCtx_setParams() : + * Set all parameters provided within @p params into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ + public static nuint ZSTD_CCtx_setParams(ZSTD_CCtx_s* cctx, ZSTD_parameters @params) + { + { + /* First check cParams, because we want to update all or none. */ + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_setParameter( - cctx, - ZSTD_cParameter.ZSTD_c_dictIDFlag, - fparams.noDictIDFlag == 0 ? 1 : 0 - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - return 0; } - /*! ZSTD_CCtx_setParams() : - * Set all parameters provided within @p params into the working @p cctx. - * @return 0 on success, or an error code (can be checked with ZSTD_isError()). - */ - public static nuint ZSTD_CCtx_setParams(ZSTD_CCtx_s* cctx, ZSTD_parameters @params) { + /* Next set fParams, because this could fail if the cctx isn't in init stage. */ + nuint err_code = ZSTD_CCtx_setFParams(cctx, @params.fParams); + if (ERR_isError(err_code)) { - /* First check cParams, because we want to update all or none. */ - nuint err_code = ZSTD_checkCParams(@params.cParams); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } + { + /* Finally set cParams, which should succeed. */ + nuint err_code = ZSTD_CCtx_setCParams(cctx, @params.cParams); + if (ERR_isError(err_code)) { - /* Next set fParams, because this could fail if the cctx isn't in init stage. */ - nuint err_code = ZSTD_CCtx_setFParams(cctx, @params.fParams); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - { - /* Finally set cParams, which should succeed. 
*/ - nuint err_code = ZSTD_CCtx_setCParams(cctx, @params.cParams); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 0; + } - return 0; + /*! ZSTD_CCtx_setPledgedSrcSize() : + * Total input data size to be compressed as a single frame. + * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. + * This value will also be controlled at end of frame, and trigger an error if not respected. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. + * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. + * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. + * Note 2 : pledgedSrcSize is only valid once, for the next frame. + * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. + * Note 3 : Whenever all input data is provided and consumed in a single round, + * for example with ZSTD_compress2(), + * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), + * this value is automatically overridden by srcSize instead. + */ + public static nuint ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx_s* cctx, ulong pledgedSrcSize) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /*! ZSTD_CCtx_setPledgedSrcSize() : - * Total input data size to be compressed as a single frame. - * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. - * This value will also be controlled at end of frame, and trigger an error if not respected. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. - * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. - * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. 
- * Note 2 : pledgedSrcSize is only valid once, for the next frame. - * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. - * Note 3 : Whenever all input data is provided and consumed in a single round, - * for example with ZSTD_compress2(), - * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), - * this value is automatically overridden by srcSize instead. - */ - public static nuint ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx_s* cctx, ulong pledgedSrcSize) + cctx->pledgedSrcSizePlusOne = pledgedSrcSize + 1; + return 0; + } + + /** + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions. + */ + private static nuint ZSTD_initLocalDict(ZSTD_CCtx_s* cctx) + { + ZSTD_localDict* dl = &cctx->localDict; + if (dl->dict == null) { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + assert(dl->dictBuffer == null); + assert(dl->cdict == null); + assert(dl->dictSize == 0); + return 0; + } - cctx->pledgedSrcSizePlusOne = pledgedSrcSize + 1; + if (dl->cdict != null) + { + assert(cctx->cdict == dl->cdict); return 0; } - /** - * Initializes the local dictionary using requested parameters. - * NOTE: Initialization does not employ the pledged src size, - * because the dictionary may be used for multiple compressions. 
- */ - private static nuint ZSTD_initLocalDict(ZSTD_CCtx_s* cctx) + assert(dl->dictSize > 0); + assert(cctx->cdict == null); + assert(cctx->prefixDict.dict == null); + dl->cdict = ZSTD_createCDict_advanced2( + dl->dict, + dl->dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dl->dictContentType, + &cctx->requestedParams, + cctx->customMem + ); + if (dl->cdict == null) { - ZSTD_localDict* dl = &cctx->localDict; - if (dl->dict == null) - { - assert(dl->dictBuffer == null); - assert(dl->cdict == null); - assert(dl->dictSize == 0); - return 0; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + cctx->cdict = dl->cdict; + return 0; + } + + /*! ZSTD_CCtx_loadDictionary_advanced() : + * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over + * how to load the dictionary (by copy ? by reference ?) + * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ + public static nuint ZSTD_CCtx_loadDictionary_advanced( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } - if (dl->cdict != null) + ZSTD_clearAllDicts(cctx); + if (dict == null || dictSize == 0) + return 0; + if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef) + { + cctx->localDict.dict = dict; + } + else + { + /* copy dictionary content inside CCtx to own its lifetime */ + void* dictBuffer; + if (cctx->staticSize != 0) { - assert(cctx->cdict == dl->cdict); - return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - assert(dl->dictSize > 0); - assert(cctx->cdict == null); - assert(cctx->prefixDict.dict == null); - dl->cdict = ZSTD_createCDict_advanced2( - dl->dict, - dl->dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - dl->dictContentType, - &cctx->requestedParams, - 
cctx->customMem - ); - if (dl->cdict == null) + dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); + if (dictBuffer == null) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - cctx->cdict = dl->cdict; - return 0; + memcpy(dictBuffer, dict, (uint)dictSize); + cctx->localDict.dictBuffer = dictBuffer; + cctx->localDict.dict = dictBuffer; } - /*! ZSTD_CCtx_loadDictionary_advanced() : - * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ - public static nuint ZSTD_CCtx_loadDictionary_advanced( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType - ) + cctx->localDict.dictSize = dictSize; + cctx->localDict.dictContentType = dictContentType; + return 0; + } + + /*! ZSTD_CCtx_loadDictionary_byReference() : + * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. + * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ + public static nuint ZSTD_CCtx_loadDictionary_byReference( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize + ) + { + return ZSTD_CCtx_loadDictionary_advanced( + cctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); + } + + /*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ + * Create an internal CDict from `dict` buffer. + * Decompression will have to use same dictionary. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, + * meaning "return to no-dictionary mode". 
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + * until parameters are reset, a new dictionary is loaded, or the dictionary + * is explicitly invalidated by loading a NULL dictionary. + * Note 2 : Loading a dictionary involves building tables. + * It's also a CPU consuming operation, with non-negligible impact on latency. + * Tables are dependent on compression parameters, and for this reason, + * compression parameters can no longer be changed after loading a dictionary. + * Note 3 :`dict` content will be copied internally. + * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. + * In such a case, dictionary buffer must outlive its users. + * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() + * to precisely select how dictionary content must be interpreted. + * Note 5 : This method does not benefit from LDM (long distance mode). + * If you want to employ LDM on some large dictionary content, + * prefer employing ZSTD_CCtx_refPrefix() described below. + */ + public static nuint ZSTD_CCtx_loadDictionary(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize) + { + return ZSTD_CCtx_loadDictionary_advanced( + cctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); + } + + /*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ + * Reference a prepared dictionary, to be used for all future compressed frames. + * Note that compression parameters are enforced from within CDict, + * and supersede any compression parameter previously set within CCtx. + * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. + * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. + * The dictionary will remain valid for future compressed frames using same CCtx. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). 
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode". + * Note 1 : Currently, only one dictionary can be managed. + * Referencing a new dictionary effectively "discards" any previous one. + * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ + public static nuint ZSTD_CCtx_refCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } - ZSTD_clearAllDicts(cctx); - if (dict == null || dictSize == 0) - return 0; - if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef) - { - cctx->localDict.dict = dict; - } - else - { - /* copy dictionary content inside CCtx to own its lifetime */ - void* dictBuffer; - if (cctx->staticSize != 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + ZSTD_clearAllDicts(cctx); + cctx->cdict = cdict; + return 0; + } - dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); - if (dictBuffer == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + public static nuint ZSTD_CCtx_refThreadPool(ZSTD_CCtx_s* cctx, void* pool) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } - memcpy(dictBuffer, dict, (uint)dictSize); - cctx->localDict.dictBuffer = dictBuffer; - cctx->localDict.dict = dictBuffer; - } + cctx->pool = pool; + return 0; + } - cctx->localDict.dictSize = dictSize; - cctx->localDict.dictContentType = dictContentType; - return 0; + /*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ + * Reference a prefix (single-usage dictionary) for next compressed frame. + * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). 
+ * Decompression will need same prefix to properly regenerate data. + * Compressing with a prefix is similar in outcome as performing a diff and compressing it, + * but performs much faster, especially during decompression (compression speed is tunable with compression level). + * This method is compatible with LDM (long distance mode). + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary + * Note 1 : Prefix buffer is referenced. It **must** outlive compression. + * Its content must remain unmodified during compression. + * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, + * ensure that the window size is large enough to contain the entire source. + * See ZSTD_c_windowLog. + * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. + * It's a CPU consuming operation, with non-negligible impact on latency. + * If there is a need to use the same prefix multiple times, consider loadDictionary instead. + * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). + * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ + public static nuint ZSTD_CCtx_refPrefix(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize) + { + return ZSTD_CCtx_refPrefix_advanced( + cctx, + prefix, + prefixSize, + ZSTD_dictContentType_e.ZSTD_dct_rawContent + ); + } + + /*! ZSTD_CCtx_refPrefix_advanced() : + * Same as ZSTD_CCtx_refPrefix(), but gives finer control over + * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) 
*/ + public static nuint ZSTD_CCtx_refPrefix_advanced( + ZSTD_CCtx_s* cctx, + void* prefix, + nuint prefixSize, + ZSTD_dictContentType_e dictContentType + ) + { + if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /*! ZSTD_CCtx_loadDictionary_byReference() : - * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. - * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ - public static nuint ZSTD_CCtx_loadDictionary_byReference( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize - ) + ZSTD_clearAllDicts(cctx); + if (prefix != null && prefixSize > 0) { - return ZSTD_CCtx_loadDictionary_advanced( - cctx, - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - ZSTD_dictContentType_e.ZSTD_dct_auto - ); + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictContentType = dictContentType; } - /*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal CDict from `dict` buffer. - * Decompression will have to use same dictionary. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, - * until parameters are reset, a new dictionary is loaded, or the dictionary - * is explicitly invalidated by loading a NULL dictionary. - * Note 2 : Loading a dictionary involves building tables. - * It's also a CPU consuming operation, with non-negligible impact on latency. - * Tables are dependent on compression parameters, and for this reason, - * compression parameters can no longer be changed after loading a dictionary. - * Note 3 :`dict` content will be copied internally. 
- * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. - * In such a case, dictionary buffer must outlive its users. - * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. - * Note 5 : This method does not benefit from LDM (long distance mode). - * If you want to employ LDM on some large dictionary content, - * prefer employing ZSTD_CCtx_refPrefix() described below. - */ - public static nuint ZSTD_CCtx_loadDictionary(ZSTD_CCtx_s* cctx, void* dict, nuint dictSize) + return 0; + } + + /*! ZSTD_CCtx_reset() : + * Also dumps dictionary */ + public static nuint ZSTD_CCtx_reset(ZSTD_CCtx_s* cctx, ZSTD_ResetDirective reset) + { + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_session_only + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { - return ZSTD_CCtx_loadDictionary_advanced( - cctx, - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, - ZSTD_dictContentType_e.ZSTD_dct_auto - ); + cctx->streamStage = ZSTD_cStreamStage.zcss_init; + cctx->pledgedSrcSizePlusOne = 0; } - /*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used for all future compressed frames. - * Note that compression parameters are enforced from within CDict, - * and supersede any compression parameter previously set within CCtx. - * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. - * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. - * The dictionary will remain valid for future compressed frames using same CCtx. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Referencing a NULL CDict means "return to no-dictionary mode". - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. 
- * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ - public static nuint ZSTD_CCtx_refCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_parameters + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -1944,580 +2038,631 @@ public static nuint ZSTD_CCtx_refCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) } ZSTD_clearAllDicts(cctx); - cctx->cdict = cdict; - return 0; + return ZSTD_CCtxParams_reset(&cctx->requestedParams); } - public static nuint ZSTD_CCtx_refThreadPool(ZSTD_CCtx_s* cctx, void* pool) + return 0; + } + + /** ZSTD_checkCParams() : + control CParam values remain within authorized range. + @return : 0, or an error code if one value is beyond authorized range */ + public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) + { + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) + == 0 + ) { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - cctx->pool = pool; - return 0; + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) + == 0 + ) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ - * Reference a prefix (single-usage dictionary) for next compressed frame. - * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). - * Decompression will need same prefix to properly regenerate data. - * Compressing with a prefix is similar in outcome as performing a diff and compressing it, - * but performs much faster, especially during decompression (compression speed is tunable with compression level). 
- * This method is compatible with LDM (long distance mode). - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary - * Note 1 : Prefix buffer is referenced. It **must** outlive compression. - * Its content must remain unmodified during compression. - * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, - * ensure that the window size is large enough to contain the entire source. - * See ZSTD_c_windowLog. - * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. - * It's a CPU consuming operation, with non-negligible impact on latency. - * If there is a need to use the same prefix multiple times, consider loadDictionary instead. - * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). - * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ - public static nuint ZSTD_CCtx_refPrefix(ZSTD_CCtx_s* cctx, void* prefix, nuint prefixSize) - { - return ZSTD_CCtx_refPrefix_advanced( - cctx, - prefix, - prefixSize, - ZSTD_dictContentType_e.ZSTD_dct_rawContent - ); + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, (int)cParams.hashLog) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /*! ZSTD_CCtx_refPrefix_advanced() : - * Same as ZSTD_CCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) 
*/ - public static nuint ZSTD_CCtx_refPrefix_advanced( - ZSTD_CCtx_s* cctx, - void* prefix, - nuint prefixSize, - ZSTD_dictContentType_e dictContentType + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) + == 0 ) { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - ZSTD_clearAllDicts(cctx); - if (prefix != null && prefixSize > 0) - { - cctx->prefixDict.dict = prefix; - cctx->prefixDict.dictSize = prefixSize; - cctx->prefixDict.dictContentType = dictContentType; - } + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) + == 0 + ) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - return 0; + if ( + ZSTD_cParam_withinBounds( + ZSTD_cParameter.ZSTD_c_targetLength, + (int)cParams.targetLength + ) == 0 + ) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /*! 
ZSTD_CCtx_reset() : - * Also dumps dictionary */ - public static nuint ZSTD_CCtx_reset(ZSTD_CCtx_s* cctx, ZSTD_ResetDirective reset) + if ( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) + == 0 + ) { - if ( - reset == ZSTD_ResetDirective.ZSTD_reset_session_only - || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters - ) - { - cctx->streamStage = ZSTD_cStreamStage.zcss_init; - cctx->pledgedSrcSizePlusOne = 0; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - if ( - reset == ZSTD_ResetDirective.ZSTD_reset_parameters - || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters - ) - { - if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + return 0; + } - ZSTD_clearAllDicts(cctx); - return ZSTD_CCtxParams_reset(&cctx->requestedParams); - } + /** ZSTD_clampCParams() : + * make CParam values within valid range. + * @return : valid CParams */ + private static ZSTD_compressionParameters ZSTD_clampCParams( + ZSTD_compressionParameters cParams + ) + { + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_windowLog); + if ((int)cParams.windowLog < bounds.lowerBound) + cParams.windowLog = (uint)bounds.lowerBound; + else if ((int)cParams.windowLog > bounds.upperBound) + cParams.windowLog = (uint)bounds.upperBound; + } - return 0; + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_chainLog); + if ((int)cParams.chainLog < bounds.lowerBound) + cParams.chainLog = (uint)bounds.lowerBound; + else if ((int)cParams.chainLog > bounds.upperBound) + cParams.chainLog = (uint)bounds.upperBound; } - /** ZSTD_checkCParams() : - control CParam values remain within authorized range. 
- @return : 0, or an error code if one value is beyond authorized range */ - public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) { - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) - == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_hashLog); + if ((int)cParams.hashLog < bounds.lowerBound) + cParams.hashLog = (uint)bounds.lowerBound; + else if ((int)cParams.hashLog > bounds.upperBound) + cParams.hashLog = (uint)bounds.upperBound; + } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) - == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_searchLog); + if ((int)cParams.searchLog < bounds.lowerBound) + cParams.searchLog = (uint)bounds.lowerBound; + else if ((int)cParams.searchLog > bounds.upperBound) + cParams.searchLog = (uint)bounds.upperBound; + } - if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_hashLog, (int)cParams.hashLog) == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_minMatch); + if ((int)cParams.minMatch < bounds.lowerBound) + cParams.minMatch = (uint)bounds.lowerBound; + else if ((int)cParams.minMatch > bounds.upperBound) + cParams.minMatch = (uint)bounds.upperBound; + } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) - == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_targetLength); + if ((int)cParams.targetLength < bounds.lowerBound) + cParams.targetLength = (uint)bounds.lowerBound; + else if ((int)cParams.targetLength > 
bounds.upperBound) + cParams.targetLength = (uint)bounds.upperBound; + } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) - == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_strategy); + if ((int)cParams.strategy < bounds.lowerBound) + cParams.strategy = (ZSTD_strategy)bounds.lowerBound; + else if ((int)cParams.strategy > bounds.upperBound) + cParams.strategy = (ZSTD_strategy)bounds.upperBound; + } - if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_targetLength, - (int)cParams.targetLength - ) == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + return cParams; + } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) - == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + /** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ + private static uint ZSTD_cycleLog(uint hashLog, ZSTD_strategy strat) + { + uint btScale = (uint)strat >= (uint)ZSTD_strategy.ZSTD_btlazy2 ? 1U : 0U; + return hashLog - btScale; + } - return 0; + /** ZSTD_dictAndWindowLog() : + * Returns an adjusted window log that is large enough to fit the source and the dictionary. + * The zstd format says that the entire dictionary is valid if one byte of the dictionary + * is within the window. So the hashLog and chainLog should be large enough to reference both + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing + * the hashLog and windowLog. + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN. + */ + private static uint ZSTD_dictAndWindowLog(uint windowLog, ulong srcSize, ulong dictSize) + { + ulong maxWindowSize = 1UL << (sizeof(nuint) == 4 ? 
30 : 31); + if (dictSize == 0) + { + return windowLog; } - /** ZSTD_clampCParams() : - * make CParam values within valid range. - * @return : valid CParams */ - private static ZSTD_compressionParameters ZSTD_clampCParams( - ZSTD_compressionParameters cParams - ) + assert(windowLog <= (uint)(sizeof(nuint) == 4 ? 30 : 31)); + assert(srcSize != unchecked(0UL - 1)); { + ulong windowSize = 1UL << (int)windowLog; + ulong dictAndWindowSize = dictSize + windowSize; + if (windowSize >= dictSize + srcSize) { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_windowLog); - if ((int)cParams.windowLog < bounds.lowerBound) - cParams.windowLog = (uint)bounds.lowerBound; - else if ((int)cParams.windowLog > bounds.upperBound) - cParams.windowLog = (uint)bounds.upperBound; + return windowLog; } - + else if (dictAndWindowSize >= maxWindowSize) { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_chainLog); - if ((int)cParams.chainLog < bounds.lowerBound) - cParams.chainLog = (uint)bounds.lowerBound; - else if ((int)cParams.chainLog > bounds.upperBound) - cParams.chainLog = (uint)bounds.upperBound; + return (uint)(sizeof(nuint) == 4 ? 
30 : 31); } - + else { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_hashLog); - if ((int)cParams.hashLog < bounds.lowerBound) - cParams.hashLog = (uint)bounds.lowerBound; - else if ((int)cParams.hashLog > bounds.upperBound) - cParams.hashLog = (uint)bounds.upperBound; + return ZSTD_highbit32((uint)dictAndWindowSize - 1) + 1; } + } + } - { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_searchLog); - if ((int)cParams.searchLog < bounds.lowerBound) - cParams.searchLog = (uint)bounds.lowerBound; - else if ((int)cParams.searchLog > bounds.upperBound) - cParams.searchLog = (uint)bounds.upperBound; - } - - { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_minMatch); - if ((int)cParams.minMatch < bounds.lowerBound) - cParams.minMatch = (uint)bounds.lowerBound; - else if ((int)cParams.minMatch > bounds.upperBound) - cParams.minMatch = (uint)bounds.upperBound; - } - - { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_targetLength); - if ((int)cParams.targetLength < bounds.lowerBound) - cParams.targetLength = (uint)bounds.lowerBound; - else if ((int)cParams.targetLength > bounds.upperBound) - cParams.targetLength = (uint)bounds.upperBound; - } - - { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_strategy); - if ((int)cParams.strategy < bounds.lowerBound) - cParams.strategy = (ZSTD_strategy)bounds.lowerBound; - else if ((int)cParams.strategy > bounds.upperBound) - cParams.strategy = (ZSTD_strategy)bounds.upperBound; - } - - return cParams; + /** ZSTD_adjustCParams_internal() : + * optimize `cPar` for a specified input (`srcSize` and `dictSize`). + * mostly downsize to reduce memory consumption and initialization latency. + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. + * note : `srcSize==0` means 0! 
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ + private static ZSTD_compressionParameters ZSTD_adjustCParams_internal( + ZSTD_compressionParameters cPar, + ulong srcSize, + nuint dictSize, + ZSTD_CParamMode_e mode, + ZSTD_paramSwitch_e useRowMatchFinder + ) + { + /* (1<<9) + 1 */ + const ulong minSrcSize = 513; + ulong maxWindowResize = 1UL << (sizeof(nuint) == 4 ? 30 : 31) - 1; + assert(ZSTD_checkCParams(cPar) == 0); + switch (mode) + { + case ZSTD_CParamMode_e.ZSTD_cpm_unknown: + case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: + break; + case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: + if (dictSize != 0 && srcSize == unchecked(0UL - 1)) + srcSize = minSrcSize; + break; + case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: + dictSize = 0; + break; + default: + assert(0 != 0); + break; } - /** ZSTD_cycleLog() : - * condition for correct operation : hashLog > 1 */ - private static uint ZSTD_cycleLog(uint hashLog, ZSTD_strategy strat) + if (srcSize <= maxWindowResize && dictSize <= maxWindowResize) { - uint btScale = (uint)strat >= (uint)ZSTD_strategy.ZSTD_btlazy2 ? 1U : 0U; - return hashLog - btScale; + uint tSize = (uint)(srcSize + dictSize); + const uint hashSizeMin = 1 << 6; + uint srcLog = tSize < hashSizeMin ? 6 : ZSTD_highbit32(tSize - 1) + 1; + if (cPar.windowLog > srcLog) + cPar.windowLog = srcLog; } - /** ZSTD_dictAndWindowLog() : - * Returns an adjusted window log that is large enough to fit the source and the dictionary. - * The zstd format says that the entire dictionary is valid if one byte of the dictionary - * is within the window. So the hashLog and chainLog should be large enough to reference both - * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing - * the hashLog and windowLog. - * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN. 
- */ - private static uint ZSTD_dictAndWindowLog(uint windowLog, ulong srcSize, ulong dictSize) + if (srcSize != unchecked(0UL - 1)) { - ulong maxWindowSize = 1UL << (sizeof(nuint) == 4 ? 30 : 31); - if (dictSize == 0) - { - return windowLog; - } - - assert(windowLog <= (uint)(sizeof(nuint) == 4 ? 30 : 31)); - assert(srcSize != unchecked(0UL - 1)); - { - ulong windowSize = 1UL << (int)windowLog; - ulong dictAndWindowSize = dictSize + windowSize; - if (windowSize >= dictSize + srcSize) - { - return windowLog; - } - else if (dictAndWindowSize >= maxWindowSize) - { - return (uint)(sizeof(nuint) == 4 ? 30 : 31); - } - else - { - return ZSTD_highbit32((uint)dictAndWindowSize - 1) + 1; - } - } + uint dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, srcSize, dictSize); + uint cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cPar.hashLog > dictAndWindowLog + 1) + cPar.hashLog = dictAndWindowLog + 1; + if (cycleLog > dictAndWindowLog) + cPar.chainLog -= cycleLog - dictAndWindowLog; } - /** ZSTD_adjustCParams_internal() : - * optimize `cPar` for a specified input (`srcSize` and `dictSize`). - * mostly downsize to reduce memory consumption and initialization latency. - * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. - * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. - * note : `srcSize==0` means 0! - * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ - private static ZSTD_compressionParameters ZSTD_adjustCParams_internal( - ZSTD_compressionParameters cPar, - ulong srcSize, - nuint dictSize, - ZSTD_CParamMode_e mode, - ZSTD_paramSwitch_e useRowMatchFinder + if (cPar.windowLog < 10) + cPar.windowLog = 10; + if ( + mode == ZSTD_CParamMode_e.ZSTD_cpm_createCDict + && ZSTD_CDictIndicesAreTagged(&cPar) != 0 ) { - /* (1<<9) + 1 */ - const ulong minSrcSize = 513; - ulong maxWindowResize = 1UL << (sizeof(nuint) == 4 ? 
30 : 31) - 1; - assert(ZSTD_checkCParams(cPar) == 0); - switch (mode) + const uint maxShortCacheHashLog = 32 - 8; + if (cPar.hashLog > maxShortCacheHashLog) { - case ZSTD_CParamMode_e.ZSTD_cpm_unknown: - case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: - break; - case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: - if (dictSize != 0 && srcSize == unchecked(0UL - 1)) - srcSize = minSrcSize; - break; - case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: - dictSize = 0; - break; - default: - assert(0 != 0); - break; + cPar.hashLog = maxShortCacheHashLog; } - if (srcSize <= maxWindowResize && dictSize <= maxWindowResize) + if (cPar.chainLog > maxShortCacheHashLog) { - uint tSize = (uint)(srcSize + dictSize); - const uint hashSizeMin = 1 << 6; - uint srcLog = tSize < hashSizeMin ? 6 : ZSTD_highbit32(tSize - 1) + 1; - if (cPar.windowLog > srcLog) - cPar.windowLog = srcLog; + cPar.chainLog = maxShortCacheHashLog; } + } - if (srcSize != unchecked(0UL - 1)) + if (useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_auto) + useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder) != 0) + { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + uint rowLog = + cPar.searchLog <= 4 ? 4 + : cPar.searchLog <= 6 ? 
cPar.searchLog + : 6; + const uint maxRowHashLog = 32 - 8; + uint maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) { - uint dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, srcSize, dictSize); - uint cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); - if (cPar.hashLog > dictAndWindowLog + 1) - cPar.hashLog = dictAndWindowLog + 1; - if (cycleLog > dictAndWindowLog) - cPar.chainLog -= cycleLog - dictAndWindowLog; + cPar.hashLog = maxHashLog; } + } - if (cPar.windowLog < 10) - cPar.windowLog = 10; - if ( - mode == ZSTD_CParamMode_e.ZSTD_cpm_createCDict - && ZSTD_CDictIndicesAreTagged(&cPar) != 0 - ) - { - const uint maxShortCacheHashLog = 32 - 8; - if (cPar.hashLog > maxShortCacheHashLog) - { - cPar.hashLog = maxShortCacheHashLog; - } + return cPar; + } - if (cPar.chainLog > maxShortCacheHashLog) - { - cPar.chainLog = maxShortCacheHashLog; - } - } + /*! ZSTD_adjustCParams() : + * optimize params for a given `srcSize` and `dictSize`. + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. + * `dictSize` must be `0` when there is no dictionary. + * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. + * This function never fails (wide contract) */ + public static ZSTD_compressionParameters ZSTD_adjustCParams( + ZSTD_compressionParameters cPar, + ulong srcSize, + nuint dictSize + ) + { + cPar = ZSTD_clampCParams(cPar); + if (srcSize == 0) + srcSize = unchecked(0UL - 1); + return ZSTD_adjustCParams_internal( + cPar, + srcSize, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown, + ZSTD_paramSwitch_e.ZSTD_ps_auto + ); + } - if (useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_auto) - useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; - if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder) != 0) - { - /* Switch to 32-entry rows if searchLog is 5 (or more) */ - uint rowLog = - cPar.searchLog <= 4 ? 4 - : cPar.searchLog <= 6 ? 
cPar.searchLog - : 6; - const uint maxRowHashLog = 32 - 8; - uint maxHashLog = maxRowHashLog + rowLog; - assert(cPar.hashLog >= rowLog); - if (cPar.hashLog > maxHashLog) - { - cPar.hashLog = maxHashLog; - } - } + private static void ZSTD_overrideCParams( + ZSTD_compressionParameters* cParams, + ZSTD_compressionParameters* overrides + ) + { + if (overrides->windowLog != 0) + cParams->windowLog = overrides->windowLog; + if (overrides->hashLog != 0) + cParams->hashLog = overrides->hashLog; + if (overrides->chainLog != 0) + cParams->chainLog = overrides->chainLog; + if (overrides->searchLog != 0) + cParams->searchLog = overrides->searchLog; + if (overrides->minMatch != 0) + cParams->minMatch = overrides->minMatch; + if (overrides->targetLength != 0) + cParams->targetLength = overrides->targetLength; + if (overrides->strategy != default) + cParams->strategy = overrides->strategy; + } - return cPar; + /* ZSTD_getCParamsFromCCtxParams() : + * cParams are built depending on compressionLevel, src size hints, + * LDM and manually set compression parameters. + * Note: srcSizeHint == 0 means 0! + */ + private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + ZSTD_CCtx_params_s* CCtxParams, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) + { + ZSTD_compressionParameters cParams; + if (srcSizeHint == unchecked(0UL - 1) && CCtxParams->srcSizeHint > 0) + { + assert(CCtxParams->srcSizeHint >= 0); + srcSizeHint = (ulong)CCtxParams->srcSizeHint; } - /*! ZSTD_adjustCParams() : - * optimize params for a given `srcSize` and `dictSize`. - * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. - * `dictSize` must be `0` when there is no dictionary. - * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. 
- * This function never fails (wide contract) */ - public static ZSTD_compressionParameters ZSTD_adjustCParams( - ZSTD_compressionParameters cPar, - ulong srcSize, - nuint dictSize - ) - { - cPar = ZSTD_clampCParams(cPar); - if (srcSize == 0) - srcSize = unchecked(0UL - 1); - return ZSTD_adjustCParams_internal( - cPar, - srcSize, - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_unknown, - ZSTD_paramSwitch_e.ZSTD_ps_auto + cParams = ZSTD_getCParams_internal( + CCtxParams->compressionLevel, + srcSizeHint, + dictSize, + mode + ); + if (CCtxParams->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + cParams.windowLog = 27; + ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); + assert(ZSTD_checkCParams(cParams) == 0); + return ZSTD_adjustCParams_internal( + cParams, + srcSizeHint, + dictSize, + mode, + CCtxParams->useRowMatchFinder + ); + } + + private static nuint ZSTD_sizeof_matchState( + ZSTD_compressionParameters* cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + uint forCCtx + ) + { + /* chain table size should be 0 for fast or row-hash strategies */ + nuint chainSize = + ZSTD_allocateChainTable( + cParams->strategy, + useRowMatchFinder, + enableDedicatedDictSearch != 0 && forCCtx == 0 ? 1U : 0U + ) != 0 + ? (nuint)1 << (int)cParams->chainLog + : 0; + nuint hSize = (nuint)1 << (int)cParams->hashLog; + uint hashLog3 = + forCCtx != 0 && cParams->minMatch == 3 + ? 17 < cParams->windowLog + ? 17 + : cParams->windowLog + : 0; + nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; + /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't + * surrounded by redzones in ASAN. 
*/ + nuint tableSpace = + chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); + nuint optPotentialSpace = + ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) + + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) + + ZSTD_cwksp_aligned64_alloc_size( + (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) ); + nuint lazyAdditionalSpace = + ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 + ? ZSTD_cwksp_aligned64_alloc_size(hSize) + : 0; + nuint optSpace = + forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt + ? optPotentialSpace + : 0; + nuint slackSpace = ZSTD_cwksp_slack_space_required(); + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; + } + + /* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ + private static nuint ZSTD_maxNbSeq(nuint blockSize, uint minMatch, int useSequenceProducer) + { + uint divider = (uint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4); + return blockSize / divider; + } + + private static nuint ZSTD_estimateCCtxSize_usingCCtxParams_internal( + ZSTD_compressionParameters* cParams, + ldmParams_t* ldmParams, + int isStatic, + ZSTD_paramSwitch_e useRowMatchFinder, + nuint buffInSize, + nuint buffOutSize, + ulong pledgedSrcSize, + int useSequenceProducer, + nuint maxBlockSize + ) + { + nuint windowSize = (nuint)( + 1UL << (int)cParams->windowLog <= 1UL ? 1UL + : 1UL << (int)cParams->windowLog <= pledgedSrcSize ? 1UL << (int)cParams->windowLog + : pledgedSrcSize + ); + nuint blockSize = + ZSTD_resolveMaxBlockSize(maxBlockSize) < windowSize + ? 
ZSTD_resolveMaxBlockSize(maxBlockSize) + : windowSize; + nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); + nuint tokenSpace = + ZSTD_cwksp_alloc_size(32 + blockSize) + + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * (nuint)sizeof(SeqDef_s)) + + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(byte)); + nuint tmpWorkSpace = ZSTD_cwksp_alloc_size( + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); + nuint blockStateSpace = + 2 * ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_compressedBlockState_t)); + /* enableDedicatedDictSearch */ + nuint matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, 0, 1); + nuint ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); + nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); + nuint ldmSeqSpace = + ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) + : 0; + nuint bufferSpace = + ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); + nuint cctxSpace = isStatic != 0 ? ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CCtx_s)) : 0; + nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + nuint externalSeqSpace = + useSequenceProducer != 0 + ? 
ZSTD_cwksp_aligned64_alloc_size( + maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) + ) + : 0; + nuint neededSpace = + cctxSpace + + tmpWorkSpace + + blockStateSpace + + ldmSpace + + ldmSeqSpace + + matchStateSize + + tokenSpace + + bufferSpace + + externalSeqSpace; + return neededSpace; + } + + public static nuint ZSTD_estimateCCtxSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) + { + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( + @params, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + @params->useRowMatchFinder, + &cParams + ); + if (@params->nbWorkers > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - private static void ZSTD_overrideCParams( - ZSTD_compressionParameters* cParams, - ZSTD_compressionParameters* overrides - ) + return ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &cParams, + &@params->ldmParams, + 1, + useRowMatchFinder, + 0, + 0, + unchecked(0UL - 1), + ZSTD_hasExtSeqProd(@params), + @params->maxBlockSize + ); + } + + public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) + { + ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) { - if (overrides->windowLog != 0) - cParams->windowLog = overrides->windowLog; - if (overrides->hashLog != 0) - cParams->hashLog = overrides->hashLog; - if (overrides->chainLog != 0) - cParams->chainLog = overrides->chainLog; - if (overrides->searchLog != 0) - cParams->searchLog = overrides->searchLog; - if (overrides->minMatch != 0) - cParams->minMatch = overrides->minMatch; - if (overrides->targetLength != 0) - cParams->targetLength = overrides->targetLength; - if (overrides->strategy != default) - cParams->strategy = overrides->strategy; - } - - /* ZSTD_getCParamsFromCCtxParams() : - * cParams are built depending on compressionLevel, src size 
hints, - * LDM and manually set compression parameters. - * Note: srcSizeHint == 0 means 0! - */ - private static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( - ZSTD_CCtx_params_s* CCtxParams, - ulong srcSizeHint, - nuint dictSize, - ZSTD_CParamMode_e mode - ) + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + nuint noRowCCtxSize; + nuint rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); + return noRowCCtxSize > rowCCtxSize ? noRowCCtxSize : rowCCtxSize; + } + else { - ZSTD_compressionParameters cParams; - if (srcSizeHint == unchecked(0UL - 1) && CCtxParams->srcSizeHint > 0) - { - assert(CCtxParams->srcSizeHint >= 0); - srcSizeHint = (ulong)CCtxParams->srcSizeHint; - } - - cParams = ZSTD_getCParams_internal( - CCtxParams->compressionLevel, - srcSizeHint, - dictSize, - mode - ); - if (CCtxParams->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - cParams.windowLog = 27; - ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); - assert(ZSTD_checkCParams(cParams) == 0); - return ZSTD_adjustCParams_internal( - cParams, - srcSizeHint, - dictSize, - mode, - CCtxParams->useRowMatchFinder - ); + return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); } + } - private static nuint ZSTD_sizeof_matchState( - ZSTD_compressionParameters* cParams, - ZSTD_paramSwitch_e useRowMatchFinder, - int enableDedicatedDictSearch, - uint forCCtx - ) - { - /* chain table size should be 0 for fast or row-hash strategies */ - nuint chainSize = - ZSTD_allocateChainTable( - cParams->strategy, - useRowMatchFinder, - enableDedicatedDictSearch != 0 && forCCtx == 0 ? 1U : 0U - ) != 0 - ? 
(nuint)1 << (int)cParams->chainLog - : 0; - nuint hSize = (nuint)1 << (int)cParams->hashLog; - uint hashLog3 = - forCCtx != 0 && cParams->minMatch == 3 - ? 17 < cParams->windowLog - ? 17 - : cParams->windowLog - : 0; - nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; - /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't - * surrounded by redzones in ASAN. */ - nuint tableSpace = - chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); - nuint optPotentialSpace = - ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) - + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) - + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) - + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) - + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) - + ZSTD_cwksp_aligned64_alloc_size( - (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) +#if NET7_0_OR_GREATER + private static ReadOnlySpan Span_srcSizeTiers => + new ulong[4] { 16 * (1 << 10), 128 * (1 << 10), 256 * (1 << 10), unchecked(0UL - 1) }; + private static ulong* srcSizeTiers => + (ulong*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_srcSizeTiers) ); - nuint lazyAdditionalSpace = - ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 - ? ZSTD_cwksp_aligned64_alloc_size(hSize) - : 0; - nuint optSpace = - forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt - ? optPotentialSpace - : 0; - nuint slackSpace = ZSTD_cwksp_slack_space_required(); - assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; +#else + + private static readonly ulong* srcSizeTiers = GetArrayPointer( + new ulong[4] + { + (ulong)(16 * (1 << 10)), + (ulong)(128 * (1 << 10)), + (ulong)(256 * (1 << 10)), + (unchecked(0UL - 1)), } + ); +#endif - /* Helper function for calculating memory requirements. 
- * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ - private static nuint ZSTD_maxNbSeq(nuint blockSize, uint minMatch, int useSequenceProducer) + private static nuint ZSTD_estimateCCtxSize_internal(int compressionLevel) + { + int tier = 0; + nuint largestSize = 0; + for (; tier < 4; ++tier) { - uint divider = (uint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4); - return blockSize / divider; + /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + srcSizeTiers[tier], + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + largestSize = + ZSTD_estimateCCtxSize_usingCParams(cParams) > largestSize + ? ZSTD_estimateCCtxSize_usingCParams(cParams) + : largestSize; } - private static nuint ZSTD_estimateCCtxSize_usingCCtxParams_internal( - ZSTD_compressionParameters* cParams, - ldmParams_t* ldmParams, - int isStatic, - ZSTD_paramSwitch_e useRowMatchFinder, - nuint buffInSize, - nuint buffOutSize, - ulong pledgedSrcSize, - int useSequenceProducer, - nuint maxBlockSize + return largestSize; + } + + /*! ZSTD_estimate*() : + * These functions make it possible to estimate memory usage + * of a future {D,C}Ctx, before its creation. + * This is useful in combination with ZSTD_initStatic(), + * which makes it possible to employ a static buffer for ZSTD_CCtx* state. + * + * ZSTD_estimateCCtxSize() will provide a memory budget large enough + * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() + * associated with any compression level up to max specified one. + * The estimate will assume the input may be arbitrarily large, + * which is the worst case. + * + * Note that the size estimation is specific for one-shot compression, + * it is not valid for streaming (see ZSTD_estimateCStreamSize*()) + * nor other potential ways of using a ZSTD_CCtx* state. 
+ * + * When srcSize can be bound by a known and rather "small" value, + * this knowledge can be used to provide a tighter budget estimation + * because the ZSTD_CCtx* state will need less memory for small inputs. + * This tighter estimation can be provided by employing more advanced functions + * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), + * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). + * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. + * + * Note : only single-threaded compression is supported. + * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + */ + public static nuint ZSTD_estimateCCtxSize(int compressionLevel) + { + int level; + nuint memBudget = 0; + for ( + level = compressionLevel < 1 ? compressionLevel : 1; + level <= compressionLevel; + level++ ) { - nuint windowSize = (nuint)( - 1UL << (int)cParams->windowLog <= 1UL ? 1UL - : 1UL << (int)cParams->windowLog <= pledgedSrcSize ? 1UL << (int)cParams->windowLog - : pledgedSrcSize - ); - nuint blockSize = - ZSTD_resolveMaxBlockSize(maxBlockSize) < windowSize - ? ZSTD_resolveMaxBlockSize(maxBlockSize) - : windowSize; - nuint maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); - nuint tokenSpace = - ZSTD_cwksp_alloc_size(32 + blockSize) - + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * (nuint)sizeof(SeqDef_s)) - + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(byte)); - nuint tmpWorkSpace = ZSTD_cwksp_alloc_size( - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? 
(8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208 - ); - nuint blockStateSpace = - 2 * ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_compressedBlockState_t)); - /* enableDedicatedDictSearch */ - nuint matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, 0, 1); - nuint ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); - nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); - nuint ldmSeqSpace = - ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable - ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) - : 0; - nuint bufferSpace = - ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); - nuint cctxSpace = isStatic != 0 ? ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CCtx_s)) : 0; - nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); - nuint externalSeqSpace = - useSequenceProducer != 0 - ? ZSTD_cwksp_aligned64_alloc_size( - maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) - ) - : 0; - nuint neededSpace = - cctxSpace - + tmpWorkSpace - + blockStateSpace - + ldmSpace - + ldmSeqSpace - + matchStateSize - + tokenSpace - + bufferSpace - + externalSeqSpace; - return neededSpace; + /* Ensure monotonically increasing memory usage as compression level increases */ + nuint newMB = ZSTD_estimateCCtxSize_internal(level); + if (newMB > memBudget) + memBudget = newMB; + } + + return memBudget; + } + + public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) + { + if (@params->nbWorkers > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - public static nuint ZSTD_estimateCCtxSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( @params, @@ -2525,1738 +2670,1450 @@ public static nuint ZSTD_estimateCCtxSize_usingCCtxParams(ZSTD_CCtx_params_s* @p 0, ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict ); + nuint blockSize = + ZSTD_resolveMaxBlockSize(@params->maxBlockSize) + < (nuint)1 << (int)cParams.windowLog + 
? ZSTD_resolveMaxBlockSize(@params->maxBlockSize) + : (nuint)1 << (int)cParams.windowLog; + nuint inBuffSize = + @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ((nuint)1 << (int)cParams.windowLog) + blockSize + : 0; + nuint outBuffSize = + @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_compressBound(blockSize) + 1 + : 0; ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( @params->useRowMatchFinder, - &cParams + &@params->cParams ); - if (@params->nbWorkers > 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } - return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, &@params->ldmParams, 1, useRowMatchFinder, - 0, - 0, + inBuffSize, + outBuffSize, unchecked(0UL - 1), ZSTD_hasExtSeqProd(@params), @params->maxBlockSize ); } + } - public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) + public static nuint ZSTD_estimateCStreamSize_usingCParams( + ZSTD_compressionParameters cParams + ) + { + ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); + if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) { - ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); - if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) - { - /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ - nuint noRowCCtxSize; - nuint rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; - noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; - rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); - return noRowCCtxSize > rowCCtxSize ? 
noRowCCtxSize : rowCCtxSize; - } - else - { - return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); - } + /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ + nuint noRowCCtxSize; + nuint rowCCtxSize; + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; + noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; + rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + return noRowCCtxSize > rowCCtxSize ? noRowCCtxSize : rowCCtxSize; } + else + { + return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + } + } -#if NET7_0_OR_GREATER - private static ReadOnlySpan Span_srcSizeTiers => - new ulong[4] { 16 * (1 << 10), 128 * (1 << 10), 256 * (1 << 10), unchecked(0UL - 1) }; - private static ulong* srcSizeTiers => - (ulong*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_srcSizeTiers) - ); -#else - - private static readonly ulong* srcSizeTiers = GetArrayPointer( - new ulong[4] - { - (ulong)(16 * (1 << 10)), - (ulong)(128 * (1 << 10)), - (ulong)(256 * (1 << 10)), - (unchecked(0UL - 1)), - } + private static nuint ZSTD_estimateCStreamSize_internal(int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict ); -#endif + return ZSTD_estimateCStreamSize_usingCParams(cParams); + } + + /*! ZSTD_estimateCStreamSize() : + * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression + * using any compression level up to the max specified one. + * It will also consider src size to be arbitrarily "large", which is a worst case scenario. + * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. 
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. + * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note : CStream size estimation is only correct for single-threaded compression. + * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. + * + * ZSTD_DStream memory budget depends on frame's window Size. + * This information can be passed manually, using ZSTD_estimateDStreamSize, + * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); + * Any frame requesting a window size larger than max specified one will be rejected. + * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), + * an internal ?Dict will be created, which additional size is not estimated here. + * In this case, get total size by adding ZSTD_estimate?DictSize + */ + public static nuint ZSTD_estimateCStreamSize(int compressionLevel) + { + int level; + nuint memBudget = 0; + for ( + level = compressionLevel < 1 ? compressionLevel : 1; + level <= compressionLevel; + level++ + ) + { + nuint newMB = ZSTD_estimateCStreamSize_internal(level); + if (newMB > memBudget) + memBudget = newMB; + } + + return memBudget; + } - private static nuint ZSTD_estimateCCtxSize_internal(int compressionLevel) + /* ZSTD_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads (non-blocking mode). 
+ */ + public static ZSTD_frameProgression ZSTD_getFrameProgression(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers > 0) { - int tier = 0; - nuint largestSize = 0; - for (; tier < 4; ++tier) - { - /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */ - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - srcSizeTiers[tier], - 0, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - largestSize = - ZSTD_estimateCCtxSize_usingCParams(cParams) > largestSize - ? ZSTD_estimateCCtxSize_usingCParams(cParams) - : largestSize; - } + return ZSTDMT_getFrameProgression(cctx->mtctx); + } - return largestSize; + { + ZSTD_frameProgression fp; + nuint buffered = cctx->inBuff == null ? 0 : cctx->inBuffPos - cctx->inToCompress; +#if DEBUG + if (buffered != 0) + assert(cctx->inBuffPos >= cctx->inToCompress); +#endif + assert(buffered <= 1 << 17); + fp.ingested = cctx->consumedSrcSize + buffered; + fp.consumed = cctx->consumedSrcSize; + fp.produced = cctx->producedCSize; + fp.flushed = cctx->producedCSize; + fp.currentJobID = 0; + fp.nbActiveWorkers = 0; + return fp; } + } - /*! ZSTD_estimate*() : - * These functions make it possible to estimate memory usage - * of a future {D,C}Ctx, before its creation. - * This is useful in combination with ZSTD_initStatic(), - * which makes it possible to employ a static buffer for ZSTD_CCtx* state. - * - * ZSTD_estimateCCtxSize() will provide a memory budget large enough - * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2() - * associated with any compression level up to max specified one. - * The estimate will assume the input may be arbitrarily large, - * which is the worst case. - * - * Note that the size estimation is specific for one-shot compression, - * it is not valid for streaming (see ZSTD_estimateCStreamSize*()) - * nor other potential ways of using a ZSTD_CCtx* state. 
- * - * When srcSize can be bound by a known and rather "small" value, - * this knowledge can be used to provide a tighter budget estimation - * because the ZSTD_CCtx* state will need less memory for small inputs. - * This tighter estimation can be provided by employing more advanced functions - * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), - * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). - * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. - * - * Note : only single-threaded compression is supported. - * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. - */ - public static nuint ZSTD_estimateCCtxSize(int compressionLevel) - { - int level; - nuint memBudget = 0; - for ( - level = compressionLevel < 1 ? compressionLevel : 1; - level <= compressionLevel; - level++ - ) - { - /* Ensure monotonically increasing memory usage as compression level increases */ - nuint newMB = ZSTD_estimateCCtxSize_internal(level); - if (newMB > memBudget) - memBudget = newMB; - } + /*! ZSTD_toFlushNow() + * Only useful for multithreading scenarios currently (nbWorkers >= 1). 
+ */ + public static nuint ZSTD_toFlushNow(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers > 0) + { + return ZSTDMT_toFlushNow(cctx->mtctx); + } + + return 0; + } + + [Conditional("DEBUG")] + private static void ZSTD_assertEqualCParams( + ZSTD_compressionParameters cParams1, + ZSTD_compressionParameters cParams2 + ) + { + assert(cParams1.windowLog == cParams2.windowLog); + assert(cParams1.chainLog == cParams2.chainLog); + assert(cParams1.hashLog == cParams2.hashLog); + assert(cParams1.searchLog == cParams2.searchLog); + assert(cParams1.minMatch == cParams2.minMatch); + assert(cParams1.targetLength == cParams2.targetLength); + assert(cParams1.strategy == cParams2.strategy); + } + + private static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) + { + int i; + for (i = 0; i < 3; ++i) + bs->rep[i] = repStartValue[i]; + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_none; + bs->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_none; + bs->entropy.fse.matchlength_repeatMode = FSE_repeat.FSE_repeat_none; + bs->entropy.fse.litlength_repeatMode = FSE_repeat.FSE_repeat_none; + } + + /*! ZSTD_invalidateMatchState() + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). 
+ */ + private static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) + { + ZSTD_window_clear(&ms->window); + ms->nextToUpdate = ms->window.dictLimit; + ms->loadedDictEnd = 0; + ms->opt.litLengthSum = 0; + ms->dictMatchState = null; + } + + /* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ + private static ulong ZSTD_bitmix(ulong val, ulong len) + { + val ^= BitOperations.RotateRight(val, 49) ^ BitOperations.RotateRight(val, 24); + val *= 0x9FB21C651E98DF25UL; + val ^= (val >> 35) + len; + val *= 0x9FB21C651E98DF25UL; + return val ^ val >> 28; + } - return memBudget; + /* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ + private static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) + { + ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix(ms->hashSaltEntropy, 4); + } + + private static nuint ZSTD_reset_matchState( + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_compressionParameters* cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + ZSTD_compResetPolicy_e crp, + ZSTD_indexResetPolicy_e forceResetIndex, + ZSTD_resetTarget_e forWho + ) + { + /* disable chain table allocation for fast or row-based strategies */ + nuint chainSize = + ZSTD_allocateChainTable( + cParams->strategy, + useRowMatchFinder, + ms->dedicatedDictSearch != 0 + && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict + ? 1U + : 0U + ) != 0 + ? (nuint)1 << (int)cParams->chainLog + : 0; + nuint hSize = (nuint)1 << (int)cParams->hashLog; + uint hashLog3 = + forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->minMatch == 3 + ? 17 < cParams->windowLog + ? 17 + : cParams->windowLog + : 0; + nuint h3Size = hashLog3 != 0 ? 
(nuint)1 << (int)hashLog3 : 0; + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + if (forceResetIndex == ZSTD_indexResetPolicy_e.ZSTDirp_reset) + { + ZSTD_window_init(&ms->window); + ZSTD_cwksp_mark_tables_dirty(ws); } - public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* @params) + ms->hashLog3 = hashLog3; + ms->lazySkipping = 0; + ZSTD_invalidateMatchState(ms); + assert(ZSTD_cwksp_reserve_failed(ws) == 0); + ZSTD_cwksp_clear_tables(ws); + ms->hashTable = (uint*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(uint)); + ms->chainTable = (uint*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(uint)); + ms->hashTable3 = (uint*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(uint)); + if (ZSTD_cwksp_reserve_failed(ws) != 0) { - if (@params->nbWorkers > 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } - { - ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( - @params, - unchecked(0UL - 1), - 0, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - nuint blockSize = - ZSTD_resolveMaxBlockSize(@params->maxBlockSize) - < (nuint)1 << (int)cParams.windowLog - ? ZSTD_resolveMaxBlockSize(@params->maxBlockSize) - : (nuint)1 << (int)cParams.windowLog; - nuint inBuffSize = - @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? ((nuint)1 << (int)cParams.windowLog) + blockSize - : 0; - nuint outBuffSize = - @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? 
ZSTD_compressBound(blockSize) + 1 - : 0; - ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - @params->useRowMatchFinder, - &@params->cParams - ); - return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, - &@params->ldmParams, - 1, - useRowMatchFinder, - inBuffSize, - outBuffSize, - unchecked(0UL - 1), - ZSTD_hasExtSeqProd(@params), - @params->maxBlockSize - ); - } + if (crp != ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty) + { + ZSTD_cwksp_clean_tables(ws); } - public static nuint ZSTD_estimateCStreamSize_usingCParams( - ZSTD_compressionParameters cParams - ) + if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0) { - ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); - if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) + /* Row match finder needs an additional table of hashes ("tags") */ + nuint tagTableSize = hSize; + if (forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx) { - /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ - nuint noRowCCtxSize; - nuint rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_disable; - noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_paramSwitch_e.ZSTD_ps_enable; - rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - return noRowCCtxSize > rowCCtxSize ? 
noRowCCtxSize : rowCCtxSize; + ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); + ZSTD_advanceHashSalt(ms); } else { - return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); + ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); + memset(ms->tagTable, 0, (uint)tagTableSize); + ms->hashSalt = 0; } - } - private static nuint ZSTD_estimateCStreamSize_internal(int compressionLevel) - { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - unchecked(0UL - 1), - 0, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - return ZSTD_estimateCStreamSize_usingCParams(cParams); - } - - /*! ZSTD_estimateCStreamSize() : - * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression - * using any compression level up to the max specified one. - * It will also consider src size to be arbitrarily "large", which is a worst case scenario. - * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. - * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. - * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. - * Note : CStream size estimation is only correct for single-threaded compression. - * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. - * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. - * Size estimates assume that no external sequence producer is registered. - * - * ZSTD_DStream memory budget depends on frame's window Size. 
- * This information can be passed manually, using ZSTD_estimateDStreamSize, - * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); - * Any frame requesting a window size larger than max specified one will be rejected. - * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), - * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize - */ - public static nuint ZSTD_estimateCStreamSize(int compressionLevel) - { - int level; - nuint memBudget = 0; - for ( - level = compressionLevel < 1 ? compressionLevel : 1; - level <= compressionLevel; - level++ - ) - { - nuint newMB = ZSTD_estimateCStreamSize_internal(level); - if (newMB > memBudget) - memBudget = newMB; - } - - return memBudget; - } - - /* ZSTD_getFrameProgression(): - * tells how much data has been consumed (input) and produced (output) for current frame. - * able to count progression inside worker threads (non-blocking mode). - */ - public static ZSTD_frameProgression ZSTD_getFrameProgression(ZSTD_CCtx_s* cctx) - { - if (cctx->appliedParams.nbWorkers > 0) - { - return ZSTDMT_getFrameProgression(cctx->mtctx); - } - - { - ZSTD_frameProgression fp; - nuint buffered = cctx->inBuff == null ? 0 : cctx->inBuffPos - cctx->inToCompress; -#if DEBUG - if (buffered != 0) - assert(cctx->inBuffPos >= cctx->inToCompress); -#endif - assert(buffered <= 1 << 17); - fp.ingested = cctx->consumedSrcSize + buffered; - fp.consumed = cctx->consumedSrcSize; - fp.produced = cctx->producedCSize; - fp.flushed = cctx->producedCSize; - fp.currentJobID = 0; - fp.nbActiveWorkers = 0; - return fp; - } - } - - /*! ZSTD_toFlushNow() - * Only useful for multithreading scenarios currently (nbWorkers >= 1). - */ - public static nuint ZSTD_toFlushNow(ZSTD_CCtx_s* cctx) - { - if (cctx->appliedParams.nbWorkers > 0) { - return ZSTDMT_toFlushNow(cctx->mtctx); + uint rowLog = + cParams->searchLog <= 4 ? 
4 + : cParams->searchLog <= 6 ? cParams->searchLog + : 6; + assert(cParams->hashLog >= rowLog); + ms->rowHashLog = cParams->hashLog - rowLog; } - - return 0; } - [Conditional("DEBUG")] - private static void ZSTD_assertEqualCParams( - ZSTD_compressionParameters cParams1, - ZSTD_compressionParameters cParams2 + if ( + forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx + && cParams->strategy >= ZSTD_strategy.ZSTD_btopt ) { - assert(cParams1.windowLog == cParams2.windowLog); - assert(cParams1.chainLog == cParams2.chainLog); - assert(cParams1.hashLog == cParams2.hashLog); - assert(cParams1.searchLog == cParams2.searchLog); - assert(cParams1.minMatch == cParams2.minMatch); - assert(cParams1.targetLength == cParams2.targetLength); - assert(cParams1.strategy == cParams2.strategy); + ms->opt.litFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (1 << 8) * sizeof(uint)); + ms->opt.litLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (35 + 1) * sizeof(uint) + ); + ms->opt.matchLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (52 + 1) * sizeof(uint) + ); + ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + ws, + (31 + 1) * sizeof(uint) + ); + ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64( + ws, + (nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t)) + ); + ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64( + ws, + (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) + ); } - private static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) + ms->cParams = *cParams; + if (ZSTD_cwksp_reserve_failed(ws) != 0) { - int i; - for (i = 0; i < 3; ++i) - bs->rep[i] = repStartValue[i]; - bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_none; - bs->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_none; - bs->entropy.fse.matchlength_repeatMode = FSE_repeat.FSE_repeat_none; - bs->entropy.fse.litlength_repeatMode = FSE_repeat.FSE_repeat_none; + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - /*! ZSTD_invalidateMatchState() - * Invalidate all the matches in the match finder tables. - * Requires nextSrc and base to be set (can be NULL). - */ - private static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) - { - ZSTD_window_clear(&ms->window); - ms->nextToUpdate = ms->window.dictLimit; - ms->loadedDictEnd = 0; - ms->opt.litLengthSum = 0; - ms->dictMatchState = null; - } + return 0; + } - /* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ - private static ulong ZSTD_bitmix(ulong val, ulong len) - { - val ^= BitOperations.RotateRight(val, 49) ^ BitOperations.RotateRight(val, 24); - val *= 0x9FB21C651E98DF25UL; - val ^= (val >> 35) + len; - val *= 0x9FB21C651E98DF25UL; - return val ^ val >> 28; - } + private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) + { + return + (nuint)(w.nextSrc - w.@base) + > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) + ? 1 + : 0; + } + + /** ZSTD_dictTooBig(): + * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in + * one go generically. So we ensure that in that case we reset the tables to zero, + * so that we can load as much of the dictionary as possible. + */ + private static int ZSTD_dictTooBig(nuint loadedDictSize) + { + return + loadedDictSize + > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) + ? 1 + : 0; + } - /* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ - private static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) + /*! ZSTD_resetCCtx_internal() : + * @param loadedDictSize The size of the dictionary to be loaded + * into the context, if any. If no dictionary is used, or the + * dictionary is being attached / copied, then pass 0. + * note : `params` are assumed fully validated at this stage. 
+ */ + private static nuint ZSTD_resetCCtx_internal( + ZSTD_CCtx_s* zc, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + nuint loadedDictSize, + ZSTD_compResetPolicy_e crp, + ZSTD_buffered_policy_e zbuff + ) + { + ZSTD_cwksp* ws = &zc->workspace; + assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); + zc->isFirstBlock = 1; + zc->appliedParams = *@params; + @params = &zc->appliedParams; + assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert(@params->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert(@params->ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert(@params->maxBlockSize != 0); + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix(ms->hashSaltEntropy, 4); + ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &@params->cParams); + assert(@params->ldmParams.hashLog >= @params->ldmParams.bucketSizeLog); + assert(@params->ldmParams.hashRateLog < 32); } - private static nuint ZSTD_reset_matchState( - ZSTD_MatchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_compressionParameters* cParams, - ZSTD_paramSwitch_e useRowMatchFinder, - ZSTD_compResetPolicy_e crp, - ZSTD_indexResetPolicy_e forceResetIndex, - ZSTD_resetTarget_e forWho - ) { - /* disable chain table allocation for fast or row-based strategies */ - nuint chainSize = - ZSTD_allocateChainTable( - cParams->strategy, - useRowMatchFinder, - ms->dedicatedDictSearch != 0 - && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict - ? 1U - : 0U - ) != 0 - ? (nuint)1 << (int)cParams->chainLog + nuint windowSize = + 1 + > (nuint)( + (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize + ? (ulong)1 << (int)@params->cParams.windowLog + : pledgedSrcSize + ) + ? 1 + : (nuint)( + (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize + ? (ulong)1 << (int)@params->cParams.windowLog + : pledgedSrcSize + ); + nuint blockSize = + @params->maxBlockSize < windowSize ? 
@params->maxBlockSize : windowSize; + nuint maxNbSeq = ZSTD_maxNbSeq( + blockSize, + @params->cParams.minMatch, + ZSTD_hasExtSeqProd(@params) + ); + nuint buffOutSize = + zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered + && @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_compressBound(blockSize) + 1 : 0; - nuint hSize = (nuint)1 << (int)cParams->hashLog; - uint hashLog3 = - forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx && cParams->minMatch == 3 - ? 17 < cParams->windowLog - ? 17 - : cParams->windowLog + nuint buffInSize = + zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered + && @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? windowSize + blockSize : 0; - nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; - assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - if (forceResetIndex == ZSTD_indexResetPolicy_e.ZSTDirp_reset) + nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(@params->ldmParams, blockSize); + int indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); + int dictTooBig = ZSTD_dictTooBig(loadedDictSize); + ZSTD_indexResetPolicy_e needsIndexReset = + indexTooClose != 0 || dictTooBig != 0 || zc->initialized == 0 + ? ZSTD_indexResetPolicy_e.ZSTDirp_reset + : ZSTD_indexResetPolicy_e.ZSTDirp_continue; + nuint neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( + &@params->cParams, + &@params->ldmParams, + zc->staticSize != 0 ? 
1 : 0, + @params->useRowMatchFinder, + buffInSize, + buffOutSize, + pledgedSrcSize, + ZSTD_hasExtSeqProd(@params), + @params->maxBlockSize + ); { - ZSTD_window_init(&ms->window); - ZSTD_cwksp_mark_tables_dirty(ws); + nuint err_code = neededSpace; + if (ERR_isError(err_code)) + { + return err_code; + } } - ms->hashLog3 = hashLog3; - ms->lazySkipping = 0; - ZSTD_invalidateMatchState(ms); - assert(ZSTD_cwksp_reserve_failed(ws) == 0); - ZSTD_cwksp_clear_tables(ws); - ms->hashTable = (uint*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(uint)); - ms->chainTable = (uint*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(uint)); - ms->hashTable3 = (uint*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(uint)); - if (ZSTD_cwksp_reserve_failed(ws) != 0) + if (zc->staticSize == 0) + ZSTD_cwksp_bump_oversized_duration(ws, 0); { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + int workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace ? 1 : 0; + int workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); + int resizeWorkspace = workspaceTooSmall != 0 || workspaceWasteful != 0 ? 
1 : 0; + if (resizeWorkspace != 0) + { + if (zc->staticSize != 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); + } - if (crp != ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty) - { - ZSTD_cwksp_clean_tables(ws); - } + needsIndexReset = ZSTD_indexResetPolicy_e.ZSTDirp_reset; + ZSTD_cwksp_free(ws, zc->customMem); + { + nuint err_code = ZSTD_cwksp_create(ws, neededSpace, zc->customMem); + if (ERR_isError(err_code)) + { + return err_code; + } + } - if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0) - { - /* Row match finder needs an additional table of hashes ("tags") */ - nuint tagTableSize = hSize; - if (forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx) - { - ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); - ZSTD_advanceHashSalt(ms); - } - else - { - ms->tagTable = (byte*)ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); - memset(ms->tagTable, 0, (uint)tagTableSize); - ms->hashSalt = 0; - } + assert( + ZSTD_cwksp_check_available( + ws, + (nuint)(2 * sizeof(ZSTD_compressedBlockState_t)) + ) != 0 + ); + zc->blockState.prevCBlock = + (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + ws, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + if (zc->blockState.prevCBlock == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); + } + + zc->blockState.nextCBlock = + (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( + ws, + (nuint)sizeof(ZSTD_compressedBlockState_t) + ); + if (zc->blockState.nextCBlock == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); + } + + zc->tmpWorkspace = ZSTD_cwksp_reserve_object( + ws, + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? 
(8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208 + ); + if (zc->tmpWorkspace == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); + } + zc->tmpWkspSize = + (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 + ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) + : 8208; + } + } + + ZSTD_cwksp_clear(ws); + zc->blockState.matchState.cParams = @params->cParams; + zc->blockState.matchState.prefetchCDictTables = + @params->prefetchCDictTables == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + zc->pledgedSrcSizePlusOne = pledgedSrcSize + 1; + zc->consumedSrcSize = 0; + zc->producedCSize = 0; + if (pledgedSrcSize == unchecked(0UL - 1)) + zc->appliedParams.fParams.contentSizeFlag = 0; + zc->blockSizeMax = blockSize; + ZSTD_XXH64_reset(&zc->xxhState, 0); + zc->stage = ZSTD_compressionStage_e.ZSTDcs_init; + zc->dictID = 0; + zc->dictContentSize = 0; + ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + { + nuint err_code = ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + &@params->cParams, + @params->useRowMatchFinder, + crp, + needsIndexReset, + ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx + ); + if (ERR_isError(err_code)) { - uint rowLog = - cParams->searchLog <= 4 ? 4 - : cParams->searchLog <= 6 ? 
cParams->searchLog - : 6; - assert(cParams->hashLog >= rowLog); - ms->rowHashLog = cParams->hashLog - rowLog; + return err_code; } } - if ( - forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx - && cParams->strategy >= ZSTD_strategy.ZSTD_btopt - ) + zc->seqStore.sequencesStart = (SeqDef_s*)ZSTD_cwksp_reserve_aligned64( + ws, + maxNbSeq * (nuint)sizeof(SeqDef_s) + ); + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - ms->opt.litFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (1 << 8) * sizeof(uint)); - ms->opt.litLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( - ws, - (35 + 1) * sizeof(uint) - ); - ms->opt.matchLengthFreq = (uint*)ZSTD_cwksp_reserve_aligned64( - ws, - (52 + 1) * sizeof(uint) - ); - ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64( + /* TODO: avoid memset? */ + nuint ldmHSize = (nuint)1 << (int)@params->ldmParams.hashLog; + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64( ws, - (31 + 1) * sizeof(uint) + ldmHSize * (nuint)sizeof(ldmEntry_t) ); - ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64( + memset(zc->ldmState.hashTable, 0, (uint)(ldmHSize * (nuint)sizeof(ldmEntry_t))); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64( ws, - (nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t)) - ); - ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64( - ws, - (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) + maxNbLdmSeq * (nuint)sizeof(rawSeq) ); + zc->maxNbLdmSequences = maxNbLdmSeq; + ZSTD_window_init(&zc->ldmState.window); + zc->ldmState.loadedDictEnd = 0; } - ms->cParams = *cParams; - if (ZSTD_cwksp_reserve_failed(ws) != 0) + if (ZSTD_hasExtSeqProd(@params) != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->extSeqBufCapacity = maxNbExternalSeq; + zc->extSeqBuf = (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64( + ws, + maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) + ); 
} + zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + 32); + zc->seqStore.maxNbLit = blockSize; + zc->bufferedPolicy = zbuff; + zc->inBuffSize = buffInSize; + zc->inBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); + zc->outBuffSize = buffOutSize; + zc->outBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + /* TODO: avoid memset? */ + nuint numBuckets = + (nuint)1 + << (int)(@params->ldmParams.hashLog - @params->ldmParams.bucketSizeLog); + zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); + memset(zc->ldmState.bucketOffsets, 0, (uint)numBuckets); + } + + ZSTD_referenceExternalSequences(zc, null, 0); + zc->seqStore.maxNbSeq = maxNbSeq; + zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); + zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); + zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace) != 0); + zc->initialized = 1; return 0; } + } - private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) - { - return - (nuint)(w.nextSrc - w.@base) - > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) - ? 1 - : 0; - } + /* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ + private static void ZSTD_invalidateRepCodes(ZSTD_CCtx_s* cctx) + { + int i; + for (i = 0; i < 3; i++) + cctx->blockState.prevCBlock->rep[i] = 0; + assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); + } - /** ZSTD_dictTooBig(): - * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in - * one go generically. So we ensure that in that case we reset the tables to zero, - * so that we can load as much of the dictionary as possible. 
- */ - private static int ZSTD_dictTooBig(nuint loadedDictSize) - { - return - loadedDictSize - > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) + private static readonly nuint* attachDictSizeCutoffs = GetArrayPointer( + new nuint[10] + { + 8 * (1 << 10), + 8 * (1 << 10), + 16 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 32 * (1 << 10), + 8 * (1 << 10), + 8 * (1 << 10), + } + ); + + private static int ZSTD_shouldAttachDict( + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) + { + nuint cutoff = attachDictSizeCutoffs[(int)cdict->matchState.cParams.strategy]; + int dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; + return + dedicatedDictSearch != 0 + || ( + pledgedSrcSize <= cutoff + || pledgedSrcSize == unchecked(0UL - 1) + || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach + ) + && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy + && @params->forceWindow == 0 ? 1 : 0; - } + } - /*! ZSTD_resetCCtx_internal() : - * @param loadedDictSize The size of the dictionary to be loaded - * into the context, if any. If no dictionary is used, or the - * dictionary is being attached / copied, then pass 0. - * note : `params` are assumed fully validated at this stage. 
- */ - private static nuint ZSTD_resetCCtx_internal( - ZSTD_CCtx_s* zc, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize, - nuint loadedDictSize, - ZSTD_compResetPolicy_e crp, - ZSTD_buffered_policy_e zbuff - ) + private static nuint ZSTD_resetCCtx_byAttachingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) + { { - ZSTD_cwksp* ws = &zc->workspace; - assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); - zc->isFirstBlock = 1; - zc->appliedParams = *@params; - @params = &zc->appliedParams; - assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(@params->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(@params->ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(@params->maxBlockSize != 0); - if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; + uint windowLog = @params.cParams.windowLog; + assert(windowLog != 0); + if (cdict->matchState.dedicatedDictSearch != 0) { - ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &@params->cParams); - assert(@params->ldmParams.hashLog >= @params->ldmParams.bucketSizeLog); - assert(@params->ldmParams.hashRateLog < 32); + ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); } + @params.cParams = ZSTD_adjustCParams_internal( + adjusted_cdict_cParams, + pledgedSrcSize, + cdict->dictContentSize, + ZSTD_CParamMode_e.ZSTD_cpm_attachDict, + @params.useRowMatchFinder + ); + @params.cParams.windowLog = windowLog; + @params.useRowMatchFinder = cdict->useRowMatchFinder; { - nuint windowSize = - 1 - > (nuint)( - (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize - ? (ulong)1 << (int)@params->cParams.windowLog - : pledgedSrcSize - ) - ? 1 - : (nuint)( - (ulong)1 << (int)@params->cParams.windowLog < pledgedSrcSize - ? 
(ulong)1 << (int)@params->cParams.windowLog - : pledgedSrcSize - ); - nuint blockSize = - @params->maxBlockSize < windowSize ? @params->maxBlockSize : windowSize; - nuint maxNbSeq = ZSTD_maxNbSeq( - blockSize, - @params->cParams.minMatch, - ZSTD_hasExtSeqProd(@params) - ); - nuint buffOutSize = - zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered - && @params->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? ZSTD_compressBound(blockSize) + 1 - : 0; - nuint buffInSize = - zbuff == ZSTD_buffered_policy_e.ZSTDb_buffered - && @params->inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? windowSize + blockSize - : 0; - nuint maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(@params->ldmParams, blockSize); - int indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window); - int dictTooBig = ZSTD_dictTooBig(loadedDictSize); - ZSTD_indexResetPolicy_e needsIndexReset = - indexTooClose != 0 || dictTooBig != 0 || zc->initialized == 0 - ? ZSTD_indexResetPolicy_e.ZSTDirp_reset - : ZSTD_indexResetPolicy_e.ZSTDirp_continue; - nuint neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &@params->cParams, - &@params->ldmParams, - zc->staticSize != 0 ? 1 : 0, - @params->useRowMatchFinder, - buffInSize, - buffOutSize, + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + &@params, pledgedSrcSize, - ZSTD_hasExtSeqProd(@params), - @params->maxBlockSize + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + zbuff ); + if (ERR_isError(err_code)) { - nuint err_code = neededSpace; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - if (zc->staticSize == 0) - ZSTD_cwksp_bump_oversized_duration(ws, 0); - { - int workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace ? 1 : 0; - int workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); - int resizeWorkspace = workspaceTooSmall != 0 || workspaceWasteful != 0 ? 
1 : 0; - if (resizeWorkspace != 0) - { - if (zc->staticSize != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - needsIndexReset = ZSTD_indexResetPolicy_e.ZSTDirp_reset; - ZSTD_cwksp_free(ws, zc->customMem); - { - nuint err_code = ZSTD_cwksp_create(ws, neededSpace, zc->customMem); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert( - ZSTD_cwksp_check_available( - ws, - (nuint)(2 * sizeof(ZSTD_compressedBlockState_t)) - ) != 0 - ); - zc->blockState.prevCBlock = - (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( - ws, - (nuint)sizeof(ZSTD_compressedBlockState_t) - ); - if (zc->blockState.prevCBlock == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - zc->blockState.nextCBlock = - (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object( - ws, - (nuint)sizeof(ZSTD_compressedBlockState_t) - ); - if (zc->blockState.nextCBlock == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - zc->tmpWorkspace = ZSTD_cwksp_reserve_object( - ws, - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208 - ); - if (zc->tmpWorkspace == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - zc->tmpWkspSize = - (8 << 10) + 512 + sizeof(uint) * (52 + 2) > 8208 - ? (8 << 10) + 512 + sizeof(uint) * (52 + 2) - : 8208; - } - } + assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy); + } - ZSTD_cwksp_clear(ws); - zc->blockState.matchState.cParams = @params->cParams; - zc->blockState.matchState.prefetchCDictTables = - @params->prefetchCDictTables == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 
1 : 0; - zc->pledgedSrcSizePlusOne = pledgedSrcSize + 1; - zc->consumedSrcSize = 0; - zc->producedCSize = 0; - if (pledgedSrcSize == unchecked(0UL - 1)) - zc->appliedParams.fParams.contentSizeFlag = 0; - zc->blockSizeMax = blockSize; - ZSTD_XXH64_reset(&zc->xxhState, 0); - zc->stage = ZSTD_compressionStage_e.ZSTDcs_init; - zc->dictID = 0; - zc->dictContentSize = 0; - ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + { + uint cdictEnd = (uint)( + cdict->matchState.window.nextSrc - cdict->matchState.window.@base + ); + uint cdictLen = cdictEnd - cdict->matchState.window.dictLimit; + if (cdictLen != 0) + { + cctx->blockState.matchState.dictMatchState = &cdict->matchState; + if (cctx->blockState.matchState.window.dictLimit < cdictEnd) { - nuint err_code = ZSTD_reset_matchState( - &zc->blockState.matchState, - ws, - &@params->cParams, - @params->useRowMatchFinder, - crp, - needsIndexReset, - ZSTD_resetTarget_e.ZSTD_resetTarget_CCtx - ); - if (ERR_isError(err_code)) - { - return err_code; - } + cctx->blockState.matchState.window.nextSrc = + cctx->blockState.matchState.window.@base + cdictEnd; + ZSTD_window_clear(&cctx->blockState.matchState.window); } - zc->seqStore.sequencesStart = (SeqDef_s*)ZSTD_cwksp_reserve_aligned64( - ws, - maxNbSeq * (nuint)sizeof(SeqDef_s) - ); - if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - /* TODO: avoid memset? 
*/ - nuint ldmHSize = (nuint)1 << (int)@params->ldmParams.hashLog; - zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64( - ws, - ldmHSize * (nuint)sizeof(ldmEntry_t) - ); - memset(zc->ldmState.hashTable, 0, (uint)(ldmHSize * (nuint)sizeof(ldmEntry_t))); - zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64( - ws, - maxNbLdmSeq * (nuint)sizeof(rawSeq) - ); - zc->maxNbLdmSequences = maxNbLdmSeq; - ZSTD_window_init(&zc->ldmState.window); - zc->ldmState.loadedDictEnd = 0; - } + cctx->blockState.matchState.loadedDictEnd = cctx->blockState + .matchState + .window + .dictLimit; + } + } - if (ZSTD_hasExtSeqProd(@params) != 0) - { - nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); - zc->extSeqBufCapacity = maxNbExternalSeq; - zc->extSeqBuf = (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64( - ws, - maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) - ); - } - - zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + 32); - zc->seqStore.maxNbLit = blockSize; - zc->bufferedPolicy = zbuff; - zc->inBuffSize = buffInSize; - zc->inBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); - zc->outBuffSize = buffOutSize; - zc->outBuff = (sbyte*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); - if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - /* TODO: avoid memset? 
*/ - nuint numBuckets = - (nuint)1 - << (int)(@params->ldmParams.hashLog - @params->ldmParams.bucketSizeLog); - zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets); - memset(zc->ldmState.bucketOffsets, 0, (uint)numBuckets); - } - - ZSTD_referenceExternalSequences(zc, null, 0); - zc->seqStore.maxNbSeq = maxNbSeq; - zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); - zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); - zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(byte)); - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace) != 0); - zc->initialized = 1; - return 0; - } - } + cctx->dictID = cdict->dictID; + cctx->dictContentSize = cdict->dictContentSize; + memcpy( + cctx->blockState.prevCBlock, + &cdict->cBlockState, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); + return 0; + } - /* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! */ - private static void ZSTD_invalidateRepCodes(ZSTD_CCtx_s* cctx) + private static void ZSTD_copyCDictTableIntoCCtx( + uint* dst, + uint* src, + nuint tableSize, + ZSTD_compressionParameters* cParams + ) + { + if (ZSTD_CDictIndicesAreTagged(cParams) != 0) { - int i; - for (i = 0; i < 3; i++) - cctx->blockState.prevCBlock->rep[i] = 0; - assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); - } - - private static readonly nuint* attachDictSizeCutoffs = GetArrayPointer( - new nuint[10] + /* Remove tags from the CDict table if they are present. + * See docs on "short cache" in zstd_compress_internal.h for context. 
*/ + nuint i; + for (i = 0; i < tableSize; i++) { - 8 * (1 << 10), - 8 * (1 << 10), - 16 * (1 << 10), - 32 * (1 << 10), - 32 * (1 << 10), - 32 * (1 << 10), - 32 * (1 << 10), - 32 * (1 << 10), - 8 * (1 << 10), - 8 * (1 << 10), + uint taggedIndex = src[i]; + uint index = taggedIndex >> 8; + dst[i] = index; } - ); - - private static int ZSTD_shouldAttachDict( - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize - ) + } + else { - nuint cutoff = attachDictSizeCutoffs[(int)cdict->matchState.cParams.strategy]; - int dedicatedDictSearch = cdict->matchState.dedicatedDictSearch; - return - dedicatedDictSearch != 0 - || ( - pledgedSrcSize <= cutoff - || pledgedSrcSize == unchecked(0UL - 1) - || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach - ) - && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy - && @params->forceWindow == 0 - ? 1 - : 0; + memcpy(dst, src, (uint)(tableSize * sizeof(uint))); } + } - private static nuint ZSTD_resetCCtx_byAttachingCDict( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s @params, - ulong pledgedSrcSize, - ZSTD_buffered_policy_e zbuff - ) + private static nuint ZSTD_resetCCtx_byCopyingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) + { + ZSTD_compressionParameters* cdict_cParams = &cdict->matchState.cParams; + assert(cdict->matchState.dedicatedDictSearch == 0); { + uint windowLog = @params.cParams.windowLog; + assert(windowLog != 0); + @params.cParams = *cdict_cParams; + @params.cParams.windowLog = windowLog; + @params.useRowMatchFinder = cdict->useRowMatchFinder; { - ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams; - uint windowLog = @params.cParams.windowLog; - assert(windowLog != 0); - if (cdict->matchState.dedicatedDictSearch != 0) - { - ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams); - } - - @params.cParams = 
ZSTD_adjustCParams_internal( - adjusted_cdict_cParams, + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + &@params, pledgedSrcSize, - cdict->dictContentSize, - ZSTD_CParamMode_e.ZSTD_cpm_attachDict, - @params.useRowMatchFinder - ); - @params.cParams.windowLog = windowLog; - @params.useRowMatchFinder = cdict->useRowMatchFinder; - { - nuint err_code = ZSTD_resetCCtx_internal( - cctx, - &@params, - pledgedSrcSize, - 0, - ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, - zbuff - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy); - } - - { - uint cdictEnd = (uint)( - cdict->matchState.window.nextSrc - cdict->matchState.window.@base + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, + zbuff ); - uint cdictLen = cdictEnd - cdict->matchState.window.dictLimit; - if (cdictLen != 0) + if (ERR_isError(err_code)) { - cctx->blockState.matchState.dictMatchState = &cdict->matchState; - if (cctx->blockState.matchState.window.dictLimit < cdictEnd) - { - cctx->blockState.matchState.window.nextSrc = - cctx->blockState.matchState.window.@base + cdictEnd; - ZSTD_window_clear(&cctx->blockState.matchState.window); - } - - cctx->blockState.matchState.loadedDictEnd = cctx->blockState - .matchState - .window - .dictLimit; + return err_code; } } - cctx->dictID = cdict->dictID; - cctx->dictContentSize = cdict->dictContentSize; - memcpy( - cctx->blockState.prevCBlock, - &cdict->cBlockState, - (uint)sizeof(ZSTD_compressedBlockState_t) - ); - return 0; - } - - private static void ZSTD_copyCDictTableIntoCCtx( - uint* dst, - uint* src, - nuint tableSize, - ZSTD_compressionParameters* cParams - ) - { - if (ZSTD_CDictIndicesAreTagged(cParams) != 0) - { - /* Remove tags from the CDict table if they are present. - * See docs on "short cache" in zstd_compress_internal.h for context. 
*/ - nuint i; - for (i = 0; i < tableSize; i++) - { - uint taggedIndex = src[i]; - uint index = taggedIndex >> 8; - dst[i] = index; - } - } - else - { - memcpy(dst, src, (uint)(tableSize * sizeof(uint))); - } + assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); + assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); + assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); } - private static nuint ZSTD_resetCCtx_byCopyingCDict( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s @params, - ulong pledgedSrcSize, - ZSTD_buffered_policy_e zbuff - ) + ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); + assert(@params.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); { - ZSTD_compressionParameters* cdict_cParams = &cdict->matchState.cParams; - assert(cdict->matchState.dedicatedDictSearch == 0); - { - uint windowLog = @params.cParams.windowLog; - assert(windowLog != 0); - @params.cParams = *cdict_cParams; - @params.cParams.windowLog = windowLog; - @params.useRowMatchFinder = cdict->useRowMatchFinder; - { - nuint err_code = ZSTD_resetCCtx_internal( - cctx, - &@params, - pledgedSrcSize, - 0, - ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, - zbuff - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy); - assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog); - assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog); - } - - ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); - assert(@params.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + /* DDS guaranteed disabled */ + nuint chainSize = + ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) + != 0 + ? 
(nuint)1 << (int)cdict_cParams->chainLog + : 0; + nuint hSize = (nuint)1 << (int)cdict_cParams->hashLog; + ZSTD_copyCDictTableIntoCCtx( + cctx->blockState.matchState.hashTable, + cdict->matchState.hashTable, + hSize, + cdict_cParams + ); + if ( + ZSTD_allocateChainTable( + cctx->appliedParams.cParams.strategy, + cctx->appliedParams.useRowMatchFinder, + 0 + ) != 0 + ) { - /* DDS guaranteed disabled */ - nuint chainSize = - ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) - != 0 - ? (nuint)1 << (int)cdict_cParams->chainLog - : 0; - nuint hSize = (nuint)1 << (int)cdict_cParams->hashLog; ZSTD_copyCDictTableIntoCCtx( - cctx->blockState.matchState.hashTable, - cdict->matchState.hashTable, - hSize, + cctx->blockState.matchState.chainTable, + cdict->matchState.chainTable, + chainSize, cdict_cParams ); - if ( - ZSTD_allocateChainTable( - cctx->appliedParams.cParams.strategy, - cctx->appliedParams.useRowMatchFinder, - 0 - ) != 0 - ) - { - ZSTD_copyCDictTableIntoCCtx( - cctx->blockState.matchState.chainTable, - cdict->matchState.chainTable, - chainSize, - cdict_cParams - ); - } - - if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder) != 0) - { - nuint tagTableSize = hSize; - memcpy( - cctx->blockState.matchState.tagTable, - cdict->matchState.tagTable, - (uint)tagTableSize - ); - cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; - } - } - - assert(cctx->blockState.matchState.hashLog3 <= 31); - { - uint h3log = cctx->blockState.matchState.hashLog3; - nuint h3Size = h3log != 0 ? 
(nuint)1 << (int)h3log : 0; - assert(cdict->matchState.hashLog3 == 0); - memset(cctx->blockState.matchState.hashTable3, 0, (uint)(h3Size * sizeof(uint))); } - ZSTD_cwksp_mark_tables_clean(&cctx->workspace); + if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder) != 0) { - ZSTD_MatchState_t* srcMatchState = &cdict->matchState; - ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState; - dstMatchState->window = srcMatchState->window; - dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd; + nuint tagTableSize = hSize; + memcpy( + cctx->blockState.matchState.tagTable, + cdict->matchState.tagTable, + (uint)tagTableSize + ); + cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; } - - cctx->dictID = cdict->dictID; - cctx->dictContentSize = cdict->dictContentSize; - memcpy( - cctx->blockState.prevCBlock, - &cdict->cBlockState, - (uint)sizeof(ZSTD_compressedBlockState_t) - ); - return 0; } - /* We have a choice between copying the dictionary context into the working - * context, or referencing the dictionary context from the working context - * in-place. We decide here which strategy to use. */ - private static nuint ZSTD_resetCCtx_usingCDict( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize, - ZSTD_buffered_policy_e zbuff - ) + assert(cctx->blockState.matchState.hashLog3 <= 31); { - if (ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) - { - return ZSTD_resetCCtx_byAttachingCDict( - cctx, - cdict, - *@params, - pledgedSrcSize, - zbuff - ); - } - else - { - return ZSTD_resetCCtx_byCopyingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff); - } + uint h3log = cctx->blockState.matchState.hashLog3; + nuint h3Size = h3log != 0 ? (nuint)1 << (int)h3log : 0; + assert(cdict->matchState.hashLog3 == 0); + memset(cctx->blockState.matchState.hashTable3, 0, (uint)(h3Size * sizeof(uint))); } - /*! 
ZSTD_copyCCtx_internal() : - * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. - * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). - * The "context", in this case, refers to the hash and chain tables, - * entropy tables, and dictionary references. - * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. - * @return : 0, or an error code */ - private static nuint ZSTD_copyCCtx_internal( - ZSTD_CCtx_s* dstCCtx, - ZSTD_CCtx_s* srcCCtx, - ZSTD_frameParameters fParams, - ulong pledgedSrcSize, - ZSTD_buffered_policy_e zbuff - ) + ZSTD_cwksp_mark_tables_clean(&cctx->workspace); { - if (srcCCtx->stage != ZSTD_compressionStage_e.ZSTDcs_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } - - memcpy(&dstCCtx->customMem, &srcCCtx->customMem, (uint)sizeof(ZSTD_customMem)); - { - ZSTD_CCtx_params_s @params = dstCCtx->requestedParams; - @params.cParams = srcCCtx->appliedParams.cParams; - assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert( - srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto - ); - @params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; - @params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; - @params.ldmParams = srcCCtx->appliedParams.ldmParams; - @params.fParams = fParams; - @params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; - ZSTD_resetCCtx_internal( - dstCCtx, - &@params, - pledgedSrcSize, - 0, - ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, - zbuff - ); - assert( - dstCCtx->appliedParams.cParams.windowLog - == srcCCtx->appliedParams.cParams.windowLog - ); - assert( - dstCCtx->appliedParams.cParams.strategy - == srcCCtx->appliedParams.cParams.strategy - ); - assert( - dstCCtx->appliedParams.cParams.hashLog == 
srcCCtx->appliedParams.cParams.hashLog - ); - assert( - dstCCtx->appliedParams.cParams.chainLog - == srcCCtx->appliedParams.cParams.chainLog - ); - assert( - dstCCtx->blockState.matchState.hashLog3 - == srcCCtx->blockState.matchState.hashLog3 - ); - } - - ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); - { - nuint chainSize = - ZSTD_allocateChainTable( - srcCCtx->appliedParams.cParams.strategy, - srcCCtx->appliedParams.useRowMatchFinder, - 0 - ) != 0 - ? (nuint)1 << (int)srcCCtx->appliedParams.cParams.chainLog - : 0; - nuint hSize = (nuint)1 << (int)srcCCtx->appliedParams.cParams.hashLog; - uint h3log = srcCCtx->blockState.matchState.hashLog3; - nuint h3Size = h3log != 0 ? (nuint)1 << (int)h3log : 0; - memcpy( - dstCCtx->blockState.matchState.hashTable, - srcCCtx->blockState.matchState.hashTable, - (uint)(hSize * sizeof(uint)) - ); - memcpy( - dstCCtx->blockState.matchState.chainTable, - srcCCtx->blockState.matchState.chainTable, - (uint)(chainSize * sizeof(uint)) - ); - memcpy( - dstCCtx->blockState.matchState.hashTable3, - srcCCtx->blockState.matchState.hashTable3, - (uint)(h3Size * sizeof(uint)) - ); - } + ZSTD_MatchState_t* srcMatchState = &cdict->matchState; + ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd; + } - ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); - { - ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; - ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState; - dstMatchState->window = srcMatchState->window; - dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; - dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd; - } + cctx->dictID = cdict->dictID; + cctx->dictContentSize = cdict->dictContentSize; + memcpy( + cctx->blockState.prevCBlock, + &cdict->cBlockState, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); + 
return 0; + } - dstCCtx->dictID = srcCCtx->dictID; - dstCCtx->dictContentSize = srcCCtx->dictContentSize; - memcpy( - dstCCtx->blockState.prevCBlock, - srcCCtx->blockState.prevCBlock, - (uint)sizeof(ZSTD_compressedBlockState_t) + /* We have a choice between copying the dictionary context into the working + * context, or referencing the dictionary context from the working context + * in-place. We decide here which strategy to use. */ + private static nuint ZSTD_resetCCtx_usingCDict( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) + { + if (ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) + { + return ZSTD_resetCCtx_byAttachingCDict( + cctx, + cdict, + *@params, + pledgedSrcSize, + zbuff ); - return 0; } - - /*! ZSTD_copyCCtx() : - * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. - * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). - * pledgedSrcSize==0 means "unknown". - * @return : 0, or an error code */ - public static nuint ZSTD_copyCCtx( - ZSTD_CCtx_s* dstCCtx, - ZSTD_CCtx_s* srcCCtx, - ulong pledgedSrcSize - ) - { - /*content*/ - ZSTD_frameParameters fParams = new ZSTD_frameParameters - { - contentSizeFlag = 1, - checksumFlag = 0, - noDictIDFlag = 0, - }; - ZSTD_buffered_policy_e zbuff = srcCCtx->bufferedPolicy; - if (pledgedSrcSize == 0) - pledgedSrcSize = unchecked(0UL - 1); - fParams.contentSizeFlag = pledgedSrcSize != unchecked(0UL - 1) ? 1 : 0; - return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff); - } - - /*! ZSTD_reduceTable() : - * reduce table indexes by `reducerValue`, or squash to zero. - * PreserveMark preserves "unsorted mark" for btlazy2 strategy. - * It must be set to a clear 0/1 value, to remove branch during inlining. 
- * Presume table size is a multiple of ZSTD_ROWSIZE - * to help auto-vectorization */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_reduceTable_internal( - uint* table, - uint size, - uint reducerValue, - int preserveMark - ) + else { - int nbRows = (int)size / 16; - int cellNb = 0; - int rowNb; - /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ - uint reducerThreshold = reducerValue + 2; - assert((size & 16 - 1) == 0); - assert(size < 1U << 31); - for (rowNb = 0; rowNb < nbRows; rowNb++) - { - int column; - for (column = 0; column < 16; column++) - { - uint newVal; - if (preserveMark != 0 && table[cellNb] == 1) - { - newVal = 1; - } - else if (table[cellNb] < reducerThreshold) - { - newVal = 0; - } - else - { - newVal = table[cellNb] - reducerValue; - } - - table[cellNb] = newVal; - cellNb++; - } - } + return ZSTD_resetCCtx_byCopyingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff); } + } - private static void ZSTD_reduceTable(uint* table, uint size, uint reducerValue) + /*! ZSTD_copyCCtx_internal() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * The "context", in this case, refers to the hash and chain tables, + * entropy tables, and dictionary references. + * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. 
+ * @return : 0, or an error code */ + private static nuint ZSTD_copyCCtx_internal( + ZSTD_CCtx_s* dstCCtx, + ZSTD_CCtx_s* srcCCtx, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) + { + if (srcCCtx->stage != ZSTD_compressionStage_e.ZSTDcs_init) { - ZSTD_reduceTable_internal(table, size, reducerValue, 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - private static void ZSTD_reduceTable_btlazy2(uint* table, uint size, uint reducerValue) + memcpy(&dstCCtx->customMem, &srcCCtx->customMem, (uint)sizeof(ZSTD_customMem)); { - ZSTD_reduceTable_internal(table, size, reducerValue, 1); + ZSTD_CCtx_params_s @params = dstCCtx->requestedParams; + @params.cParams = srcCCtx->appliedParams.cParams; + assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert( + srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto + ); + @params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; + @params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; + @params.ldmParams = srcCCtx->appliedParams.ldmParams; + @params.fParams = fParams; + @params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; + ZSTD_resetCCtx_internal( + dstCCtx, + &@params, + pledgedSrcSize, + 0, + ZSTD_compResetPolicy_e.ZSTDcrp_leaveDirty, + zbuff + ); + assert( + dstCCtx->appliedParams.cParams.windowLog + == srcCCtx->appliedParams.cParams.windowLog + ); + assert( + dstCCtx->appliedParams.cParams.strategy + == srcCCtx->appliedParams.cParams.strategy + ); + assert( + dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog + ); + assert( + dstCCtx->appliedParams.cParams.chainLog + == srcCCtx->appliedParams.cParams.chainLog + ); + assert( + dstCCtx->blockState.matchState.hashLog3 + == srcCCtx->blockState.matchState.hashLog3 + ); } - /*! 
ZSTD_reduceIndex() : - * rescale all indexes to avoid future overflow (indexes are U32) */ - private static void ZSTD_reduceIndex( - ZSTD_MatchState_t* ms, - ZSTD_CCtx_params_s* @params, - uint reducerValue - ) + ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace); { - { - uint hSize = (uint)1 << (int)@params->cParams.hashLog; - ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); - } - - if ( + nuint chainSize = ZSTD_allocateChainTable( - @params->cParams.strategy, - @params->useRowMatchFinder, - (uint)ms->dedicatedDictSearch + srcCCtx->appliedParams.cParams.strategy, + srcCCtx->appliedParams.useRowMatchFinder, + 0 ) != 0 - ) - { - uint chainSize = (uint)1 << (int)@params->cParams.chainLog; - if (@params->cParams.strategy == ZSTD_strategy.ZSTD_btlazy2) - ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); - else - ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); - } + ? (nuint)1 << (int)srcCCtx->appliedParams.cParams.chainLog + : 0; + nuint hSize = (nuint)1 << (int)srcCCtx->appliedParams.cParams.hashLog; + uint h3log = srcCCtx->blockState.matchState.hashLog3; + nuint h3Size = h3log != 0 ? 
(nuint)1 << (int)h3log : 0; + memcpy( + dstCCtx->blockState.matchState.hashTable, + srcCCtx->blockState.matchState.hashTable, + (uint)(hSize * sizeof(uint)) + ); + memcpy( + dstCCtx->blockState.matchState.chainTable, + srcCCtx->blockState.matchState.chainTable, + (uint)(chainSize * sizeof(uint)) + ); + memcpy( + dstCCtx->blockState.matchState.hashTable3, + srcCCtx->blockState.matchState.hashTable3, + (uint)(h3Size * sizeof(uint)) + ); + } - if (ms->hashLog3 != 0) - { - uint h3Size = (uint)1 << (int)ms->hashLog3; - ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); - } + ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace); + { + ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; + ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd; } - /* See doc/zstd_compression_format.md for detailed format description */ - private static int ZSTD_seqToCodes(SeqStore_t* seqStorePtr) + dstCCtx->dictID = srcCCtx->dictID; + dstCCtx->dictContentSize = srcCCtx->dictContentSize; + memcpy( + dstCCtx->blockState.prevCBlock, + srcCCtx->blockState.prevCBlock, + (uint)sizeof(ZSTD_compressedBlockState_t) + ); + return 0; + } + + /*! ZSTD_copyCCtx() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * pledgedSrcSize==0 means "unknown". 
+ * @return : 0, or an error code */ + public static nuint ZSTD_copyCCtx( + ZSTD_CCtx_s* dstCCtx, + ZSTD_CCtx_s* srcCCtx, + ulong pledgedSrcSize + ) + { + /*content*/ + ZSTD_frameParameters fParams = new ZSTD_frameParameters + { + contentSizeFlag = 1, + checksumFlag = 0, + noDictIDFlag = 0, + }; + ZSTD_buffered_policy_e zbuff = srcCCtx->bufferedPolicy; + if (pledgedSrcSize == 0) + pledgedSrcSize = unchecked(0UL - 1); + fParams.contentSizeFlag = pledgedSrcSize != unchecked(0UL - 1) ? 1 : 0; + return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff); + } + + /*! ZSTD_reduceTable() : + * reduce table indexes by `reducerValue`, or squash to zero. + * PreserveMark preserves "unsorted mark" for btlazy2 strategy. + * It must be set to a clear 0/1 value, to remove branch during inlining. + * Presume table size is a multiple of ZSTD_ROWSIZE + * to help auto-vectorization */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_reduceTable_internal( + uint* table, + uint size, + uint reducerValue, + int preserveMark + ) + { + int nbRows = (int)size / 16; + int cellNb = 0; + int rowNb; + /* Protect special index values < ZSTD_WINDOW_START_INDEX. 
*/ + uint reducerThreshold = reducerValue + 2; + assert((size & 16 - 1) == 0); + assert(size < 1U << 31); + for (rowNb = 0; rowNb < nbRows; rowNb++) { - SeqDef_s* sequences = seqStorePtr->sequencesStart; - byte* llCodeTable = seqStorePtr->llCode; - byte* ofCodeTable = seqStorePtr->ofCode; - byte* mlCodeTable = seqStorePtr->mlCode; - uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - uint u; - int longOffsets = 0; - assert(nbSeq <= seqStorePtr->maxNbSeq); - for (u = 0; u < nbSeq; u++) + int column; + for (column = 0; column < 16; column++) { - uint llv = sequences[u].litLength; - uint ofCode = ZSTD_highbit32(sequences[u].offBase); - uint mlv = sequences[u].mlBase; - llCodeTable[u] = (byte)ZSTD_LLcode(llv); - ofCodeTable[u] = (byte)ofCode; - mlCodeTable[u] = (byte)ZSTD_MLcode(mlv); - assert(!(MEM_64bits && ofCode >= (uint)(MEM_32bits ? 25 : 57))); - if (MEM_32bits && ofCode >= (uint)(MEM_32bits ? 25 : 57)) - longOffsets = 1; + uint newVal; + if (preserveMark != 0 && table[cellNb] == 1) + { + newVal = 1; + } + else if (table[cellNb] < reducerThreshold) + { + newVal = 0; + } + else + { + newVal = table[cellNb] - reducerValue; + } + + table[cellNb] = newVal; + cellNb++; } + } + } + + private static void ZSTD_reduceTable(uint* table, uint size, uint reducerValue) + { + ZSTD_reduceTable_internal(table, size, reducerValue, 0); + } + + private static void ZSTD_reduceTable_btlazy2(uint* table, uint size, uint reducerValue) + { + ZSTD_reduceTable_internal(table, size, reducerValue, 1); + } - if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) - llCodeTable[seqStorePtr->longLengthPos] = 35; - if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) - mlCodeTable[seqStorePtr->longLengthPos] = 52; - return longOffsets; + /*! 
ZSTD_reduceIndex() : + * rescale all indexes to avoid future overflow (indexes are U32) */ + private static void ZSTD_reduceIndex( + ZSTD_MatchState_t* ms, + ZSTD_CCtx_params_s* @params, + uint reducerValue + ) + { + { + uint hSize = (uint)1 << (int)@params->cParams.hashLog; + ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); } - /* ZSTD_useTargetCBlockSize(): - * Returns if target compressed block size param is being used. - * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize. - * Returns 1 if true, 0 otherwise. */ - private static int ZSTD_useTargetCBlockSize(ZSTD_CCtx_params_s* cctxParams) + if ( + ZSTD_allocateChainTable( + @params->cParams.strategy, + @params->useRowMatchFinder, + (uint)ms->dedicatedDictSearch + ) != 0 + ) { - return cctxParams->targetCBlockSize != 0 ? 1 : 0; + uint chainSize = (uint)1 << (int)@params->cParams.chainLog; + if (@params->cParams.strategy == ZSTD_strategy.ZSTD_btlazy2) + ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); + else + ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); } - /* ZSTD_blockSplitterEnabled(): - * Returns if block splitting param is being used - * If used, compression will do best effort to split a block in order to improve compression ratio. - * At the time this function is called, the parameter must be finalized. - * Returns 1 if true, 0 otherwise. */ - private static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params_s* cctxParams) + if (ms->hashLog3 != 0) { - assert(cctxParams->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); - return cctxParams->postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + uint h3Size = (uint)1 << (int)ms->hashLog3; + ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); } + } - /* ZSTD_buildSequencesStatistics(): - * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field. - * Modifies `nextEntropy` to have the appropriate values as a side effect. 
- * nbSeq must be greater than 0. - * - * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) - */ - private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics( - SeqStore_t* seqStorePtr, - nuint nbSeq, - ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - byte* dst, - byte* dstEnd, - ZSTD_strategy strategy, - uint* countWorkspace, - void* entropyWorkspace, - nuint entropyWkspSize - ) - { - byte* ostart = dst; - byte* oend = dstEnd; - byte* op = ostart; - uint* CTable_LitLength = nextEntropy->litlengthCTable; - uint* CTable_OffsetBits = nextEntropy->offcodeCTable; - uint* CTable_MatchLength = nextEntropy->matchlengthCTable; - byte* ofCodeTable = seqStorePtr->ofCode; - byte* llCodeTable = seqStorePtr->llCode; - byte* mlCodeTable = seqStorePtr->mlCode; - ZSTD_symbolEncodingTypeStats_t stats; - System.Runtime.CompilerServices.Unsafe.SkipInit(out stats); - stats.lastCountSize = 0; - stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); - assert(op <= oend); - assert(nbSeq != 0); + /* See doc/zstd_compression_format.md for detailed format description */ + private static int ZSTD_seqToCodes(SeqStore_t* seqStorePtr) + { + SeqDef_s* sequences = seqStorePtr->sequencesStart; + byte* llCodeTable = seqStorePtr->llCode; + byte* ofCodeTable = seqStorePtr->ofCode; + byte* mlCodeTable = seqStorePtr->mlCode; + uint nbSeq = (uint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + uint u; + int longOffsets = 0; + assert(nbSeq <= seqStorePtr->maxNbSeq); + for (u = 0; u < nbSeq; u++) + { + uint llv = sequences[u].litLength; + uint ofCode = ZSTD_highbit32(sequences[u].offBase); + uint mlv = sequences[u].mlBase; + llCodeTable[u] = (byte)ZSTD_LLcode(llv); + ofCodeTable[u] = (byte)ofCode; + mlCodeTable[u] = (byte)ZSTD_MLcode(mlv); + assert(!(MEM_64bits && ofCode >= (uint)(MEM_32bits ? 25 : 57))); + if (MEM_32bits && ofCode >= (uint)(MEM_32bits ? 
25 : 57)) + longOffsets = 1; + } + + if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) + llCodeTable[seqStorePtr->longLengthPos] = 35; + if (seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) + mlCodeTable[seqStorePtr->longLengthPos] = 52; + return longOffsets; + } + + /* ZSTD_useTargetCBlockSize(): + * Returns if target compressed block size param is being used. + * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize. + * Returns 1 if true, 0 otherwise. */ + private static int ZSTD_useTargetCBlockSize(ZSTD_CCtx_params_s* cctxParams) + { + return cctxParams->targetCBlockSize != 0 ? 1 : 0; + } + + /* ZSTD_blockSplitterEnabled(): + * Returns if block splitting param is being used + * If used, compression will do best effort to split a block in order to improve compression ratio. + * At the time this function is called, the parameter must be finalized. + * Returns 1 if true, 0 otherwise. */ + private static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params_s* cctxParams) + { + assert(cctxParams->postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); + return cctxParams->postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; + } + + /* ZSTD_buildSequencesStatistics(): + * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field. + * Modifies `nextEntropy` to have the appropriate values as a side effect. + * nbSeq must be greater than 0. 
+ * + * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) + */ + private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildSequencesStatistics( + SeqStore_t* seqStorePtr, + nuint nbSeq, + ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + byte* dst, + byte* dstEnd, + ZSTD_strategy strategy, + uint* countWorkspace, + void* entropyWorkspace, + nuint entropyWkspSize + ) + { + byte* ostart = dst; + byte* oend = dstEnd; + byte* op = ostart; + uint* CTable_LitLength = nextEntropy->litlengthCTable; + uint* CTable_OffsetBits = nextEntropy->offcodeCTable; + uint* CTable_MatchLength = nextEntropy->matchlengthCTable; + byte* ofCodeTable = seqStorePtr->ofCode; + byte* llCodeTable = seqStorePtr->llCode; + byte* mlCodeTable = seqStorePtr->mlCode; + ZSTD_symbolEncodingTypeStats_t stats; + System.Runtime.CompilerServices.Unsafe.SkipInit(out stats); + stats.lastCountSize = 0; + stats.longOffsets = ZSTD_seqToCodes(seqStorePtr); + assert(op <= oend); + assert(nbSeq != 0); + { + uint max = 35; + /* can't fail */ + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, + &max, + llCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); + nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; + stats.LLtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->litlength_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 9, + prevEntropy->litlengthCTable, + LL_defaultNorm, + LL_defaultNormLog, + ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, + strategy + ); + assert( + SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed + && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed + ); + assert( + !( + stats.LLtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->litlength_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); { - uint max = 35; - /* can't fail */ - nuint mostFrequent = HIST_countFast_wksp( - countWorkspace, - &max, - llCodeTable, - nbSeq, - 
entropyWorkspace, - entropyWkspSize - ); - nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; - stats.LLtype = (uint)ZSTD_selectEncodingType( - &nextEntropy->litlength_repeatMode, + nuint countSize = ZSTD_buildCTable( + op, + (nuint)(oend - op), + CTable_LitLength, + 9, + (SymbolEncodingType_e)stats.LLtype, countWorkspace, max, - mostFrequent, + llCodeTable, nbSeq, - 9, - prevEntropy->litlengthCTable, LL_defaultNorm, LL_defaultNormLog, - ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, - strategy - ); - assert( - SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed - && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed - ); - assert( - !( - stats.LLtype < (uint)SymbolEncodingType_e.set_compressed - && nextEntropy->litlength_repeatMode != FSE_repeat.FSE_repeat_none - ) - ); - { - nuint countSize = ZSTD_buildCTable( - op, - (nuint)(oend - op), - CTable_LitLength, - 9, - (SymbolEncodingType_e)stats.LLtype, - countWorkspace, - max, - llCodeTable, - nbSeq, - LL_defaultNorm, - LL_defaultNormLog, - 35, - prevEntropy->litlengthCTable, - sizeof(uint) * 329, - entropyWorkspace, - entropyWkspSize - ); - if (ERR_isError(countSize)) - { - stats.size = countSize; - return stats; - } - - if (stats.LLtype == (uint)SymbolEncodingType_e.set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); - } - } - - { - uint max = 31; - nuint mostFrequent = HIST_countFast_wksp( - countWorkspace, - &max, - ofCodeTable, - nbSeq, + 35, + prevEntropy->litlengthCTable, + sizeof(uint) * 329, entropyWorkspace, entropyWkspSize ); - /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ - ZSTD_DefaultPolicy_e defaultPolicy = - max <= 28 - ? 
ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed - : ZSTD_DefaultPolicy_e.ZSTD_defaultDisallowed; - nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; - stats.Offtype = (uint)ZSTD_selectEncodingType( - &nextEntropy->offcode_repeatMode, - countWorkspace, - max, - mostFrequent, - nbSeq, - 8, - prevEntropy->offcodeCTable, - OF_defaultNorm, - OF_defaultNormLog, - defaultPolicy, - strategy - ); - assert( - !( - stats.Offtype < (uint)SymbolEncodingType_e.set_compressed - && nextEntropy->offcode_repeatMode != FSE_repeat.FSE_repeat_none - ) - ); + if (ERR_isError(countSize)) { - nuint countSize = ZSTD_buildCTable( - op, - (nuint)(oend - op), - CTable_OffsetBits, - 8, - (SymbolEncodingType_e)stats.Offtype, - countWorkspace, - max, - ofCodeTable, - nbSeq, - OF_defaultNorm, - OF_defaultNormLog, - 28, - prevEntropy->offcodeCTable, - sizeof(uint) * 193, - entropyWorkspace, - entropyWkspSize - ); - if (ERR_isError(countSize)) - { - stats.size = countSize; - return stats; - } - - if (stats.Offtype == (uint)SymbolEncodingType_e.set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); + stats.size = countSize; + return stats; } - } - - { - uint max = 52; - nuint mostFrequent = HIST_countFast_wksp( - countWorkspace, - &max, - mlCodeTable, - nbSeq, - entropyWorkspace, - entropyWkspSize - ); - nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; - stats.MLtype = (uint)ZSTD_selectEncodingType( - &nextEntropy->matchlength_repeatMode, - countWorkspace, - max, - mostFrequent, - nbSeq, - 9, - prevEntropy->matchlengthCTable, - ML_defaultNorm, - ML_defaultNormLog, - ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, - strategy - ); - assert( - !( - stats.MLtype < (uint)SymbolEncodingType_e.set_compressed - && nextEntropy->matchlength_repeatMode != FSE_repeat.FSE_repeat_none - ) - ); - { - nuint countSize = ZSTD_buildCTable( - op, - (nuint)(oend - op), - CTable_MatchLength, - 9, - (SymbolEncodingType_e)stats.MLtype, - countWorkspace, - 
max, - mlCodeTable, - nbSeq, - ML_defaultNorm, - ML_defaultNormLog, - 52, - prevEntropy->matchlengthCTable, - sizeof(uint) * 363, - entropyWorkspace, - entropyWkspSize - ); - if (ERR_isError(countSize)) - { - stats.size = countSize; - return stats; - } - if (stats.MLtype == (uint)SymbolEncodingType_e.set_compressed) - stats.lastCountSize = countSize; - op += countSize; - assert(op <= oend); - } + if (stats.LLtype == (uint)SymbolEncodingType_e.set_compressed) + stats.lastCountSize = countSize; + op += countSize; + assert(op <= oend); } - - stats.size = (nuint)(op - ostart); - return stats; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_entropyCompressSeqStore_internal( - void* dst, - nuint dstCapacity, - void* literals, - nuint litSize, - SeqStore_t* seqStorePtr, - ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - ZSTD_CCtx_params_s* cctxParams, - void* entropyWorkspace, - nuint entropyWkspSize, - int bmi2 - ) { - ZSTD_strategy strategy = cctxParams->cParams.strategy; - uint* count = (uint*)entropyWorkspace; - uint* CTable_LitLength = nextEntropy->fse.litlengthCTable; - uint* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; - uint* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; - SeqDef_s* sequences = seqStorePtr->sequencesStart; - nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - byte* ofCodeTable = seqStorePtr->ofCode; - byte* llCodeTable = seqStorePtr->llCode; - byte* mlCodeTable = seqStorePtr->mlCode; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstCapacity; - byte* op = ostart; - nuint lastCountSize; - int longOffsets = 0; - entropyWorkspace = count + (52 + 1); - entropyWkspSize -= (52 + 1) * sizeof(uint); - assert(entropyWkspSize >= (8 << 10) + 512); - { - nuint numSequences = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - /* Base suspicion of uncompressibility on ratio of literals to sequences */ - int suspectUncompressible = - 
numSequences == 0 || litSize / numSequences >= 20 ? 1 : 0; - nuint cSize = ZSTD_compressLiterals( - op, - dstCapacity, - literals, - litSize, - entropyWorkspace, - entropyWkspSize, - &prevEntropy->huf, - &nextEntropy->huf, - cctxParams->cParams.strategy, - ZSTD_literalsCompressionIsDisabled(cctxParams), - suspectUncompressible, - bmi2 - ); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(cSize <= dstCapacity); - op += cSize; - } - - if (oend - op < 3 + 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - if (nbSeq < 128) - { - *op++ = (byte)nbSeq; - } - else if (nbSeq < 0x7F00) - { - op[0] = (byte)((nbSeq >> 8) + 0x80); - op[1] = (byte)nbSeq; - op += 2; - } - else - { - op[0] = 0xFF; - MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00)); - op += 3; - } - - assert(op <= oend); - if (nbSeq == 0) - { - memcpy(&nextEntropy->fse, &prevEntropy->fse, (uint)sizeof(ZSTD_fseCTables_t)); - return (nuint)(op - ostart); - } - + uint max = 31; + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, + &max, + ofCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_DefaultPolicy_e defaultPolicy = + max <= 28 + ? 
ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed + : ZSTD_DefaultPolicy_e.ZSTD_defaultDisallowed; + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + stats.Offtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->offcode_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 8, + prevEntropy->offcodeCTable, + OF_defaultNorm, + OF_defaultNormLog, + defaultPolicy, + strategy + ); + assert( + !( + stats.Offtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->offcode_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); { - byte* seqHead = op++; - /* build stats for sequences */ - ZSTD_symbolEncodingTypeStats_t stats = ZSTD_buildSequencesStatistics( - seqStorePtr, - nbSeq, - &prevEntropy->fse, - &nextEntropy->fse, + nuint countSize = ZSTD_buildCTable( op, - oend, - strategy, - count, + (nuint)(oend - op), + CTable_OffsetBits, + 8, + (SymbolEncodingType_e)stats.Offtype, + countWorkspace, + max, + ofCodeTable, + nbSeq, + OF_defaultNorm, + OF_defaultNormLog, + 28, + prevEntropy->offcodeCTable, + sizeof(uint) * 193, entropyWorkspace, entropyWkspSize ); + if (ERR_isError(countSize)) { - nuint err_code = stats.size; - if (ERR_isError(err_code)) - { - return err_code; - } + stats.size = countSize; + return stats; } - *seqHead = (byte)((stats.LLtype << 6) + (stats.Offtype << 4) + (stats.MLtype << 2)); - lastCountSize = stats.lastCountSize; - op += stats.size; - longOffsets = stats.longOffsets; + if (stats.Offtype == (uint)SymbolEncodingType_e.set_compressed) + stats.lastCountSize = countSize; + op += countSize; + assert(op <= oend); } + } + { + uint max = 52; + nuint mostFrequent = HIST_countFast_wksp( + countWorkspace, + &max, + mlCodeTable, + nbSeq, + entropyWorkspace, + entropyWkspSize + ); + nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; + stats.MLtype = (uint)ZSTD_selectEncodingType( + &nextEntropy->matchlength_repeatMode, + countWorkspace, + max, + mostFrequent, + nbSeq, + 9, + prevEntropy->matchlengthCTable, + 
ML_defaultNorm, + ML_defaultNormLog, + ZSTD_DefaultPolicy_e.ZSTD_defaultAllowed, + strategy + ); + assert( + !( + stats.MLtype < (uint)SymbolEncodingType_e.set_compressed + && nextEntropy->matchlength_repeatMode != FSE_repeat.FSE_repeat_none + ) + ); { - nuint bitstreamSize = ZSTD_encodeSequences( + nuint countSize = ZSTD_buildCTable( op, (nuint)(oend - op), CTable_MatchLength, + 9, + (SymbolEncodingType_e)stats.MLtype, + countWorkspace, + max, mlCodeTable, - CTable_OffsetBits, - ofCodeTable, - CTable_LitLength, - llCodeTable, - sequences, nbSeq, - longOffsets, - bmi2 + ML_defaultNorm, + ML_defaultNormLog, + 52, + prevEntropy->matchlengthCTable, + sizeof(uint) * 363, + entropyWorkspace, + entropyWkspSize ); + if (ERR_isError(countSize)) { - nuint err_code = bitstreamSize; - if (ERR_isError(err_code)) - { - return err_code; - } + stats.size = countSize; + return stats; } - op += bitstreamSize; + if (stats.MLtype == (uint)SymbolEncodingType_e.set_compressed) + stats.lastCountSize = countSize; + op += countSize; assert(op <= oend); - if (lastCountSize != 0 && lastCountSize + bitstreamSize < 4) - { - assert(lastCountSize + bitstreamSize == 3); - return 0; - } } - - return (nuint)(op - ostart); } - private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer( - void* dst, - nuint dstCapacity, - void* literals, - nuint litSize, - nuint blockSize, - SeqStore_t* seqStorePtr, - ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - ZSTD_CCtx_params_s* cctxParams, - void* entropyWorkspace, - nuint entropyWkspSize, - int bmi2 - ) - { - nuint cSize = ZSTD_entropyCompressSeqStore_internal( - dst, + stats.size = (nuint)(op - ostart); + return stats; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_entropyCompressSeqStore_internal( + void* dst, + nuint dstCapacity, + void* literals, + nuint litSize, + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + 
ZSTD_CCtx_params_s* cctxParams, + void* entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) + { + ZSTD_strategy strategy = cctxParams->cParams.strategy; + uint* count = (uint*)entropyWorkspace; + uint* CTable_LitLength = nextEntropy->fse.litlengthCTable; + uint* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; + uint* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; + SeqDef_s* sequences = seqStorePtr->sequencesStart; + nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + byte* ofCodeTable = seqStorePtr->ofCode; + byte* llCodeTable = seqStorePtr->llCode; + byte* mlCodeTable = seqStorePtr->mlCode; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstCapacity; + byte* op = ostart; + nuint lastCountSize; + int longOffsets = 0; + entropyWorkspace = count + (52 + 1); + entropyWkspSize -= (52 + 1) * sizeof(uint); + assert(entropyWkspSize >= (8 << 10) + 512); + { + nuint numSequences = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + /* Base suspicion of uncompressibility on ratio of literals to sequences */ + int suspectUncompressible = + numSequences == 0 || litSize / numSequences >= 20 ? 
1 : 0; + nuint cSize = ZSTD_compressLiterals( + op, dstCapacity, literals, litSize, - seqStorePtr, - prevEntropy, - nextEntropy, - cctxParams, entropyWorkspace, entropyWkspSize, + &prevEntropy->huf, + &nextEntropy->huf, + cctxParams->cParams.strategy, + ZSTD_literalsCompressionIsDisabled(cctxParams), + suspectUncompressible, bmi2 ); - if (cSize == 0) - return 0; - if ( - cSize == unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) - && blockSize <= dstCapacity - ) - { - return 0; - } - { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -4265,2170 +4122,2313 @@ int bmi2 } } - { - nuint maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy); - if (cSize >= maxCSize) - return 0; - } + assert(cSize <= dstCapacity); + op += cSize; + } - assert(cSize < 1 << 17); - return cSize; + if (oend - op < 3 + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - private static nuint ZSTD_entropyCompressSeqStore( - SeqStore_t* seqStorePtr, - ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - ZSTD_CCtx_params_s* cctxParams, - void* dst, - nuint dstCapacity, - nuint srcSize, - void* entropyWorkspace, - nuint entropyWkspSize, - int bmi2 - ) + if (nbSeq < 128) { - return ZSTD_entropyCompressSeqStore_wExtLitBuffer( - dst, - dstCapacity, - seqStorePtr->litStart, - (nuint)(seqStorePtr->lit - seqStorePtr->litStart), - srcSize, + *op++ = (byte)nbSeq; + } + else if (nbSeq < 0x7F00) + { + op[0] = (byte)((nbSeq >> 8) + 0x80); + op[1] = (byte)nbSeq; + op += 2; + } + else + { + op[0] = 0xFF; + MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00)); + op += 3; + } + + assert(op <= oend); + if (nbSeq == 0) + { + memcpy(&nextEntropy->fse, &prevEntropy->fse, (uint)sizeof(ZSTD_fseCTables_t)); + return (nuint)(op - ostart); + } + + { + byte* seqHead = op++; + /* build stats for sequences */ + ZSTD_symbolEncodingTypeStats_t stats = ZSTD_buildSequencesStatistics( seqStorePtr, - prevEntropy, - nextEntropy, - 
cctxParams, + nbSeq, + &prevEntropy->fse, + &nextEntropy->fse, + op, + oend, + strategy, + count, entropyWorkspace, - entropyWkspSize, - bmi2 + entropyWkspSize ); - } - - private static readonly ZSTD_BlockCompressor_f?[][] blockCompressor = - new ZSTD_BlockCompressor_f?[4][] { - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast, - ZSTD_compressBlock_fast, - ZSTD_compressBlock_doubleFast, - ZSTD_compressBlock_greedy, - ZSTD_compressBlock_lazy, - ZSTD_compressBlock_lazy2, - ZSTD_compressBlock_btlazy2, - ZSTD_compressBlock_btopt, - ZSTD_compressBlock_btultra, - ZSTD_compressBlock_btultra2, - }, - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_fast_extDict, - ZSTD_compressBlock_doubleFast_extDict, - ZSTD_compressBlock_greedy_extDict, - ZSTD_compressBlock_lazy_extDict, - ZSTD_compressBlock_lazy2_extDict, - ZSTD_compressBlock_btlazy2_extDict, - ZSTD_compressBlock_btopt_extDict, - ZSTD_compressBlock_btultra_extDict, - ZSTD_compressBlock_btultra_extDict, - }, - new ZSTD_BlockCompressor_f[10] - { - ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_fast_dictMatchState, - ZSTD_compressBlock_doubleFast_dictMatchState, - ZSTD_compressBlock_greedy_dictMatchState, - ZSTD_compressBlock_lazy_dictMatchState, - ZSTD_compressBlock_lazy2_dictMatchState, - ZSTD_compressBlock_btlazy2_dictMatchState, - ZSTD_compressBlock_btopt_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState, - ZSTD_compressBlock_btultra_dictMatchState, - }, - new ZSTD_BlockCompressor_f?[10] + nuint err_code = stats.size; + if (ERR_isError(err_code)) { - null, - null, - null, - ZSTD_compressBlock_greedy_dedicatedDictSearch, - ZSTD_compressBlock_lazy_dedicatedDictSearch, - ZSTD_compressBlock_lazy2_dedicatedDictSearch, - null, - null, - null, - null, - }, - }; - private static readonly ZSTD_BlockCompressor_f[][] rowBasedBlockCompressors = - new ZSTD_BlockCompressor_f[4][] + return err_code; + } + } + + *seqHead = (byte)((stats.LLtype << 6) + 
(stats.Offtype << 4) + (stats.MLtype << 2)); + lastCountSize = stats.lastCountSize; + op += stats.size; + longOffsets = stats.longOffsets; + } + + { + nuint bitstreamSize = ZSTD_encodeSequences( + op, + (nuint)(oend - op), + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets, + bmi2 + ); { - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_row, - ZSTD_compressBlock_lazy_row, - ZSTD_compressBlock_lazy2_row, - }, - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_extDict_row, - ZSTD_compressBlock_lazy_extDict_row, - ZSTD_compressBlock_lazy2_extDict_row, - }, - new ZSTD_BlockCompressor_f[3] - { - ZSTD_compressBlock_greedy_dictMatchState_row, - ZSTD_compressBlock_lazy_dictMatchState_row, - ZSTD_compressBlock_lazy2_dictMatchState_row, - }, - new ZSTD_BlockCompressor_f[3] + nuint err_code = bitstreamSize; + if (ERR_isError(err_code)) { - ZSTD_compressBlock_greedy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy2_dedicatedDictSearch_row, - }, - }; - - /* ZSTD_selectBlockCompressor() : - * Not static, but internal use only (used by long distance matcher) - * assumption : strat is a valid strategy */ - private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor( - ZSTD_strategy strat, - ZSTD_paramSwitch_e useRowMatchFinder, - ZSTD_dictMode_e dictMode + return err_code; + } + } + + op += bitstreamSize; + assert(op <= oend); + if (lastCountSize != 0 && lastCountSize + bitstreamSize < 4) + { + assert(lastCountSize + bitstreamSize == 3); + return 0; + } + } + + return (nuint)(op - ostart); + } + + private static nuint ZSTD_entropyCompressSeqStore_wExtLitBuffer( + void* dst, + nuint dstCapacity, + void* literals, + nuint litSize, + nuint blockSize, + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + void* 
entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) + { + nuint cSize = ZSTD_entropyCompressSeqStore_internal( + dst, + dstCapacity, + literals, + litSize, + seqStorePtr, + prevEntropy, + nextEntropy, + cctxParams, + entropyWorkspace, + entropyWkspSize, + bmi2 + ); + if (cSize == 0) + return 0; + if ( + cSize == unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + && blockSize <= dstCapacity ) { - ZSTD_BlockCompressor_f? selectedCompressor; - assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); - if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder) != 0) + return 0; + } + + { + nuint err_code = cSize; + if (ERR_isError(err_code)) { - assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - selectedCompressor = rowBasedBlockCompressors[(int)dictMode][ - (int)strat - (int)ZSTD_strategy.ZSTD_greedy - ]; + return err_code; } - else + } + + { + nuint maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy); + if (cSize >= maxCSize) + return 0; + } + + assert(cSize < 1 << 17); + return cSize; + } + + private static nuint ZSTD_entropyCompressSeqStore( + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + nuint srcSize, + void* entropyWorkspace, + nuint entropyWkspSize, + int bmi2 + ) + { + return ZSTD_entropyCompressSeqStore_wExtLitBuffer( + dst, + dstCapacity, + seqStorePtr->litStart, + (nuint)(seqStorePtr->lit - seqStorePtr->litStart), + srcSize, + seqStorePtr, + prevEntropy, + nextEntropy, + cctxParams, + entropyWorkspace, + entropyWkspSize, + bmi2 + ); + } + + private static readonly ZSTD_BlockCompressor_f?[][] blockCompressor = + new ZSTD_BlockCompressor_f?[4][] + { + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast, + ZSTD_compressBlock_fast, + ZSTD_compressBlock_doubleFast, + ZSTD_compressBlock_greedy, + ZSTD_compressBlock_lazy, + 
ZSTD_compressBlock_lazy2, + ZSTD_compressBlock_btlazy2, + ZSTD_compressBlock_btopt, + ZSTD_compressBlock_btultra, + ZSTD_compressBlock_btultra2, + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_fast_extDict, + ZSTD_compressBlock_doubleFast_extDict, + ZSTD_compressBlock_greedy_extDict, + ZSTD_compressBlock_lazy_extDict, + ZSTD_compressBlock_lazy2_extDict, + ZSTD_compressBlock_btlazy2_extDict, + ZSTD_compressBlock_btopt_extDict, + ZSTD_compressBlock_btultra_extDict, + ZSTD_compressBlock_btultra_extDict, + }, + new ZSTD_BlockCompressor_f[10] + { + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_fast_dictMatchState, + ZSTD_compressBlock_doubleFast_dictMatchState, + ZSTD_compressBlock_greedy_dictMatchState, + ZSTD_compressBlock_lazy_dictMatchState, + ZSTD_compressBlock_lazy2_dictMatchState, + ZSTD_compressBlock_btlazy2_dictMatchState, + ZSTD_compressBlock_btopt_dictMatchState, + ZSTD_compressBlock_btultra_dictMatchState, + ZSTD_compressBlock_btultra_dictMatchState, + }, + new ZSTD_BlockCompressor_f?[10] { - selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; - } + null, + null, + null, + ZSTD_compressBlock_greedy_dedicatedDictSearch, + ZSTD_compressBlock_lazy_dedicatedDictSearch, + ZSTD_compressBlock_lazy2_dedicatedDictSearch, + null, + null, + null, + null, + }, + }; + private static readonly ZSTD_BlockCompressor_f[][] rowBasedBlockCompressors = + new ZSTD_BlockCompressor_f[4][] + { + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_row, + ZSTD_compressBlock_lazy_row, + ZSTD_compressBlock_lazy2_row, + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_extDict_row, + ZSTD_compressBlock_lazy_extDict_row, + ZSTD_compressBlock_lazy2_extDict_row, + }, + new ZSTD_BlockCompressor_f[3] + { + ZSTD_compressBlock_greedy_dictMatchState_row, + ZSTD_compressBlock_lazy_dictMatchState_row, + ZSTD_compressBlock_lazy2_dictMatchState_row, + }, + new ZSTD_BlockCompressor_f[3] + { + 
ZSTD_compressBlock_greedy_dedicatedDictSearch_row, + ZSTD_compressBlock_lazy_dedicatedDictSearch_row, + ZSTD_compressBlock_lazy2_dedicatedDictSearch_row, + }, + }; + + /* ZSTD_selectBlockCompressor() : + * Not static, but internal use only (used by long distance matcher) + * assumption : strat is a valid strategy */ + private static ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor( + ZSTD_strategy strat, + ZSTD_paramSwitch_e useRowMatchFinder, + ZSTD_dictMode_e dictMode + ) + { + ZSTD_BlockCompressor_f? selectedCompressor; + assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); + if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder) != 0) + { + assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + selectedCompressor = rowBasedBlockCompressors[(int)dictMode][ + (int)strat - (int)ZSTD_strategy.ZSTD_greedy + ]; + } + else + { + selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; + } + + assert(selectedCompressor != null); + return selectedCompressor.NotNull(); + } - assert(selectedCompressor != null); - return selectedCompressor.NotNull(); + private static void ZSTD_storeLastLiterals( + SeqStore_t* seqStorePtr, + byte* anchor, + nuint lastLLSize + ) + { + memcpy(seqStorePtr->lit, anchor, (uint)lastLLSize); + seqStorePtr->lit += lastLLSize; + } + + private static void ZSTD_resetSeqStore(SeqStore_t* ssPtr) + { + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; + ssPtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; + } + + /* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. 
*/ + private static nuint ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, + nuint nbExternalSeqs, + nuint outSeqsCapacity, + nuint srcSize + ) + { + if (nbExternalSeqs > outSeqsCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); } - private static void ZSTD_storeLastLiterals( - SeqStore_t* seqStorePtr, - byte* anchor, - nuint lastLLSize - ) + if (nbExternalSeqs == 0 && srcSize > 0) { - memcpy(seqStorePtr->lit, anchor, (uint)lastLLSize); - seqStorePtr->lit += lastLLSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); } - private static void ZSTD_resetSeqStore(SeqStore_t* ssPtr) + if (srcSize == 0) { - ssPtr->lit = ssPtr->litStart; - ssPtr->sequences = ssPtr->sequencesStart; - ssPtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; + outSeqs[0] = new ZSTD_Sequence(); + return 1; } - /* ZSTD_postProcessSequenceProducerResult() : - * Validates and post-processes sequences obtained through the external matchfinder API: - * - Checks whether nbExternalSeqs represents an error condition. - * - Appends a block delimiter to outSeqs if one is not already present. - * See zstd.h for context regarding block delimiters. - * Returns the number of sequences after post-processing, or an error code. 
*/ - private static nuint ZSTD_postProcessSequenceProducerResult( - ZSTD_Sequence* outSeqs, - nuint nbExternalSeqs, - nuint outSeqsCapacity, - nuint srcSize - ) { - if (nbExternalSeqs > outSeqsCapacity) + ZSTD_Sequence lastSeq = outSeqs[nbExternalSeqs - 1]; + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + return nbExternalSeqs; } - if (nbExternalSeqs == 0 && srcSize > 0) + if (nbExternalSeqs == outSeqsCapacity) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) + ); } - if (srcSize == 0) - { - outSeqs[0] = new ZSTD_Sequence(); - return 1; - } + outSeqs[nbExternalSeqs] = new ZSTD_Sequence(); + return nbExternalSeqs + 1; + } + } - { - ZSTD_Sequence lastSeq = outSeqs[nbExternalSeqs - 1]; - if (lastSeq.offset == 0 && lastSeq.matchLength == 0) - { - return nbExternalSeqs; - } + /* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
*/ + private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seqBufSize) + { + nuint matchLenSum, + litLenSum, + i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) + { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } - if (nbExternalSeqs == outSeqsCapacity) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) - ); - } + return litLenSum + matchLenSum; + } - outSeqs[nbExternalSeqs] = new ZSTD_Sequence(); - return nbExternalSeqs + 1; - } - } + /** + * Function to validate sequences produced by a block compressor. + */ + private static void ZSTD_validateSeqStore( + SeqStore_t* seqStore, + ZSTD_compressionParameters* cParams + ) { } - /* ZSTD_fastSequenceLengthSum() : - * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. - * Similar to another function in zstd_compress.c (determine_blockSize), - * except it doesn't check for a block delimiter to end summation. - * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). - * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
*/ - private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seqBufSize) + private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSize) + { + ZSTD_MatchState_t* ms = &zc->blockState.matchState; + assert(srcSize <= 1 << 17); + ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); + if (srcSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1) { - nuint matchLenSum, - litLenSum, - i; - matchLenSum = 0; - litLenSum = 0; - for (i = 0; i < seqBufSize; i++) + if (zc->appliedParams.cParams.strategy >= ZSTD_strategy.ZSTD_btopt) + { + ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); + } + else { - litLenSum += seqBuf[i].litLength; - matchLenSum += seqBuf[i].matchLength; + ZSTD_ldm_skipSequences( + &zc->externSeqStore, + srcSize, + zc->appliedParams.cParams.minMatch + ); } - return litLenSum + matchLenSum; + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress; } - /** - * Function to validate sequences produced by a block compressor. 
- */ - private static void ZSTD_validateSeqStore( - SeqStore_t* seqStore, - ZSTD_compressionParameters* cParams - ) { } - - private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSize) + ZSTD_resetSeqStore(&zc->seqStore); + ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; + ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; + assert(ms->dictMatchState == null || ms->loadedDictEnd == ms->window.dictLimit); { - ZSTD_MatchState_t* ms = &zc->blockState.matchState; - assert(srcSize <= 1 << 17); - ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); - if (srcSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1) - { - if (zc->appliedParams.cParams.strategy >= ZSTD_strategy.ZSTD_btopt) - { - ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); - } - else - { - ZSTD_ldm_skipSequences( - &zc->externSeqStore, - srcSize, - zc->appliedParams.cParams.minMatch + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + uint curr = (uint)(istart - @base); +#if DEBUG + if (sizeof(nint) == 8) + assert(istart - @base < unchecked((nint)(uint)-1)); +#endif + if (curr > ms->nextToUpdate + 384) + ms->nextToUpdate = + curr + - ( + 192 < curr - ms->nextToUpdate - 384 + ? 
192 + : curr - ms->nextToUpdate - 384 ); - } - - return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress; - } + } - ZSTD_resetSeqStore(&zc->seqStore); - ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; - ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode; - assert(ms->dictMatchState == null || ms->loadedDictEnd == ms->window.dictLimit); + { + ZSTD_dictMode_e dictMode = ZSTD_matchState_dictMode(ms); + nuint lastLLSize; { - byte* @base = ms->window.@base; - byte* istart = (byte*)src; - uint curr = (uint)(istart - @base); -#if DEBUG - if (sizeof(nint) == 8) - assert(istart - @base < unchecked((nint)(uint)-1)); -#endif - if (curr > ms->nextToUpdate + 384) - ms->nextToUpdate = - curr - - ( - 192 < curr - ms->nextToUpdate - 384 - ? 192 - : curr - ms->nextToUpdate - 384 - ); + int i; + for (i = 0; i < 3; ++i) + zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } + if (zc->externSeqStore.pos < zc->externSeqStore.size) { - ZSTD_dictMode_e dictMode = ZSTD_matchState_dictMode(ms); - nuint lastLLSize; + assert( + zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable + ); + if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { - int i; - for (i = 0; i < 3; ++i) - zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported + ) + ); } - if (zc->externSeqStore.pos < zc->externSeqStore.size) + lastLLSize = ZSTD_ldm_blockCompress( + &zc->externSeqStore, + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, + src, + srcSize + ); + assert(zc->externSeqStore.pos <= zc->externSeqStore.size); + } + else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + RawSeqStore_t ldmSeqStore = kNullRawSeqStore; + if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { - assert( - zc->appliedParams.ldmParams.enableLdm == 
ZSTD_paramSwitch_e.ZSTD_ps_disable + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported + ) ); - if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) - { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported - ) - ); - } + } - lastLLSize = ZSTD_ldm_blockCompress( - &zc->externSeqStore, - ms, - &zc->seqStore, - zc->blockState.nextCBlock->rep, - zc->appliedParams.useRowMatchFinder, + ldmSeqStore.seq = zc->ldmSequences; + ldmSeqStore.capacity = zc->maxNbLdmSequences; + { + /* Updates ldmSeqStore.size */ + nuint err_code = ZSTD_ldm_generateSequences( + &zc->ldmState, + &ldmSeqStore, + &zc->appliedParams.ldmParams, src, srcSize ); - assert(zc->externSeqStore.pos <= zc->externSeqStore.size); - } - else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - RawSeqStore_t ldmSeqStore = kNullRawSeqStore; - if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) - { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported - ) - ); - } - - ldmSeqStore.seq = zc->ldmSequences; - ldmSeqStore.capacity = zc->maxNbLdmSequences; + if (ERR_isError(err_code)) { - /* Updates ldmSeqStore.size */ - nuint err_code = ZSTD_ldm_generateSequences( - &zc->ldmState, - &ldmSeqStore, - &zc->appliedParams.ldmParams, - src, - srcSize - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - lastLLSize = ZSTD_ldm_blockCompress( - &ldmSeqStore, - ms, - &zc->seqStore, - zc->blockState.nextCBlock->rep, - zc->appliedParams.useRowMatchFinder, + lastLLSize = ZSTD_ldm_blockCompress( + &ldmSeqStore, + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + zc->appliedParams.useRowMatchFinder, + src, + srcSize + ); + assert(ldmSeqStore.pos == ldmSeqStore.size); + } + else if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) + { + assert(zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)); + assert(zc->appliedParams.extSeqProdFunc 
!= null); + { + uint windowSize = (uint)1 << (int)zc->appliedParams.cParams.windowLog; + nuint nbExternalSeqs = ( + (delegate* managed< + void*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + void*, + nuint, + int, + nuint, + nuint>) + zc->appliedParams.extSeqProdFunc + )( + zc->appliedParams.extSeqProdState, + zc->extSeqBuf, + zc->extSeqBufCapacity, src, + srcSize, + null, + 0, + zc->appliedParams.compressionLevel, + windowSize + ); + nuint nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->extSeqBuf, + nbExternalSeqs, + zc->extSeqBufCapacity, srcSize ); - assert(ldmSeqStore.pos == ldmSeqStore.size); - } - else if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) - { - assert(zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)); - assert(zc->appliedParams.extSeqProdFunc != null); + if (!ERR_isError(nbPostProcessedSeqs)) { - uint windowSize = (uint)1 << (int)zc->appliedParams.cParams.windowLog; - nuint nbExternalSeqs = ( - (delegate* managed< - void*, - ZSTD_Sequence*, - nuint, - void*, - nuint, - void*, - nuint, - int, - nuint, - nuint>) - zc->appliedParams.extSeqProdFunc - )( - zc->appliedParams.extSeqProdState, - zc->extSeqBuf, - zc->extSeqBufCapacity, - src, - srcSize, - null, - 0, - zc->appliedParams.compressionLevel, - windowSize - ); - nuint nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition + { + idx = 0, + posInSequence = 0, + posInSrc = 0, + }; + nuint seqLenSum = ZSTD_fastSequenceLengthSum( zc->extSeqBuf, - nbExternalSeqs, - zc->extSeqBufCapacity, - srcSize + nbPostProcessedSeqs ); - if (!ERR_isError(nbPostProcessedSeqs)) + if (seqLenSum > srcSize) { - ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition - { - idx = 0, - posInSequence = 0, - posInSrc = 0, - }; - nuint seqLenSum = ZSTD_fastSequenceLengthSum( - zc->extSeqBuf, - nbPostProcessedSeqs + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid + ) ); - if (seqLenSum > srcSize) 
- { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid - ) - ); - } - - { - nuint err_code = ZSTD_transferSequences_wBlockDelim( - zc, - &seqPos, - zc->extSeqBuf, - nbPostProcessedSeqs, - src, - srcSize, - zc->appliedParams.searchForExternalRepcodes - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - ms->ldmSeqStore = null; - return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; - } - - if (zc->appliedParams.enableMatchFinderFallback == 0) - { - return nbPostProcessedSeqs; } { - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( - zc->appliedParams.cParams.strategy, - zc->appliedParams.useRowMatchFinder, - dictMode - ); - ms->ldmSeqStore = null; - lastLLSize = blockCompressor( - ms, - &zc->seqStore, - zc->blockState.nextCBlock->rep, + nuint err_code = ZSTD_transferSequences_wBlockDelim( + zc, + &seqPos, + zc->extSeqBuf, + nbPostProcessedSeqs, src, - srcSize + srcSize, + zc->appliedParams.searchForExternalRepcodes ); + if (ERR_isError(err_code)) + { + return err_code; + } } + + ms->ldmSeqStore = null; + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; } - } - else - { - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( - zc->appliedParams.cParams.strategy, - zc->appliedParams.useRowMatchFinder, - dictMode - ); - ms->ldmSeqStore = null; - lastLLSize = blockCompressor( - ms, - &zc->seqStore, - zc->blockState.nextCBlock->rep, - src, - srcSize - ); - } - { - byte* lastLiterals = (byte*)src + srcSize - lastLLSize; - ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); + if (zc->appliedParams.enableMatchFinderFallback == 0) + { + return nbPostProcessedSeqs; + } + + { + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode + ); + ms->ldmSeqStore = null; + lastLLSize = blockCompressor( + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + src, + srcSize + ); + 
} } } + else + { + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode + ); + ms->ldmSeqStore = null; + lastLLSize = blockCompressor( + ms, + &zc->seqStore, + zc->blockState.nextCBlock->rep, + src, + srcSize + ); + } - ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); - return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; + { + byte* lastLiterals = (byte*)src + srcSize - lastLLSize; + ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); + } } - private static nuint ZSTD_copyBlockSequences( - SeqCollector* seqCollector, - SeqStore_t* seqStore, - uint* prevRepcodes - ) + ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); + return (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress; + } + + private static nuint ZSTD_copyBlockSequences( + SeqCollector* seqCollector, + SeqStore_t* seqStore, + uint* prevRepcodes + ) + { + SeqDef_s* inSeqs = seqStore->sequencesStart; + nuint nbInSequences = (nuint)(seqStore->sequences - inSeqs); + nuint nbInLiterals = (nuint)(seqStore->lit - seqStore->litStart); + ZSTD_Sequence* outSeqs = + seqCollector->seqIndex == 0 + ? seqCollector->seqStart + : seqCollector->seqStart + seqCollector->seqIndex; + nuint nbOutSequences = nbInSequences + 1; + nuint nbOutLiterals = 0; + repcodes_s repcodes; + nuint i; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + if (nbOutSequences > seqCollector->maxSequences - seqCollector->seqIndex) { - SeqDef_s* inSeqs = seqStore->sequencesStart; - nuint nbInSequences = (nuint)(seqStore->sequences - inSeqs); - nuint nbInLiterals = (nuint)(seqStore->lit - seqStore->litStart); - ZSTD_Sequence* outSeqs = - seqCollector->seqIndex == 0 - ? 
seqCollector->seqStart - : seqCollector->seqStart + seqCollector->seqIndex; - nuint nbOutSequences = nbInSequences + 1; - nuint nbOutLiterals = 0; - repcodes_s repcodes; - nuint i; - assert(seqCollector->seqIndex <= seqCollector->maxSequences); - if (nbOutSequences > seqCollector->maxSequences - seqCollector->seqIndex) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - memcpy(&repcodes, prevRepcodes, (uint)sizeof(repcodes_s)); - for (i = 0; i < nbInSequences; ++i) + memcpy(&repcodes, prevRepcodes, (uint)sizeof(repcodes_s)); + for (i = 0; i < nbInSequences; ++i) + { + uint rawOffset; + outSeqs[i].litLength = inSeqs[i].litLength; + outSeqs[i].matchLength = (uint)(inSeqs[i].mlBase + 3); + outSeqs[i].rep = 0; + if (i == seqStore->longLengthPos) { - uint rawOffset; - outSeqs[i].litLength = inSeqs[i].litLength; - outSeqs[i].matchLength = (uint)(inSeqs[i].mlBase + 3); - outSeqs[i].rep = 0; - if (i == seqStore->longLengthPos) + if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) { - if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) - { - outSeqs[i].litLength += 0x10000; - } - else if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) - { - outSeqs[i].matchLength += 0x10000; - } + outSeqs[i].litLength += 0x10000; } - - if (1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3) + else if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) { - assert(1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3); - uint repcode = inSeqs[i].offBase; - assert(repcode > 0); - outSeqs[i].rep = repcode; - if (outSeqs[i].litLength != 0) + outSeqs[i].matchLength += 0x10000; + } + } + + if (1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3) + { + assert(1 <= inSeqs[i].offBase && inSeqs[i].offBase <= 3); + uint repcode = inSeqs[i].offBase; + assert(repcode > 0); + outSeqs[i].rep = repcode; 
+ if (outSeqs[i].litLength != 0) + { + rawOffset = repcodes.rep[repcode - 1]; + } + else + { + if (repcode == 3) { - rawOffset = repcodes.rep[repcode - 1]; + assert(repcodes.rep[0] > 1); + rawOffset = repcodes.rep[0] - 1; } else { - if (repcode == 3) - { - assert(repcodes.rep[0] > 1); - rawOffset = repcodes.rep[0] - 1; - } - else - { - rawOffset = repcodes.rep[repcode]; - } + rawOffset = repcodes.rep[repcode]; } } - else - { - assert(inSeqs[i].offBase > 3); - rawOffset = inSeqs[i].offBase - 3; - } - - outSeqs[i].offset = rawOffset; - ZSTD_updateRep(repcodes.rep, inSeqs[i].offBase, inSeqs[i].litLength == 0 ? 1U : 0U); - nbOutLiterals += outSeqs[i].litLength; } - - assert(nbInLiterals >= nbOutLiterals); + else { - nuint lastLLSize = nbInLiterals - nbOutLiterals; - outSeqs[nbInSequences].litLength = (uint)lastLLSize; - outSeqs[nbInSequences].matchLength = 0; - outSeqs[nbInSequences].offset = 0; - assert(nbOutSequences == nbInSequences + 1); + assert(inSeqs[i].offBase > 3); + rawOffset = inSeqs[i].offBase - 3; } - seqCollector->seqIndex += nbOutSequences; - assert(seqCollector->seqIndex <= seqCollector->maxSequences); - return 0; + outSeqs[i].offset = rawOffset; + ZSTD_updateRep(repcodes.rep, inSeqs[i].offBase, inSeqs[i].litLength == 0 ? 1U : 0U); + nbOutLiterals += outSeqs[i].litLength; } - /*! ZSTD_sequenceBound() : - * `srcSize` : size of the input buffer - * @return : upper-bound for the number of sequences that can be generated - * from a buffer of srcSize bytes - * - * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). 
- */ - public static nuint ZSTD_sequenceBound(nuint srcSize) + assert(nbInLiterals >= nbOutLiterals); { - nuint maxNbSeq = srcSize / 3 + 1; - nuint maxNbDelims = srcSize / (1 << 10) + 1; - return maxNbSeq + maxNbDelims; + nuint lastLLSize = nbInLiterals - nbOutLiterals; + outSeqs[nbInSequences].litLength = (uint)lastLLSize; + outSeqs[nbInSequences].matchLength = 0; + outSeqs[nbInSequences].offset = 0; + assert(nbOutSequences == nbInSequences + 1); } - /*! ZSTD_generateSequences() : - * WARNING: This function is meant for debugging and informational purposes ONLY! - * Its implementation is flawed, and it will be deleted in a future version. - * It is not guaranteed to succeed, as there are several cases where it will give - * up and fail. You should NOT use this function in production code. - * - * This function is deprecated, and will be removed in a future version. - * - * Generate sequences using ZSTD_compress2(), given a source buffer. - * - * @param zc The compression context to be used for ZSTD_compress2(). Set any - * compression parameters you need on this context. - * @param outSeqs The output sequences buffer of size @p outSeqsSize - * @param outSeqsCapacity The size of the output sequences buffer. - * ZSTD_sequenceBound(srcSize) is an upper bound on the number - * of sequences that can be generated. - * @param src The source buffer to generate sequences from of size @p srcSize. - * @param srcSize The size of the source buffer. - * - * Each block will end with a dummy sequence - * with offset == 0, matchLength == 0, and litLength == length of last literals. - * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) - * simply acts as a block delimiter. - * - * @returns The number of sequences generated, necessarily less than - * ZSTD_sequenceBound(srcSize), or an error code that can be checked - * with ZSTD_isError(). 
- */ - public static nuint ZSTD_generateSequences( - ZSTD_CCtx_s* zc, - ZSTD_Sequence* outSeqs, - nuint outSeqsSize, - void* src, - nuint srcSize - ) - { - nuint dstCapacity = ZSTD_compressBound(srcSize); - /* Make C90 happy. */ - void* dst; - SeqCollector seqCollector; - { - int targetCBlockSize; - { - nuint err_code = ZSTD_CCtx_getParameter( - zc, - ZSTD_cParameter.ZSTD_c_targetCBlockSize, - &targetCBlockSize - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + seqCollector->seqIndex += nbOutSequences; + assert(seqCollector->seqIndex <= seqCollector->maxSequences); + return 0; + } - if (targetCBlockSize != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - } - } + /*! ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated + * from a buffer of srcSize bytes + * + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ + public static nuint ZSTD_sequenceBound(nuint srcSize) + { + nuint maxNbSeq = srcSize / 3 + 1; + nuint maxNbDelims = srcSize / (1 << 10) + 1; + return maxNbSeq + maxNbDelims; + } + /*! ZSTD_generateSequences() : + * WARNING: This function is meant for debugging and informational purposes ONLY! + * Its implementation is flawed, and it will be deleted in a future version. + * It is not guaranteed to succeed, as there are several cases where it will give + * up and fail. You should NOT use this function in production code. + * + * This function is deprecated, and will be removed in a future version. + * + * Generate sequences using ZSTD_compress2(), given a source buffer. + * + * @param zc The compression context to be used for ZSTD_compress2(). Set any + * compression parameters you need on this context. + * @param outSeqs The output sequences buffer of size @p outSeqsSize + * @param outSeqsCapacity The size of the output sequences buffer. 
+ * ZSTD_sequenceBound(srcSize) is an upper bound on the number + * of sequences that can be generated. + * @param src The source buffer to generate sequences from of size @p srcSize. + * @param srcSize The size of the source buffer. + * + * Each block will end with a dummy sequence + * with offset == 0, matchLength == 0, and litLength == length of last literals. + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) + * simply acts as a block delimiter. + * + * @returns The number of sequences generated, necessarily less than + * ZSTD_sequenceBound(srcSize), or an error code that can be checked + * with ZSTD_isError(). + */ + public static nuint ZSTD_generateSequences( + ZSTD_CCtx_s* zc, + ZSTD_Sequence* outSeqs, + nuint outSeqsSize, + void* src, + nuint srcSize + ) + { + nuint dstCapacity = ZSTD_compressBound(srcSize); + /* Make C90 happy. */ + void* dst; + SeqCollector seqCollector; + { + int targetCBlockSize; { - int nbWorkers; - { - nuint err_code = ZSTD_CCtx_getParameter( - zc, - ZSTD_cParameter.ZSTD_c_nbWorkers, - &nbWorkers - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (nbWorkers != 0) + nuint err_code = ZSTD_CCtx_getParameter( + zc, + ZSTD_cParameter.ZSTD_c_targetCBlockSize, + &targetCBlockSize + ); + if (ERR_isError(err_code)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + return err_code; } } - dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); - if (dst == null) + if (targetCBlockSize != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); } + } - seqCollector.collectSequences = 1; - seqCollector.seqStart = outSeqs; - seqCollector.seqIndex = 0; - seqCollector.maxSequences = outSeqsSize; - zc->seqCollector = seqCollector; + { + int nbWorkers; { - nuint ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); - ZSTD_customFree(dst, 
ZSTD_defaultCMem); + nuint err_code = ZSTD_CCtx_getParameter( + zc, + ZSTD_cParameter.ZSTD_c_nbWorkers, + &nbWorkers + ); + if (ERR_isError(err_code)) { - nuint err_code = ret; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } } - assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); - return zc->seqCollector.seqIndex; + if (nbWorkers != 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); + } + } + + dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); + if (dst == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - /*! ZSTD_mergeBlockDelimiters() : - * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - * by merging them into the literals of the next sequence. - * - * As such, the final generated result has no explicit representation of block boundaries, - * and the final last literals segment is not represented in the sequences. 
- * - * The output of this function can be fed into ZSTD_compressSequences() with CCtx - * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters - * @return : number of sequences left after merging - */ - public static nuint ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, nuint seqsSize) + seqCollector.collectSequences = 1; + seqCollector.seqStart = outSeqs; + seqCollector.seqIndex = 0; + seqCollector.maxSequences = outSeqsSize; + zc->seqCollector = seqCollector; { - nuint @in = 0; - nuint @out = 0; - for (; @in < seqsSize; ++@in) + nuint ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize); + ZSTD_customFree(dst, ZSTD_defaultCMem); { - if (sequences[@in].offset == 0 && sequences[@in].matchLength == 0) - { - if (@in != seqsSize - 1) - { - sequences[@in + 1].litLength += sequences[@in].litLength; - } - } - else + nuint err_code = ret; + if (ERR_isError(err_code)) { - sequences[@out] = sequences[@in]; - ++@out; + return err_code; } } - - return @out; } - /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */ - private static int ZSTD_isRLE(byte* src, nuint length) - { - byte* ip = src; - byte value = ip[0]; - nuint valueST = (nuint)(value * 0x0101010101010101UL); - nuint unrollSize = (nuint)(sizeof(nuint) * 4); - nuint unrollMask = unrollSize - 1; - nuint prefixLength = length & unrollMask; - nuint i; - if (length == 1) - return 1; - if (prefixLength != 0 && ZSTD_count(ip + 1, ip, ip + prefixLength) != prefixLength - 1) - { - return 0; - } + assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize)); + return zc->seqCollector.seqIndex; + } - for (i = prefixLength; i != length; i += unrollSize) + /*! ZSTD_mergeBlockDelimiters() : + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals + * by merging them into the literals of the next sequence. 
+ * + * As such, the final generated result has no explicit representation of block boundaries, + * and the final last literals segment is not represented in the sequences. + * + * The output of this function can be fed into ZSTD_compressSequences() with CCtx + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters + * @return : number of sequences left after merging + */ + public static nuint ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, nuint seqsSize) + { + nuint @in = 0; + nuint @out = 0; + for (; @in < seqsSize; ++@in) + { + if (sequences[@in].offset == 0 && sequences[@in].matchLength == 0) { - nuint u; - for (u = 0; u < unrollSize; u += (nuint)sizeof(nuint)) + if (@in != seqsSize - 1) { - if (MEM_readST(ip + i + u) != valueST) - { - return 0; - } + sequences[@in + 1].litLength += sequences[@in].litLength; } } - - return 1; - } - - /* Returns true if the given block may be RLE. - * This is just a heuristic based on the compressibility. - * It may return both false positives and false negatives. - */ - private static int ZSTD_maybeRLE(SeqStore_t* seqStore) - { - nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); - nuint nbLits = (nuint)(seqStore->lit - seqStore->litStart); - return nbSeqs < 4 && nbLits < 10 ? 1 : 0; + else + { + sequences[@out] = sequences[@in]; + ++@out; + } } - private static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* bs) - { - ZSTD_compressedBlockState_t* tmp = bs->prevCBlock; - bs->prevCBlock = bs->nextCBlock; - bs->nextCBlock = tmp; - } + return @out; + } - /* Writes the block header */ - private static void writeBlockHeader(void* op, nuint cSize, nuint blockSize, uint lastBlock) + /* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. 
*/ + private static int ZSTD_isRLE(byte* src, nuint length) + { + byte* ip = src; + byte value = ip[0]; + nuint valueST = (nuint)(value * 0x0101010101010101UL); + nuint unrollSize = (nuint)(sizeof(nuint) * 4); + nuint unrollMask = unrollSize - 1; + nuint prefixLength = length & unrollMask; + nuint i; + if (length == 1) + return 1; + if (prefixLength != 0 && ZSTD_count(ip + 1, ip, ip + prefixLength) != prefixLength - 1) { - uint cBlockHeader = - cSize == 1 - ? lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) - : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); - MEM_writeLE24(op, cBlockHeader); + return 0; } - /** ZSTD_buildBlockEntropyStats_literals() : - * Builds entropy for the literals. - * Stores literals block type (raw, rle, compressed, repeat) and - * huffman description table to hufMetadata. - * Requires ENTROPY_WORKSPACE_SIZE workspace - * @return : size of huffman description table, or an error code - */ - private static nuint ZSTD_buildBlockEntropyStats_literals( - void* src, - nuint srcSize, - ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - int literalsCompressionIsDisabled, - void* workspace, - nuint wkspSize, - int hufFlags - ) + for (i = prefixLength; i != length; i += unrollSize) { - byte* wkspStart = (byte*)workspace; - byte* wkspEnd = wkspStart + wkspSize; - byte* countWkspStart = wkspStart; - uint* countWksp = (uint*)workspace; - const nuint countWkspSize = (255 + 1) * sizeof(uint); - byte* nodeWksp = countWkspStart + countWkspSize; - nuint nodeWkspSize = (nuint)(wkspEnd - nodeWksp); - uint maxSymbolValue = 255; - uint huffLog = 11; - HUF_repeat repeat = prevHuf->repeatMode; - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - if (literalsCompressionIsDisabled != 0) - { - hufMetadata->hType = SymbolEncodingType_e.set_basic; - return 0; - } - + nuint u; + for (u = 0; u < unrollSize; u += (nuint)sizeof(nuint)) { - nuint minLitSize = (nuint)( 
- prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 6 : 63 - ); - if (srcSize <= minLitSize) + if (MEM_readST(ip + i + u) != valueST) { - hufMetadata->hType = SymbolEncodingType_e.set_basic; return 0; } } + } - { - nuint largest = HIST_count_wksp( - countWksp, - &maxSymbolValue, - (byte*)src, - srcSize, - workspace, - wkspSize - ); - { - nuint err_code = largest; - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 1; + } - if (largest == srcSize) - { - hufMetadata->hType = SymbolEncodingType_e.set_rle; - return 0; - } + /* Returns true if the given block may be RLE. + * This is just a heuristic based on the compressibility. + * It may return both false positives and false negatives. + */ + private static int ZSTD_maybeRLE(SeqStore_t* seqStore) + { + nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); + nuint nbLits = (nuint)(seqStore->lit - seqStore->litStart); + return nbSeqs < 4 && nbLits < 10 ? 1 : 0; + } - if (largest <= (srcSize >> 7) + 4) - { - hufMetadata->hType = SymbolEncodingType_e.set_basic; - return 0; - } - } + private static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* bs) + { + ZSTD_compressedBlockState_t* tmp = bs->prevCBlock; + bs->prevCBlock = bs->nextCBlock; + bs->nextCBlock = tmp; + } - if ( - repeat == HUF_repeat.HUF_repeat_check - && HUF_validateCTable(&prevHuf->CTable.e0, countWksp, maxSymbolValue) == 0 - ) + /* Writes the block header */ + private static void writeBlockHeader(void* op, nuint cSize, nuint blockSize, uint lastBlock) + { + uint cBlockHeader = + cSize == 1 + ? lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(blockSize << 3) + : lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + } + + /** ZSTD_buildBlockEntropyStats_literals() : + * Builds entropy for the literals. + * Stores literals block type (raw, rle, compressed, repeat) and + * huffman description table to hufMetadata. 
+ * Requires ENTROPY_WORKSPACE_SIZE workspace + * @return : size of huffman description table, or an error code + */ + private static nuint ZSTD_buildBlockEntropyStats_literals( + void* src, + nuint srcSize, + ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + int literalsCompressionIsDisabled, + void* workspace, + nuint wkspSize, + int hufFlags + ) + { + byte* wkspStart = (byte*)workspace; + byte* wkspEnd = wkspStart + wkspSize; + byte* countWkspStart = wkspStart; + uint* countWksp = (uint*)workspace; + const nuint countWkspSize = (255 + 1) * sizeof(uint); + byte* nodeWksp = countWkspStart + countWkspSize; + nuint nodeWkspSize = (nuint)(wkspEnd - nodeWksp); + uint maxSymbolValue = 255; + uint huffLog = 11; + HUF_repeat repeat = prevHuf->repeatMode; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + if (literalsCompressionIsDisabled != 0) + { + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; + } + + { + nuint minLitSize = (nuint)( + prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 
6 : 63 + ); + if (srcSize <= minLitSize) { - repeat = HUF_repeat.HUF_repeat_none; + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; } + } - memset(&nextHuf->CTable.e0, 0, sizeof(ulong) * 257); - huffLog = HUF_optimalTableLog( - huffLog, - srcSize, - maxSymbolValue, - nodeWksp, - nodeWkspSize, - &nextHuf->CTable.e0, + { + nuint largest = HIST_count_wksp( countWksp, - hufFlags + &maxSymbolValue, + (byte*)src, + srcSize, + workspace, + wkspSize ); - assert(huffLog <= 11); { - nuint maxBits = HUF_buildCTable_wksp( - &nextHuf->CTable.e0, - countWksp, - maxSymbolValue, - huffLog, - nodeWksp, - nodeWkspSize - ); + nuint err_code = largest; + if (ERR_isError(err_code)) { - nuint err_code = maxBits; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - huffLog = (uint)maxBits; } + if (largest == srcSize) { - nuint newCSize = HUF_estimateCompressedSize( - &nextHuf->CTable.e0, - countWksp, - maxSymbolValue - ); - nuint hSize = HUF_writeCTable_wksp( - hufMetadata->hufDesBuffer, - sizeof(byte) * 128, - &nextHuf->CTable.e0, - maxSymbolValue, - huffLog, - nodeWksp, - nodeWkspSize - ); - if (repeat != HUF_repeat.HUF_repeat_none) - { - nuint oldCSize = HUF_estimateCompressedSize( - &prevHuf->CTable.e0, - countWksp, - maxSymbolValue - ); - if ( - oldCSize < srcSize - && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize) - ) - { - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - hufMetadata->hType = SymbolEncodingType_e.set_repeat; - return 0; - } - } - - if (newCSize + hSize >= srcSize) - { - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - hufMetadata->hType = SymbolEncodingType_e.set_basic; - return 0; - } + hufMetadata->hType = SymbolEncodingType_e.set_rle; + return 0; + } - hufMetadata->hType = SymbolEncodingType_e.set_compressed; - nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; - return hSize; + if (largest <= (srcSize >> 7) + 4) + { + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; } 
} - /* ZSTD_buildDummySequencesStatistics(): - * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, - * and updates nextEntropy to the appropriate repeatMode. - */ - private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics( - ZSTD_fseCTables_t* nextEntropy + if ( + repeat == HUF_repeat.HUF_repeat_check + && HUF_validateCTable(&prevHuf->CTable.e0, countWksp, maxSymbolValue) == 0 ) { - ZSTD_symbolEncodingTypeStats_t stats = new ZSTD_symbolEncodingTypeStats_t - { - LLtype = (uint)SymbolEncodingType_e.set_basic, - Offtype = (uint)SymbolEncodingType_e.set_basic, - MLtype = (uint)SymbolEncodingType_e.set_basic, - size = 0, - lastCountSize = 0, - longOffsets = 0, - }; - nextEntropy->litlength_repeatMode = FSE_repeat.FSE_repeat_none; - nextEntropy->offcode_repeatMode = FSE_repeat.FSE_repeat_none; - nextEntropy->matchlength_repeatMode = FSE_repeat.FSE_repeat_none; - return stats; - } - - /** ZSTD_buildBlockEntropyStats_sequences() : - * Builds entropy for the sequences. - * Stores symbol compression modes and fse table to fseMetadata. - * Requires ENTROPY_WORKSPACE_SIZE wksp. 
- * @return : size of fse tables or error code */ - private static nuint ZSTD_buildBlockEntropyStats_sequences( - SeqStore_t* seqStorePtr, - ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - ZSTD_CCtx_params_s* cctxParams, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, - nuint wkspSize - ) + repeat = HUF_repeat.HUF_repeat_none; + } + + memset(&nextHuf->CTable.e0, 0, sizeof(ulong) * 257); + huffLog = HUF_optimalTableLog( + huffLog, + srcSize, + maxSymbolValue, + nodeWksp, + nodeWkspSize, + &nextHuf->CTable.e0, + countWksp, + hufFlags + ); + assert(huffLog <= 11); { - ZSTD_strategy strategy = cctxParams->cParams.strategy; - nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - byte* ostart = fseMetadata->fseTablesBuffer; - byte* oend = ostart + sizeof(byte) * 133; - byte* op = ostart; - uint* countWorkspace = (uint*)workspace; - uint* entropyWorkspace = countWorkspace + (52 + 1); - nuint entropyWorkspaceSize = wkspSize - (52 + 1) * sizeof(uint); - ZSTD_symbolEncodingTypeStats_t stats; - stats = - nbSeq != 0 - ? ZSTD_buildSequencesStatistics( - seqStorePtr, - nbSeq, - prevEntropy, - nextEntropy, - op, - oend, - strategy, - countWorkspace, - entropyWorkspace, - entropyWorkspaceSize - ) - : ZSTD_buildDummySequencesStatistics(nextEntropy); + nuint maxBits = HUF_buildCTable_wksp( + &nextHuf->CTable.e0, + countWksp, + maxSymbolValue, + huffLog, + nodeWksp, + nodeWkspSize + ); { - nuint err_code = stats.size; + nuint err_code = maxBits; if (ERR_isError(err_code)) { return err_code; } } - fseMetadata->llType = (SymbolEncodingType_e)stats.LLtype; - fseMetadata->ofType = (SymbolEncodingType_e)stats.Offtype; - fseMetadata->mlType = (SymbolEncodingType_e)stats.MLtype; - fseMetadata->lastCountSize = stats.lastCountSize; - return stats.size; + huffLog = (uint)maxBits; } - /** ZSTD_buildBlockEntropyStats() : - * Builds entropy for the block. 
- * Requires workspace size ENTROPY_WORKSPACE_SIZE - * @return : 0 on success, or an error code - * Note : also employed in superblock - */ - private static nuint ZSTD_buildBlockEntropyStats( - SeqStore_t* seqStorePtr, - ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - ZSTD_CCtx_params_s* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, - nuint wkspSize - ) { - nuint litSize = (nuint)(seqStorePtr->lit - seqStorePtr->litStart); - int huf_useOptDepth = - cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; - int hufFlags = huf_useOptDepth != 0 ? (int)HUF_flags_e.HUF_flags_optimalDepth : 0; - entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals( - seqStorePtr->litStart, - litSize, - &prevEntropy->huf, - &nextEntropy->huf, - &entropyMetadata->hufMetadata, - ZSTD_literalsCompressionIsDisabled(cctxParams), - workspace, - wkspSize, - hufFlags + nuint newCSize = HUF_estimateCompressedSize( + &nextHuf->CTable.e0, + countWksp, + maxSymbolValue + ); + nuint hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, + sizeof(byte) * 128, + &nextHuf->CTable.e0, + maxSymbolValue, + huffLog, + nodeWksp, + nodeWkspSize ); + if (repeat != HUF_repeat.HUF_repeat_none) { - nuint err_code = entropyMetadata->hufMetadata.hufDesSize; - if (ERR_isError(err_code)) + nuint oldCSize = HUF_estimateCompressedSize( + &prevHuf->CTable.e0, + countWksp, + maxSymbolValue + ); + if ( + oldCSize < srcSize + && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize) + ) { - return err_code; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + hufMetadata->hType = SymbolEncodingType_e.set_repeat; + return 0; } } - entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences( - seqStorePtr, - &prevEntropy->fse, - &nextEntropy->fse, - cctxParams, - &entropyMetadata->fseMetadata, - workspace, - wkspSize - ); + if (newCSize + hSize >= srcSize) { - nuint err_code = 
entropyMetadata->fseMetadata.fseTablesSize; - if (ERR_isError(err_code)) - { - return err_code; - } + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + hufMetadata->hType = SymbolEncodingType_e.set_basic; + return 0; } - return 0; + hufMetadata->hType = SymbolEncodingType_e.set_compressed; + nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; + return hSize; } + } - /* Returns the size estimate for the literals section (header + content) of a block */ - private static nuint ZSTD_estimateBlockSize_literal( - byte* literals, - nuint litSize, - ZSTD_hufCTables_t* huf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, - nuint wkspSize, - int writeEntropy - ) + /* ZSTD_buildDummySequencesStatistics(): + * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic, + * and updates nextEntropy to the appropriate repeatMode. + */ + private static ZSTD_symbolEncodingTypeStats_t ZSTD_buildDummySequencesStatistics( + ZSTD_fseCTables_t* nextEntropy + ) + { + ZSTD_symbolEncodingTypeStats_t stats = new ZSTD_symbolEncodingTypeStats_t + { + LLtype = (uint)SymbolEncodingType_e.set_basic, + Offtype = (uint)SymbolEncodingType_e.set_basic, + MLtype = (uint)SymbolEncodingType_e.set_basic, + size = 0, + lastCountSize = 0, + longOffsets = 0, + }; + nextEntropy->litlength_repeatMode = FSE_repeat.FSE_repeat_none; + nextEntropy->offcode_repeatMode = FSE_repeat.FSE_repeat_none; + nextEntropy->matchlength_repeatMode = FSE_repeat.FSE_repeat_none; + return stats; + } + + /** ZSTD_buildBlockEntropyStats_sequences() : + * Builds entropy for the sequences. + * Stores symbol compression modes and fse table to fseMetadata. + * Requires ENTROPY_WORKSPACE_SIZE wksp. 
+ * @return : size of fse tables or error code */ + private static nuint ZSTD_buildBlockEntropyStats_sequences( + SeqStore_t* seqStorePtr, + ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize + ) + { + ZSTD_strategy strategy = cctxParams->cParams.strategy; + nuint nbSeq = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + byte* ostart = fseMetadata->fseTablesBuffer; + byte* oend = ostart + sizeof(byte) * 133; + byte* op = ostart; + uint* countWorkspace = (uint*)workspace; + uint* entropyWorkspace = countWorkspace + (52 + 1); + nuint entropyWorkspaceSize = wkspSize - (52 + 1) * sizeof(uint); + ZSTD_symbolEncodingTypeStats_t stats; + stats = + nbSeq != 0 + ? ZSTD_buildSequencesStatistics( + seqStorePtr, + nbSeq, + prevEntropy, + nextEntropy, + op, + oend, + strategy, + countWorkspace, + entropyWorkspace, + entropyWorkspaceSize + ) + : ZSTD_buildDummySequencesStatistics(nextEntropy); { - uint* countWksp = (uint*)workspace; - uint maxSymbolValue = 255; - nuint literalSectionHeaderSize = (nuint)( - 3 + (litSize >= 1 * (1 << 10) ? 1 : 0) + (litSize >= 16 * (1 << 10) ? 1 : 0) - ); - uint singleStream = litSize < 256 ? 
1U : 0U; - if (hufMetadata->hType == SymbolEncodingType_e.set_basic) - return litSize; - else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) - return 1; - else if ( - hufMetadata->hType == SymbolEncodingType_e.set_compressed - || hufMetadata->hType == SymbolEncodingType_e.set_repeat - ) + nuint err_code = stats.size; + if (ERR_isError(err_code)) { - nuint largest = HIST_count_wksp( - countWksp, - &maxSymbolValue, - literals, - litSize, - workspace, - wkspSize - ); - if (ERR_isError(largest)) - return litSize; - { - nuint cLitSizeEstimate = HUF_estimateCompressedSize( - &huf->CTable.e0, - countWksp, - maxSymbolValue - ); - if (writeEntropy != 0) - cLitSizeEstimate += hufMetadata->hufDesSize; - if (singleStream == 0) - cLitSizeEstimate += 6; - return cLitSizeEstimate + literalSectionHeaderSize; - } + return err_code; } - - assert(0 != 0); - return 0; } - /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ - private static nuint ZSTD_estimateBlockSize_symbolType( - SymbolEncodingType_e type, - byte* codeTable, - nuint nbSeq, - uint maxCode, - uint* fseCTable, - byte* additionalBits, - short* defaultNorm, - uint defaultNormLog, - uint defaultMax, - void* workspace, - nuint wkspSize - ) - { - uint* countWksp = (uint*)workspace; - byte* ctp = codeTable; - byte* ctStart = ctp; - byte* ctEnd = ctStart + nbSeq; - nuint cSymbolTypeSizeEstimateInBits = 0; - uint max = maxCode; - HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); - if (type == SymbolEncodingType_e.set_basic) - { - assert(max <= defaultMax); - cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost( - defaultNorm, - defaultNormLog, - countWksp, - max - ); - } - else if (type == SymbolEncodingType_e.set_rle) - { - cSymbolTypeSizeEstimateInBits = 0; - } - else if ( - type == SymbolEncodingType_e.set_compressed - || type == SymbolEncodingType_e.set_repeat - ) - { - cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); - } + 
fseMetadata->llType = (SymbolEncodingType_e)stats.LLtype; + fseMetadata->ofType = (SymbolEncodingType_e)stats.Offtype; + fseMetadata->mlType = (SymbolEncodingType_e)stats.MLtype; + fseMetadata->lastCountSize = stats.lastCountSize; + return stats.size; + } - if (ERR_isError(cSymbolTypeSizeEstimateInBits)) + /** ZSTD_buildBlockEntropyStats() : + * Builds entropy for the block. + * Requires workspace size ENTROPY_WORKSPACE_SIZE + * @return : 0 on success, or an error code + * Note : also employed in superblock + */ + private static nuint ZSTD_buildBlockEntropyStats( + SeqStore_t* seqStorePtr, + ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params_s* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize + ) + { + nuint litSize = (nuint)(seqStorePtr->lit - seqStorePtr->litStart); + int huf_useOptDepth = + cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; + int hufFlags = huf_useOptDepth != 0 ? 
(int)HUF_flags_e.HUF_flags_optimalDepth : 0; + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals( + seqStorePtr->litStart, + litSize, + &prevEntropy->huf, + &nextEntropy->huf, + &entropyMetadata->hufMetadata, + ZSTD_literalsCompressionIsDisabled(cctxParams), + workspace, + wkspSize, + hufFlags + ); + { + nuint err_code = entropyMetadata->hufMetadata.hufDesSize; + if (ERR_isError(err_code)) { - return nbSeq * 10; + return err_code; } + } - while (ctp < ctEnd) + entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences( + seqStorePtr, + &prevEntropy->fse, + &nextEntropy->fse, + cctxParams, + &entropyMetadata->fseMetadata, + workspace, + wkspSize + ); + { + nuint err_code = entropyMetadata->fseMetadata.fseTablesSize; + if (ERR_isError(err_code)) { - if (additionalBits != null) - cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; - else - cSymbolTypeSizeEstimateInBits += *ctp; - ctp++; + return err_code; } - - return cSymbolTypeSizeEstimateInBits >> 3; } - /* Returns the size estimate for the sequences section (header + content) of a block */ - private static nuint ZSTD_estimateBlockSize_sequences( - byte* ofCodeTable, - byte* llCodeTable, - byte* mlCodeTable, - nuint nbSeq, - ZSTD_fseCTables_t* fseTables, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, - nuint wkspSize, - int writeEntropy + return 0; + } + + /* Returns the size estimate for the literals section (header + content) of a block */ + private static nuint ZSTD_estimateBlockSize_literal( + byte* literals, + nuint litSize, + ZSTD_hufCTables_t* huf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) + { + uint* countWksp = (uint*)workspace; + uint maxSymbolValue = 255; + nuint literalSectionHeaderSize = (nuint)( + 3 + (litSize >= 1 * (1 << 10) ? 1 : 0) + (litSize >= 16 * (1 << 10) ? 1 : 0) + ); + uint singleStream = litSize < 256 ? 
1U : 0U; + if (hufMetadata->hType == SymbolEncodingType_e.set_basic) + return litSize; + else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) + return 1; + else if ( + hufMetadata->hType == SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat ) { - /* seqHead */ - nuint sequencesSectionHeaderSize = (nuint)( - 1 + 1 + (nbSeq >= 128 ? 1 : 0) + (nbSeq >= 0x7F00 ? 1 : 0) - ); - nuint cSeqSizeEstimate = 0; - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( - fseMetadata->ofType, - ofCodeTable, - nbSeq, - 31, - fseTables->offcodeCTable, - null, - OF_defaultNorm, - OF_defaultNormLog, - 28, - workspace, - wkspSize - ); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( - fseMetadata->llType, - llCodeTable, - nbSeq, - 35, - fseTables->litlengthCTable, - LL_bits, - LL_defaultNorm, - LL_defaultNormLog, - 35, + nuint largest = HIST_count_wksp( + countWksp, + &maxSymbolValue, + literals, + litSize, workspace, wkspSize ); - cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( - fseMetadata->mlType, - mlCodeTable, - nbSeq, - 52, - fseTables->matchlengthCTable, - ML_bits, - ML_defaultNorm, - ML_defaultNormLog, - 52, - workspace, - wkspSize + if (ERR_isError(largest)) + return litSize; + { + nuint cLitSizeEstimate = HUF_estimateCompressedSize( + &huf->CTable.e0, + countWksp, + maxSymbolValue + ); + if (writeEntropy != 0) + cLitSizeEstimate += hufMetadata->hufDesSize; + if (singleStream == 0) + cLitSizeEstimate += 6; + return cLitSizeEstimate + literalSectionHeaderSize; + } + } + + assert(0 != 0); + return 0; + } + + /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ + private static nuint ZSTD_estimateBlockSize_symbolType( + SymbolEncodingType_e type, + byte* codeTable, + nuint nbSeq, + uint maxCode, + uint* fseCTable, + byte* additionalBits, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + void* workspace, + nuint wkspSize + ) + { + uint* countWksp = 
(uint*)workspace; + byte* ctp = codeTable; + byte* ctStart = ctp; + byte* ctEnd = ctStart + nbSeq; + nuint cSymbolTypeSizeEstimateInBits = 0; + uint max = maxCode; + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); + if (type == SymbolEncodingType_e.set_basic) + { + assert(max <= defaultMax); + cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost( + defaultNorm, + defaultNormLog, + countWksp, + max ); - if (writeEntropy != 0) - cSeqSizeEstimate += fseMetadata->fseTablesSize; - return cSeqSizeEstimate + sequencesSectionHeaderSize; - } - - /* Returns the size estimate for a given stream of literals, of, ll, ml */ - private static nuint ZSTD_estimateBlockSize( - byte* literals, - nuint litSize, - byte* ofCodeTable, - byte* llCodeTable, - byte* mlCodeTable, - nuint nbSeq, - ZSTD_entropyCTables_t* entropy, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, - nuint wkspSize, - int writeLitEntropy, - int writeSeqEntropy + } + else if (type == SymbolEncodingType_e.set_rle) + { + cSymbolTypeSizeEstimateInBits = 0; + } + else if ( + type == SymbolEncodingType_e.set_compressed + || type == SymbolEncodingType_e.set_repeat ) { - nuint literalsSize = ZSTD_estimateBlockSize_literal( - literals, - litSize, - &entropy->huf, - &entropyMetadata->hufMetadata, - workspace, - wkspSize, - writeLitEntropy - ); - nuint seqSize = ZSTD_estimateBlockSize_sequences( - ofCodeTable, - llCodeTable, - mlCodeTable, - nbSeq, - &entropy->fse, - &entropyMetadata->fseMetadata, - workspace, - wkspSize, - writeSeqEntropy - ); - return seqSize + literalsSize + ZSTD_blockHeaderSize; + cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); } - /* Builds entropy statistics and uses them for blocksize estimation. - * - * @return: estimated compressed size of the seqStore, or a zstd error. 
- */ - private static nuint ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( - SeqStore_t* seqStore, - ZSTD_CCtx_s* zc - ) + if (ERR_isError(cSymbolTypeSizeEstimateInBits)) { - ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; - { - nuint err_code = ZSTD_buildBlockEntropyStats( - seqStore, - &zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - entropyMetadata, - zc->tmpWorkspace, - zc->tmpWkspSize - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return nbSeq * 10; + } + + while (ctp < ctEnd) + { + if (additionalBits != null) + cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; + else + cSymbolTypeSizeEstimateInBits += *ctp; + ctp++; + } + + return cSymbolTypeSizeEstimateInBits >> 3; + } + + /* Returns the size estimate for the sequences section (header + content) of a block */ + private static nuint ZSTD_estimateBlockSize_sequences( + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) + { + /* seqHead */ + nuint sequencesSectionHeaderSize = (nuint)( + 1 + 1 + (nbSeq >= 128 ? 1 : 0) + (nbSeq >= 0x7F00 ? 
1 : 0) + ); + nuint cSeqSizeEstimate = 0; + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->ofType, + ofCodeTable, + nbSeq, + 31, + fseTables->offcodeCTable, + null, + OF_defaultNorm, + OF_defaultNormLog, + 28, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->llType, + llCodeTable, + nbSeq, + 35, + fseTables->litlengthCTable, + LL_bits, + LL_defaultNorm, + LL_defaultNormLog, + 35, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType( + fseMetadata->mlType, + mlCodeTable, + nbSeq, + 52, + fseTables->matchlengthCTable, + ML_bits, + ML_defaultNorm, + ML_defaultNormLog, + 52, + workspace, + wkspSize + ); + if (writeEntropy != 0) + cSeqSizeEstimate += fseMetadata->fseTablesSize; + return cSeqSizeEstimate + sequencesSectionHeaderSize; + } + + /* Returns the size estimate for a given stream of literals, of, ll, ml */ + private static nuint ZSTD_estimateBlockSize( + byte* literals, + nuint litSize, + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize, + int writeLitEntropy, + int writeSeqEntropy + ) + { + nuint literalsSize = ZSTD_estimateBlockSize_literal( + literals, + litSize, + &entropy->huf, + &entropyMetadata->hufMetadata, + workspace, + wkspSize, + writeLitEntropy + ); + nuint seqSize = ZSTD_estimateBlockSize_sequences( + ofCodeTable, + llCodeTable, + mlCodeTable, + nbSeq, + &entropy->fse, + &entropyMetadata->fseMetadata, + workspace, + wkspSize, + writeSeqEntropy + ); + return seqSize + literalsSize + ZSTD_blockHeaderSize; + } - return ZSTD_estimateBlockSize( - seqStore->litStart, - (nuint)(seqStore->lit - seqStore->litStart), - seqStore->ofCode, - seqStore->llCode, - seqStore->mlCode, - (nuint)(seqStore->sequences - seqStore->sequencesStart), + /* Builds entropy statistics and uses them for blocksize estimation. 
+ * + * @return: estimated compressed size of the seqStore, or a zstd error. + */ + private static nuint ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + SeqStore_t* seqStore, + ZSTD_CCtx_s* zc + ) + { + ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; + { + nuint err_code = ZSTD_buildBlockEntropyStats( + seqStore, + &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, entropyMetadata, zc->tmpWorkspace, - zc->tmpWkspSize, - entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0, - 1 + zc->tmpWkspSize ); - } - - /* Returns literals bytes represented in a seqStore */ - private static nuint ZSTD_countSeqStoreLiteralsBytes(SeqStore_t* seqStore) - { - nuint literalsBytes = 0; - nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); - nuint i; - for (i = 0; i < nbSeqs; ++i) + if (ERR_isError(err_code)) { - SeqDef_s seq = seqStore->sequencesStart[i]; - literalsBytes += seq.litLength; - if ( - i == seqStore->longLengthPos - && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength - ) - { - literalsBytes += 0x10000; - } + return err_code; } - - return literalsBytes; } - /* Returns match bytes represented in a seqStore */ - private static nuint ZSTD_countSeqStoreMatchBytes(SeqStore_t* seqStore) + return ZSTD_estimateBlockSize( + seqStore->litStart, + (nuint)(seqStore->lit - seqStore->litStart), + seqStore->ofCode, + seqStore->llCode, + seqStore->mlCode, + (nuint)(seqStore->sequences - seqStore->sequencesStart), + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->tmpWorkspace, + zc->tmpWkspSize, + entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 
1 : 0, + 1 + ); + } + + /* Returns literals bytes represented in a seqStore */ + private static nuint ZSTD_countSeqStoreLiteralsBytes(SeqStore_t* seqStore) + { + nuint literalsBytes = 0; + nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); + nuint i; + for (i = 0; i < nbSeqs; ++i) { - nuint matchBytes = 0; - nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); - nuint i; - for (i = 0; i < nbSeqs; ++i) + SeqDef_s seq = seqStore->sequencesStart[i]; + literalsBytes += seq.litLength; + if ( + i == seqStore->longLengthPos + && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength + ) { - SeqDef_s seq = seqStore->sequencesStart[i]; - matchBytes += (nuint)(seq.mlBase + 3); - if ( - i == seqStore->longLengthPos - && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength - ) - { - matchBytes += 0x10000; - } + literalsBytes += 0x10000; } - - return matchBytes; } - /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). - * Stores the result in resultSeqStore. 
- */ - private static void ZSTD_deriveSeqStoreChunk( - SeqStore_t* resultSeqStore, - SeqStore_t* originalSeqStore, - nuint startIdx, - nuint endIdx - ) + return literalsBytes; + } + + /* Returns match bytes represented in a seqStore */ + private static nuint ZSTD_countSeqStoreMatchBytes(SeqStore_t* seqStore) + { + nuint matchBytes = 0; + nuint nbSeqs = (nuint)(seqStore->sequences - seqStore->sequencesStart); + nuint i; + for (i = 0; i < nbSeqs; ++i) { - *resultSeqStore = *originalSeqStore; - if (startIdx > 0) + SeqDef_s seq = seqStore->sequencesStart[i]; + matchBytes += (nuint)(seq.mlBase + 3); + if ( + i == seqStore->longLengthPos + && seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength + ) { - resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; - resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + matchBytes += 0x10000; } + } - if (originalSeqStore->longLengthType != ZSTD_longLengthType_e.ZSTD_llt_none) - { - if ( - originalSeqStore->longLengthPos < startIdx - || originalSeqStore->longLengthPos > endIdx - ) - { - resultSeqStore->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; - } - else - { - resultSeqStore->longLengthPos -= (uint)startIdx; - } - } + return matchBytes; + } + + /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). + * Stores the result in resultSeqStore. 
+ */ + private static void ZSTD_deriveSeqStoreChunk( + SeqStore_t* resultSeqStore, + SeqStore_t* originalSeqStore, + nuint startIdx, + nuint endIdx + ) + { + *resultSeqStore = *originalSeqStore; + if (startIdx > 0) + { + resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; + resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + } - resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; - resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; - if (endIdx == (nuint)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) + if (originalSeqStore->longLengthType != ZSTD_longLengthType_e.ZSTD_llt_none) + { + if ( + originalSeqStore->longLengthPos < startIdx + || originalSeqStore->longLengthPos > endIdx + ) { - assert(resultSeqStore->lit == originalSeqStore->lit); + resultSeqStore->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_none; } else { - nuint literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); - resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; + resultSeqStore->longLengthPos -= (uint)startIdx; } - - resultSeqStore->llCode += startIdx; - resultSeqStore->mlCode += startIdx; - resultSeqStore->ofCode += startIdx; } - /** - * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. - * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). 
- */ - private static uint ZSTD_resolveRepcodeToRawOffset(uint* rep, uint offBase, uint ll0) + resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; + resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; + if (endIdx == (nuint)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { - assert(1 <= offBase && offBase <= 3); - /* [ 0 - 3 ] */ - uint adjustedRepCode = offBase - 1 + ll0; - assert(1 <= offBase && offBase <= 3); - if (adjustedRepCode == 3) - { - assert(ll0 != 0); - return rep[0] - 1; - } + assert(resultSeqStore->lit == originalSeqStore->lit); + } + else + { + nuint literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; + } + + resultSeqStore->llCode += startIdx; + resultSeqStore->mlCode += startIdx; + resultSeqStore->ofCode += startIdx; + } - return rep[adjustedRepCode]; + /** + * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. + * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). + */ + private static uint ZSTD_resolveRepcodeToRawOffset(uint* rep, uint offBase, uint ll0) + { + assert(1 <= offBase && offBase <= 3); + /* [ 0 - 3 ] */ + uint adjustedRepCode = offBase - 1 + ll0; + assert(1 <= offBase && offBase <= 3); + if (adjustedRepCode == 3) + { + assert(ll0 != 0); + return rep[0] - 1; } - /** - * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise - * due to emission of RLE/raw blocks that disturb the offset history, - * and replaces any repcodes within the seqStore that may be invalid. - * - * dRepcodes are updated as would be on the decompression side. - * cRepcodes are updated exactly in accordance with the seqStore. 
- * - * Note : this function assumes seq->offBase respects the following numbering scheme : - * 0 : invalid - * 1-3 : repcode 1-3 - * 4+ : real_offset+3 - */ - private static void ZSTD_seqStore_resolveOffCodes( - repcodes_s* dRepcodes, - repcodes_s* cRepcodes, - SeqStore_t* seqStore, - uint nbSeq - ) + return rep[adjustedRepCode]; + } + + /** + * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise + * due to emission of RLE/raw blocks that disturb the offset history, + * and replaces any repcodes within the seqStore that may be invalid. + * + * dRepcodes are updated as would be on the decompression side. + * cRepcodes are updated exactly in accordance with the seqStore. + * + * Note : this function assumes seq->offBase respects the following numbering scheme : + * 0 : invalid + * 1-3 : repcode 1-3 + * 4+ : real_offset+3 + */ + private static void ZSTD_seqStore_resolveOffCodes( + repcodes_s* dRepcodes, + repcodes_s* cRepcodes, + SeqStore_t* seqStore, + uint nbSeq + ) + { + uint idx = 0; + uint longLitLenIdx = + seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength + ? seqStore->longLengthPos + : nbSeq; + for (; idx < nbSeq; ++idx) { - uint idx = 0; - uint longLitLenIdx = - seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength - ? seqStore->longLengthPos - : nbSeq; - for (; idx < nbSeq; ++idx) - { - SeqDef_s* seq = seqStore->sequencesStart + idx; - uint ll0 = seq->litLength == 0 && idx != longLitLenIdx ? 1U : 0U; - uint offBase = seq->offBase; - assert(offBase > 0); - if (1 <= offBase && offBase <= 3) + SeqDef_s* seq = seqStore->sequencesStart + idx; + uint ll0 = seq->litLength == 0 && idx != longLitLenIdx ? 
1U : 0U; + uint offBase = seq->offBase; + assert(offBase > 0); + if (1 <= offBase && offBase <= 3) + { + uint dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); + uint cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); + if (dRawOffset != cRawOffset) { - uint dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); - uint cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); - if (dRawOffset != cRawOffset) - { - assert(cRawOffset > 0); - seq->offBase = cRawOffset + 3; - } + assert(cRawOffset > 0); + seq->offBase = cRawOffset + 3; } - - ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); - ZSTD_updateRep(cRepcodes->rep, offBase, ll0); } + + ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); + ZSTD_updateRep(cRepcodes->rep, offBase, ll0); } + } - /* ZSTD_compressSeqStore_singleBlock(): - * Compresses a seqStore into a block with a block header, into the buffer dst. - * - * Returns the total size of that block (including header) or a ZSTD error code. - */ - private static nuint ZSTD_compressSeqStore_singleBlock( - ZSTD_CCtx_s* zc, - SeqStore_t* seqStore, - repcodes_s* dRep, - repcodes_s* cRep, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastBlock, - uint isPartition - ) + /* ZSTD_compressSeqStore_singleBlock(): + * Compresses a seqStore into a block with a block header, into the buffer dst. + * + * Returns the total size of that block (including header) or a ZSTD error code. 
+ */ + private static nuint ZSTD_compressSeqStore_singleBlock( + ZSTD_CCtx_s* zc, + SeqStore_t* seqStore, + repcodes_s* dRep, + repcodes_s* cRep, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock, + uint isPartition + ) + { + const uint rleMaxLength = 25; + byte* op = (byte*)dst; + byte* ip = (byte*)src; + nuint cSize; + nuint cSeqsSize; + /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ + repcodes_s dRepOriginal = *dRep; + if (isPartition != 0) + ZSTD_seqStore_resolveOffCodes( + dRep, + cRep, + seqStore, + (uint)(seqStore->sequences - seqStore->sequencesStart) + ); + if (dstCapacity < ZSTD_blockHeaderSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + cSeqsSize = ZSTD_entropyCompressSeqStore( + seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + srcSize, + zc->tmpWorkspace, + zc->tmpWkspSize, + zc->bmi2 + ); { - const uint rleMaxLength = 25; - byte* op = (byte*)dst; - byte* ip = (byte*)src; - nuint cSize; - nuint cSeqsSize; - /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ - repcodes_s dRepOriginal = *dRep; - if (isPartition != 0) - ZSTD_seqStore_resolveOffCodes( - dRep, - cRep, - seqStore, - (uint)(seqStore->sequences - seqStore->sequencesStart) - ); - if (dstCapacity < ZSTD_blockHeaderSize) + nuint err_code = cSeqsSize; + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return err_code; } + } - cSeqsSize = ZSTD_entropyCompressSeqStore( - seqStore, - &zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - op + ZSTD_blockHeaderSize, - dstCapacity - ZSTD_blockHeaderSize, - srcSize, - zc->tmpWorkspace, - zc->tmpWkspSize, - zc->bmi2 - ); + if ( + zc->isFirstBlock == 0 + 
&& cSeqsSize < rleMaxLength + && ZSTD_isRLE((byte*)src, srcSize) != 0 + ) + { + cSeqsSize = 1; + } + + if (zc->seqCollector.collectSequences != 0) + { { - nuint err_code = cSeqsSize; + nuint err_code = ZSTD_copyBlockSequences( + &zc->seqCollector, + seqStore, + dRepOriginal.rep + ); if (ERR_isError(err_code)) { return err_code; } } - if ( - zc->isFirstBlock == 0 - && cSeqsSize < rleMaxLength - && ZSTD_isRLE((byte*)src, srcSize) != 0 - ) - { - cSeqsSize = 1; - } + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return 0; + } - if (zc->seqCollector.collectSequences != 0) + if (cSeqsSize == 0) + { + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); { + nuint err_code = cSize; + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_copyBlockSequences( - &zc->seqCollector, - seqStore, - dRepOriginal.rep - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return 0; } - if (cSeqsSize == 0) + *dRep = dRepOriginal; + } + else if (cSeqsSize == 1) + { + cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); { - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + nuint err_code = cSize; + if (ERR_isError(err_code)) { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - *dRep = dRepOriginal; - } - else if (cSeqsSize == 1) - { - cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - *dRep = dRepOriginal; - } - else - { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); - cSize = ZSTD_blockHeaderSize + cSeqsSize; } - if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - 
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - return cSize; + *dRep = dRepOriginal; + } + else + { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); + cSize = ZSTD_blockHeaderSize + cSeqsSize; } - /* Helper function to perform the recursive search for block splits. - * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. - * If not, or if an error occurred in estimation, then we do not recurse. - * - * Note: The recursion depth is capped by a heuristic minimum number of sequences, - * defined by MIN_SEQUENCES_BLOCK_SPLITTING. - * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). - * In practice, recursion depth usually doesn't go beyond 4. - * - * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. - * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize - * maximum of 128 KB, this value is actually impossible to reach. 
- */ - private static void ZSTD_deriveBlockSplitsHelper( - seqStoreSplits* splits, - nuint startIdx, - nuint endIdx, - ZSTD_CCtx_s* zc, - SeqStore_t* origSeqStore + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid ) - { - SeqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - SeqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - SeqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; - nuint estimatedOriginalSize; - nuint estimatedFirstHalfSize; - nuint estimatedSecondHalfSize; - nuint midIdx = (startIdx + endIdx) / 2; - assert(endIdx >= startIdx); - if (endIdx - startIdx < 300 || splits->idx >= 196) - { - return; - } - - ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); - ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); - ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); - estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( - fullSeqStoreChunk, - zc - ); - estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( - firstHalfSeqStore, - zc - ); - estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( - secondHalfSeqStore, - zc - ); - if ( - ERR_isError(estimatedOriginalSize) - || ERR_isError(estimatedFirstHalfSize) - || ERR_isError(estimatedSecondHalfSize) - ) - { - return; - } + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + return cSize; + } - if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) - { - ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); - splits->splitLocations[splits->idx] = (uint)midIdx; - splits->idx++; - ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore); - } + /* Helper function to perform the recursive search for block splits. 
+ * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse. + * + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). + * In practice, recursion depth usually doesn't go beyond 4. + * + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. + * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize + * maximum of 128 KB, this value is actually impossible to reach. + */ + private static void ZSTD_deriveBlockSplitsHelper( + seqStoreSplits* splits, + nuint startIdx, + nuint endIdx, + ZSTD_CCtx_s* zc, + SeqStore_t* origSeqStore + ) + { + SeqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + SeqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + SeqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; + nuint estimatedOriginalSize; + nuint estimatedFirstHalfSize; + nuint estimatedSecondHalfSize; + nuint midIdx = (startIdx + endIdx) / 2; + assert(endIdx >= startIdx); + if (endIdx - startIdx < 300 || splits->idx >= 196) + { + return; + } + + ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); + ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); + estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + fullSeqStoreChunk, + zc + ); + estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + firstHalfSeqStore, + zc + ); + estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize( + secondHalfSeqStore, + zc + ); + 
if ( + ERR_isError(estimatedOriginalSize) + || ERR_isError(estimatedFirstHalfSize) + || ERR_isError(estimatedSecondHalfSize) + ) + { + return; } - /* Base recursive function. - * Populates a table with intra-block partition indices that can improve compression ratio. - * - * @return: number of splits made (which equals the size of the partition table - 1). - */ - private static nuint ZSTD_deriveBlockSplits(ZSTD_CCtx_s* zc, uint* partitions, uint nbSeq) + if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { - seqStoreSplits splits; - splits.splitLocations = partitions; - splits.idx = 0; - if (nbSeq <= 4) - { - return 0; - } - - ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore); - splits.splitLocations[splits.idx] = nbSeq; - return splits.idx; + ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); + splits->splitLocations[splits->idx] = (uint)midIdx; + splits->idx++; + ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore); } + } - /* ZSTD_compressBlock_splitBlock(): - * Attempts to split a given block into multiple blocks to improve compression ratio. - * - * Returns combined size of all blocks (which includes headers), or a ZSTD error code. - */ - private static nuint ZSTD_compressBlock_splitBlock_internal( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint blockSize, - uint lastBlock, - uint nbSeq - ) + /* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio. + * + * @return: number of splits made (which equals the size of the partition table - 1). 
+ */ + private static nuint ZSTD_deriveBlockSplits(ZSTD_CCtx_s* zc, uint* partitions, uint nbSeq) + { + seqStoreSplits splits; + splits.splitLocations = partitions; + splits.idx = 0; + if (nbSeq <= 4) { - nuint cSize = 0; - byte* ip = (byte*)src; - byte* op = (byte*)dst; - nuint i = 0; - nuint srcBytesTotal = 0; - /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ - uint* partitions = zc->blockSplitCtx.partitions; - SeqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; - SeqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; - nuint numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); - /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history - * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two - * separate repcode histories that simulate repcode history on compression and decompression side, - * and use the histories to determine whether we must replace a particular repcode with its raw offset. - * - * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed - * or RLE. This allows us to retrieve the offset value that an invalid repcode references within - * a nocompress/RLE block. - * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use - * the replacement offset value rather than the original repcode to update the repcode history. - * dRep also will be the final repcode history sent to the next block. - * - * See ZSTD_seqStore_resolveOffCodes() for more details. 
- */ - repcodes_s dRep; - repcodes_s cRep; - memcpy(dRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - memcpy(cRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - *nextSeqStore = new SeqStore_t(); - if (numSplits == 0) - { - nuint cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock( - zc, - &zc->seqStore, - &dRep, - &cRep, - op, - dstCapacity, - ip, - blockSize, - lastBlock, - 0 - ); - { - nuint err_code = cSizeSingleBlock; - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 0; + } - assert(zc->blockSizeMax <= 1 << 17); - assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); - return cSizeSingleBlock; - } + ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore); + splits.splitLocations[splits.idx] = nbSeq; + return splits.idx; + } - ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); - for (i = 0; i <= numSplits; ++i) + /* ZSTD_compressBlock_splitBlock(): + * Attempts to split a given block into multiple blocks to improve compression ratio. + * + * Returns combined size of all blocks (which includes headers), or a ZSTD error code. + */ + private static nuint ZSTD_compressBlock_splitBlock_internal( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint blockSize, + uint lastBlock, + uint nbSeq + ) + { + nuint cSize = 0; + byte* ip = (byte*)src; + byte* op = (byte*)dst; + nuint i = 0; + nuint srcBytesTotal = 0; + /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + uint* partitions = zc->blockSplitCtx.partitions; + SeqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + SeqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; + nuint numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history + * may become invalid. 
In order to reconcile potentially invalid repcodes, we keep track of two + * separate repcode histories that simulate repcode history on compression and decompression side, + * and use the histories to determine whether we must replace a particular repcode with its raw offset. + * + * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed + * or RLE. This allows us to retrieve the offset value that an invalid repcode references within + * a nocompress/RLE block. + * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use + * the replacement offset value rather than the original repcode to update the repcode history. + * dRep also will be the final repcode history sent to the next block. + * + * See ZSTD_seqStore_resolveOffCodes() for more details. + */ + repcodes_s dRep; + repcodes_s cRep; + memcpy(dRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + memcpy(cRep.rep, zc->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + *nextSeqStore = new SeqStore_t(); + if (numSplits == 0) + { + nuint cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock( + zc, + &zc->seqStore, + &dRep, + &cRep, + op, + dstCapacity, + ip, + blockSize, + lastBlock, + 0 + ); { - nuint cSizeChunk; - uint lastPartition = i == numSplits ? 
1U : 0U; - uint lastBlockEntireSrc = 0; - nuint srcBytes = - ZSTD_countSeqStoreLiteralsBytes(currSeqStore) - + ZSTD_countSeqStoreMatchBytes(currSeqStore); - srcBytesTotal += srcBytes; - if (lastPartition != 0) - { - srcBytes += blockSize - srcBytesTotal; - lastBlockEntireSrc = lastBlock; - } - else - { - ZSTD_deriveSeqStoreChunk( - nextSeqStore, - &zc->seqStore, - partitions[i], - partitions[i + 1] - ); - } - - cSizeChunk = ZSTD_compressSeqStore_singleBlock( - zc, - currSeqStore, - &dRep, - &cRep, - op, - dstCapacity, - ip, - srcBytes, - lastBlockEntireSrc, - 1 - ); + nuint err_code = cSizeSingleBlock; + if (ERR_isError(err_code)) { - nuint err_code = cSizeChunk; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - ip += srcBytes; - op += cSizeChunk; - dstCapacity -= cSizeChunk; - cSize += cSizeChunk; - *currSeqStore = *nextSeqStore; - assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); } - memcpy(zc->blockState.prevCBlock->rep, dRep.rep, (uint)sizeof(repcodes_s)); - return cSize; + assert(zc->blockSizeMax <= 1 << 17); + assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); + return cSizeSingleBlock; } - private static nuint ZSTD_compressBlock_splitBlock( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastBlock - ) + ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); + for (i = 0; i <= numSplits; ++i) { - uint nbSeq; - nuint cSize; - assert(zc->appliedParams.postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable); + nuint cSizeChunk; + uint lastPartition = i == numSplits ? 
1U : 0U; + uint lastBlockEntireSrc = 0; + nuint srcBytes = + ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + + ZSTD_countSeqStoreMatchBytes(currSeqStore); + srcBytesTotal += srcBytes; + if (lastPartition != 0) { - nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); - { - nuint err_code = bss; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) - { - if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - if (zc->seqCollector.collectSequences != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) - ); - } - - cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return cSize; - } - - nbSeq = (uint)(zc->seqStore.sequences - zc->seqStore.sequencesStart); + srcBytes += blockSize - srcBytesTotal; + lastBlockEntireSrc = lastBlock; + } + else + { + ZSTD_deriveSeqStoreChunk( + nextSeqStore, + &zc->seqStore, + partitions[i], + partitions[i + 1] + ); } - cSize = ZSTD_compressBlock_splitBlock_internal( + cSizeChunk = ZSTD_compressSeqStore_singleBlock( zc, - dst, + currSeqStore, + &dRep, + &cRep, + op, dstCapacity, - src, - srcSize, - lastBlock, - nbSeq + ip, + srcBytes, + lastBlockEntireSrc, + 1 ); { - nuint err_code = cSize; + nuint err_code = cSizeChunk; if (ERR_isError(err_code)) { return err_code; } } - return cSize; + ip += srcBytes; + op += cSizeChunk; + dstCapacity -= cSizeChunk; + cSize += cSizeChunk; + *currSeqStore = *nextSeqStore; + assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); } - private static nuint ZSTD_compressBlock_internal( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint frame - ) + memcpy(zc->blockState.prevCBlock->rep, dRep.rep, 
(uint)sizeof(repcodes_s)); + return cSize; + } + + private static nuint ZSTD_compressBlock_splitBlock( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) + { + uint nbSeq; + nuint cSize; + assert(zc->appliedParams.postBlockSplitter == ZSTD_paramSwitch_e.ZSTD_ps_enable); { - /* This is an estimated upper bound for the length of an rle block. - * This isn't the actual upper bound. - * Finding the real threshold needs further investigation. - */ - const uint rleMaxLength = 25; - nuint cSize; - byte* ip = (byte*)src; - byte* op = (byte*)dst; + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); { - nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); - { - nuint err_code = bss; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) + nuint err_code = bss; + if (ERR_isError(err_code)) { - if (zc->seqCollector.collectSequences != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) - ); - } - - cSize = 0; - goto @out; + return err_code; } } - if (zc->seqCollector.collectSequences != 0) + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) { + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + if (zc->seqCollector.collectSequences != 0) { - nuint err_code = ZSTD_copyBlockSequences( - &zc->seqCollector, - ZSTD_getSeqStore(zc), - zc->blockState.prevCBlock->rep + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) ); + } + + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + { + nuint err_code = cSize; if (ERR_isError(err_code)) { return err_code; } } - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return 0; + return cSize; } - cSize = ZSTD_entropyCompressSeqStore( - &zc->seqStore, - 
&zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - dst, - dstCapacity, - srcSize, - zc->tmpWorkspace, - zc->tmpWkspSize, - zc->bmi2 - ); - if ( - frame != 0 - && zc->isFirstBlock == 0 - && cSize < rleMaxLength - && ZSTD_isRLE(ip, srcSize) != 0 - ) - { - cSize = 1; - op[0] = ip[0]; - } + nbSeq = (uint)(zc->seqStore.sequences - zc->seqStore.sequencesStart); + } - @out: - if (!ERR_isError(cSize) && cSize > 1) + cSize = ZSTD_compressBlock_splitBlock_internal( + zc, + dst, + dstCapacity, + src, + srcSize, + lastBlock, + nbSeq + ); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return err_code; } + } - if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - return cSize; - } - - private static nuint ZSTD_compressBlock_targetCBlockSize_body( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - nuint bss, - uint lastBlock - ) + return cSize; + } + + private static nuint ZSTD_compressBlock_internal( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint frame + ) + { + /* This is an estimated upper bound for the length of an rle block. + * This isn't the actual upper bound. + * Finding the real threshold needs further investigation. 
+ */ + const uint rleMaxLength = 25; + nuint cSize; + byte* ip = (byte*)src; + byte* op = (byte*)dst; { - if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress) + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); { - if ( - zc->isFirstBlock == 0 - && ZSTD_maybeRLE(&zc->seqStore) != 0 - && ZSTD_isRLE((byte*)src, srcSize) != 0 - ) + nuint err_code = bss; + if (ERR_isError(err_code)) { - return ZSTD_rleCompressBlock(dst, dstCapacity, *(byte*)src, srcSize, lastBlock); + return err_code; } + } + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_noCompress) + { + if (zc->seqCollector.collectSequences != 0) { - nuint cSize = ZSTD_compressSuperBlock( - zc, - dst, - dstCapacity, - src, - srcSize, - lastBlock + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) ); - if ( - cSize - != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) - ) - { - nuint maxCSize = - srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) - { - ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); - return cSize; - } - } } - } - return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + cSize = 0; + goto @out; + } } - private static nuint ZSTD_compressBlock_targetCBlockSize( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastBlock - ) + if (zc->seqCollector.collectSequences != 0) { - nuint cSize = 0; - nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); { - nuint err_code = bss; + nuint err_code = ZSTD_copyBlockSequences( + &zc->seqCollector, + ZSTD_getSeqStore(zc), + zc->blockState.prevCBlock->rep + ); if (ERR_isError(err_code)) { return err_code; } } - cSize = ZSTD_compressBlock_targetCBlockSize_body( - zc, - dst, - dstCapacity, - src, - srcSize, - bss, - lastBlock - ); - { - nuint err_code = cSize; - if 
(ERR_isError(err_code)) - { - return err_code; - } - } + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return 0; + } - if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - return cSize; + cSize = ZSTD_entropyCompressSeqStore( + &zc->seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + dst, + dstCapacity, + srcSize, + zc->tmpWorkspace, + zc->tmpWkspSize, + zc->bmi2 + ); + if ( + frame != 0 + && zc->isFirstBlock == 0 + && cSize < rleMaxLength + && ZSTD_isRLE(ip, srcSize) != 0 + ) + { + cSize = 1; + op[0] = ip[0]; } - private static void ZSTD_overflowCorrectIfNeeded( - ZSTD_MatchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_CCtx_params_s* @params, - void* ip, - void* iend + @out: + if (!ERR_isError(cSize) && cSize > 1) + { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + } + + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + return cSize; + } + + private static nuint ZSTD_compressBlock_targetCBlockSize_body( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + nuint bss, + uint lastBlock + ) + { + if (bss == (nuint)ZSTD_BuildSeqStore_e.ZSTDbss_compress) { - uint cycleLog = ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy); - uint maxDist = (uint)1 << (int)@params->cParams.windowLog; if ( - ZSTD_window_needOverflowCorrection( - ms->window, - cycleLog, - maxDist, - ms->loadedDictEnd, - ip, - iend - ) != 0 + zc->isFirstBlock == 0 + && ZSTD_maybeRLE(&zc->seqStore) != 0 + && ZSTD_isRLE((byte*)src, srcSize) != 0 ) { - uint correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); - ZSTD_cwksp_mark_tables_dirty(ws); - 
ZSTD_reduceIndex(ms, @params, correction); - ZSTD_cwksp_mark_tables_clean(ws); - if (ms->nextToUpdate < correction) - ms->nextToUpdate = 0; - else - ms->nextToUpdate -= correction; - ms->loadedDictEnd = 0; - ms->dictMatchState = null; + return ZSTD_rleCompressBlock(dst, dstCapacity, *(byte*)src, srcSize, lastBlock); } - } -#if NET7_0_OR_GREATER + { + nuint cSize = ZSTD_compressSuperBlock( + zc, + dst, + dstCapacity, + src, + srcSize, + lastBlock + ); + if ( + cSize + != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) + ) + { + nuint maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) + { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + return cSize; + } + } + } + } + + return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); + } + + private static nuint ZSTD_compressBlock_targetCBlockSize( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) + { + nuint cSize = 0; + nuint bss = ZSTD_buildSeqStore(zc, src, srcSize); + { + nuint err_code = bss; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + cSize = ZSTD_compressBlock_targetCBlockSize_body( + zc, + dst, + dstCapacity, + src, + srcSize, + bss, + lastBlock + ); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + if ( + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + return cSize; + } + + private static void ZSTD_overflowCorrectIfNeeded( + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* ip, + void* iend + ) + { + uint cycleLog = ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy); + uint 
maxDist = (uint)1 << (int)@params->cParams.windowLog; + if ( + ZSTD_window_needOverflowCorrection( + ms->window, + cycleLog, + maxDist, + ms->loadedDictEnd, + ip, + iend + ) != 0 + ) + { + uint correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); + ZSTD_cwksp_mark_tables_dirty(ws); + ZSTD_reduceIndex(ms, @params, correction); + ZSTD_cwksp_mark_tables_clean(ws); + if (ms->nextToUpdate < correction) + ms->nextToUpdate = 0; + else + ms->nextToUpdate -= correction; + ms->loadedDictEnd = 0; + ms->dictMatchState = null; + } + } + +#if NET7_0_OR_GREATER private static ReadOnlySpan Span_splitLevels => new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; private static int* splitLevels => @@ -6438,171 +6438,182 @@ ref MemoryMarshal.GetReference(Span_splitLevels) ); #else - private static readonly int* splitLevels = GetArrayPointer( - new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 } - ); + private static readonly int* splitLevels = GetArrayPointer( + new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 } + ); #endif - private static nuint ZSTD_optimalBlockSize( - ZSTD_CCtx_s* cctx, - void* src, - nuint srcSize, - nuint blockSizeMax, - int splitLevel, - ZSTD_strategy strat, - long savings - ) + private static nuint ZSTD_optimalBlockSize( + ZSTD_CCtx_s* cctx, + void* src, + nuint srcSize, + nuint blockSizeMax, + int splitLevel, + ZSTD_strategy strat, + long savings + ) + { + if (srcSize < 128 * (1 << 10) || blockSizeMax < 128 * (1 << 10)) + return srcSize < blockSizeMax ? srcSize : blockSizeMax; + if (savings < 3) { - if (srcSize < 128 * (1 << 10) || blockSizeMax < 128 * (1 << 10)) - return srcSize < blockSizeMax ? 
srcSize : blockSizeMax; - if (savings < 3) - { - return 128 * (1 << 10); - } + return 128 * (1 << 10); + } - if (splitLevel == 1) - return 128 * (1 << 10); - if (splitLevel == 0) - { - assert(ZSTD_strategy.ZSTD_fast <= strat && strat <= ZSTD_strategy.ZSTD_btultra2); - splitLevel = splitLevels[(int)strat]; - } - else - { - assert(2 <= splitLevel && splitLevel <= 6); - splitLevel -= 2; - } + if (splitLevel == 1) + return 128 * (1 << 10); + if (splitLevel == 0) + { + assert(ZSTD_strategy.ZSTD_fast <= strat && strat <= ZSTD_strategy.ZSTD_btultra2); + splitLevel = splitLevels[(int)strat]; + } + else + { + assert(2 <= splitLevel && splitLevel <= 6); + splitLevel -= 2; + } - return ZSTD_splitBlock( - src, + return ZSTD_splitBlock( + src, + blockSizeMax, + splitLevel, + cctx->tmpWorkspace, + cctx->tmpWkspSize + ); + } + + /*! ZSTD_compress_frameChunk() : + * Compress a chunk of data into one or multiple blocks. + * All blocks will be terminated, all input will be consumed. + * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. + * Frame is supposed already started (header already produced) + * @return : compressed size, or an error code + */ + private static nuint ZSTD_compress_frameChunk( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastFrameChunk + ) + { + nuint blockSizeMax = cctx->blockSizeMax; + nuint remaining = srcSize; + byte* ip = (byte*)src; + byte* ostart = (byte*)dst; + byte* op = ostart; + uint maxDist = (uint)1 << (int)cctx->appliedParams.cParams.windowLog; + long savings = (long)cctx->consumedSrcSize - (long)cctx->producedCSize; + assert(cctx->appliedParams.cParams.windowLog <= (uint)(sizeof(nuint) == 4 ? 
30 : 31)); + if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0) + ZSTD_XXH64_update(&cctx->xxhState, src, srcSize); + while (remaining != 0) + { + ZSTD_MatchState_t* ms = &cctx->blockState.matchState; + nuint blockSize = ZSTD_optimalBlockSize( + cctx, + ip, + remaining, blockSizeMax, - splitLevel, - cctx->tmpWorkspace, - cctx->tmpWkspSize + cctx->appliedParams.preBlockSplitter_level, + cctx->appliedParams.cParams.strategy, + savings ); - } + uint lastBlock = lastFrameChunk & (uint)(blockSize == remaining ? 1 : 0); + assert(blockSize <= remaining); + if (dstCapacity < ZSTD_blockHeaderSize + (nuint)(1 + 1) + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - /*! ZSTD_compress_frameChunk() : - * Compress a chunk of data into one or multiple blocks. - * All blocks will be terminated, all input will be consumed. - * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. - * Frame is supposed already started (header already produced) - * @return : compressed size, or an error code - */ - private static nuint ZSTD_compress_frameChunk( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastFrameChunk - ) - { - nuint blockSizeMax = cctx->blockSizeMax; - nuint remaining = srcSize; - byte* ip = (byte*)src; - byte* ostart = (byte*)dst; - byte* op = ostart; - uint maxDist = (uint)1 << (int)cctx->appliedParams.cParams.windowLog; - long savings = (long)cctx->consumedSrcSize - (long)cctx->producedCSize; - assert(cctx->appliedParams.cParams.windowLog <= (uint)(sizeof(nuint) == 4 ? 
30 : 31)); - if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0) - ZSTD_XXH64_update(&cctx->xxhState, src, srcSize); - while (remaining != 0) - { - ZSTD_MatchState_t* ms = &cctx->blockState.matchState; - nuint blockSize = ZSTD_optimalBlockSize( - cctx, - ip, - remaining, - blockSizeMax, - cctx->appliedParams.preBlockSplitter_level, - cctx->appliedParams.cParams.strategy, - savings - ); - uint lastBlock = lastFrameChunk & (uint)(blockSize == remaining ? 1 : 0); - assert(blockSize <= remaining); - if (dstCapacity < ZSTD_blockHeaderSize + (nuint)(1 + 1) + 1) + ZSTD_overflowCorrectIfNeeded( + ms, + &cctx->workspace, + &cctx->appliedParams, + ip, + ip + blockSize + ); + ZSTD_checkDictValidity( + &ms->window, + ip + blockSize, + maxDist, + &ms->loadedDictEnd, + &ms->dictMatchState + ); + ZSTD_window_enforceMaxDist( + &ms->window, + ip, + maxDist, + &ms->loadedDictEnd, + &ms->dictMatchState + ); + if (ms->nextToUpdate < ms->window.lowLimit) + ms->nextToUpdate = ms->window.lowLimit; + { + nuint cSize; + if (ZSTD_useTargetCBlockSize(&cctx->appliedParams) != 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + cSize = ZSTD_compressBlock_targetCBlockSize( + cctx, + op, + dstCapacity, + ip, + blockSize, + lastBlock + ); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } - ZSTD_overflowCorrectIfNeeded( - ms, - &cctx->workspace, - &cctx->appliedParams, - ip, - ip + blockSize - ); - ZSTD_checkDictValidity( - &ms->window, - ip + blockSize, - maxDist, - &ms->loadedDictEnd, - &ms->dictMatchState - ); - ZSTD_window_enforceMaxDist( - &ms->window, - ip, - maxDist, - &ms->loadedDictEnd, - &ms->dictMatchState - ); - if (ms->nextToUpdate < ms->window.lowLimit) - ms->nextToUpdate = ms->window.lowLimit; + assert(cSize > 0); + assert(cSize <= blockSize + ZSTD_blockHeaderSize); + } + else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams) != 0) { - nuint cSize; - if 
(ZSTD_useTargetCBlockSize(&cctx->appliedParams) != 0) + cSize = ZSTD_compressBlock_splitBlock( + cctx, + op, + dstCapacity, + ip, + blockSize, + lastBlock + ); { - cSize = ZSTD_compressBlock_targetCBlockSize( - cctx, - op, - dstCapacity, - ip, - blockSize, - lastBlock - ); + nuint err_code = cSize; + if (ERR_isError(err_code)) { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - assert(cSize > 0); - assert(cSize <= blockSize + ZSTD_blockHeaderSize); } - else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams) != 0) + + assert(cSize > 0 || cctx->seqCollector.collectSequences == 1); + } + else + { + cSize = ZSTD_compressBlock_internal( + cctx, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + ip, + blockSize, + 1 + ); { - cSize = ZSTD_compressBlock_splitBlock( - cctx, - op, - dstCapacity, - ip, - blockSize, - lastBlock - ); + nuint err_code = cSize; + if (ERR_isError(err_code)) { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - assert(cSize > 0 || cctx->seqCollector.collectSequences == 1); } - else + + if (cSize == 0) { - cSize = ZSTD_compressBlock_internal( - cctx, - op + ZSTD_blockHeaderSize, - dstCapacity - ZSTD_blockHeaderSize, - ip, - blockSize, - 1 - ); + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -6610,4924 +6621,4912 @@ uint lastFrameChunk return err_code; } } - - if (cSize == 0) - { - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - } - else - { - uint cBlockHeader = - cSize == 1 - ? 
lastBlock - + ((uint)blockType_e.bt_rle << 1) - + (uint)(blockSize << 3) - : lastBlock - + ((uint)blockType_e.bt_compressed << 1) - + (uint)(cSize << 3); - MEM_writeLE24(op, cBlockHeader); - cSize += ZSTD_blockHeaderSize; - } } - - savings += (long)blockSize - (long)cSize; - ip += blockSize; - assert(remaining >= blockSize); - remaining -= blockSize; - op += cSize; - assert(dstCapacity >= cSize); - dstCapacity -= cSize; - cctx->isFirstBlock = 0; + else + { + uint cBlockHeader = + cSize == 1 + ? lastBlock + + ((uint)blockType_e.bt_rle << 1) + + (uint)(blockSize << 3) + : lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + cSize += ZSTD_blockHeaderSize; + } } + + savings += (long)blockSize - (long)cSize; + ip += blockSize; + assert(remaining >= blockSize); + remaining -= blockSize; + op += cSize; + assert(dstCapacity >= cSize); + dstCapacity -= cSize; + cctx->isFirstBlock = 0; } + } - if (lastFrameChunk != 0 && op > ostart) - cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ending; - return (nuint)(op - ostart); + if (lastFrameChunk != 0 && op > ostart) + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ending; + return (nuint)(op - ostart); + } + + private static nuint ZSTD_writeFrameHeader( + void* dst, + nuint dstCapacity, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + uint dictID + ) + { + byte* op = (byte*)dst; + /* 0-3 */ + uint dictIDSizeCodeLength = (uint)( + (dictID > 0 ? 1 : 0) + (dictID >= 256 ? 1 : 0) + (dictID >= 65536 ? 1 : 0) + ); + /* 0-3 */ + uint dictIDSizeCode = @params->fParams.noDictIDFlag != 0 ? 0 : dictIDSizeCodeLength; + uint checksumFlag = @params->fParams.checksumFlag > 0 ? 1U : 0U; + uint windowSize = (uint)1 << (int)@params->cParams.windowLog; + uint singleSegment = + @params->fParams.contentSizeFlag != 0 && windowSize >= pledgedSrcSize ? 
1U : 0U; + byte windowLogByte = (byte)(@params->cParams.windowLog - 10 << 3); + uint fcsCode = (uint)( + @params->fParams.contentSizeFlag != 0 + ? (pledgedSrcSize >= 256 ? 1 : 0) + + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) + + (pledgedSrcSize >= 0xFFFFFFFFU ? 1 : 0) + : 0 + ); + byte frameHeaderDescriptionByte = (byte)( + dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6) + ); + nuint pos = 0; + assert( + !(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1)) + ); + if (dstCapacity < 18) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - private static nuint ZSTD_writeFrameHeader( - void* dst, - nuint dstCapacity, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize, - uint dictID - ) + if (@params->format == ZSTD_format_e.ZSTD_f_zstd1) { - byte* op = (byte*)dst; - /* 0-3 */ - uint dictIDSizeCodeLength = (uint)( - (dictID > 0 ? 1 : 0) + (dictID >= 256 ? 1 : 0) + (dictID >= 65536 ? 1 : 0) - ); - /* 0-3 */ - uint dictIDSizeCode = @params->fParams.noDictIDFlag != 0 ? 0 : dictIDSizeCodeLength; - uint checksumFlag = @params->fParams.checksumFlag > 0 ? 1U : 0U; - uint windowSize = (uint)1 << (int)@params->cParams.windowLog; - uint singleSegment = - @params->fParams.contentSizeFlag != 0 && windowSize >= pledgedSrcSize ? 1U : 0U; - byte windowLogByte = (byte)(@params->cParams.windowLog - 10 << 3); - uint fcsCode = (uint)( - @params->fParams.contentSizeFlag != 0 - ? (pledgedSrcSize >= 256 ? 1 : 0) - + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) - + (pledgedSrcSize >= 0xFFFFFFFFU ? 
1 : 0) - : 0 - ); - byte frameHeaderDescriptionByte = (byte)( - dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6) - ); - nuint pos = 0; - assert( - !(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1)) - ); - if (dstCapacity < 18) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + MEM_writeLE32(dst, 0xFD2FB528); + pos = 4; + } - if (@params->format == ZSTD_format_e.ZSTD_f_zstd1) - { - MEM_writeLE32(dst, 0xFD2FB528); - pos = 4; - } + op[pos++] = frameHeaderDescriptionByte; + if (singleSegment == 0) + op[pos++] = windowLogByte; + switch (dictIDSizeCode) + { + default: + assert(0 != 0); + goto case 0; + case 0: + break; + case 1: + op[pos] = (byte)dictID; + pos++; + break; + case 2: + MEM_writeLE16(op + pos, (ushort)dictID); + pos += 2; + break; + case 3: + MEM_writeLE32(op + pos, dictID); + pos += 4; + break; + } + + switch (fcsCode) + { + default: + assert(0 != 0); + goto case 0; + case 0: + if (singleSegment != 0) + op[pos++] = (byte)pledgedSrcSize; + break; + case 1: + MEM_writeLE16(op + pos, (ushort)(pledgedSrcSize - 256)); + pos += 2; + break; + case 2: + MEM_writeLE32(op + pos, (uint)pledgedSrcSize); + pos += 4; + break; + case 3: + MEM_writeLE64(op + pos, pledgedSrcSize); + pos += 8; + break; + } + + return pos; + } - op[pos++] = frameHeaderDescriptionByte; - if (singleSegment == 0) - op[pos++] = windowLogByte; - switch (dictIDSizeCode) - { - default: - assert(0 != 0); - goto case 0; - case 0: - break; - case 1: - op[pos] = (byte)dictID; - pos++; - break; - case 2: - MEM_writeLE16(op + pos, (ushort)dictID); - pos += 2; - break; - case 3: - MEM_writeLE32(op + pos, dictID); - pos += 4; - break; - } + /* ZSTD_writeSkippableFrame_advanced() : + * Writes out a skippable frame with the specified magic number variant (16 are supported), + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data. 
+ * + * Returns the total number of bytes written, or a ZSTD error code. + */ + public static nuint ZSTD_writeSkippableFrame( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint magicVariant + ) + { + byte* op = (byte*)dst; + if (dstCapacity < srcSize + 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - switch (fcsCode) - { - default: - assert(0 != 0); - goto case 0; - case 0: - if (singleSegment != 0) - op[pos++] = (byte)pledgedSrcSize; - break; - case 1: - MEM_writeLE16(op + pos, (ushort)(pledgedSrcSize - 256)); - pos += 2; - break; - case 2: - MEM_writeLE32(op + pos, (uint)pledgedSrcSize); - pos += 4; - break; - case 3: - MEM_writeLE64(op + pos, pledgedSrcSize); - pos += 8; - break; - } + if (srcSize > 0xFFFFFFFF) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - return pos; + if (magicVariant > 15) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /* ZSTD_writeSkippableFrame_advanced() : - * Writes out a skippable frame with the specified magic number variant (16 are supported), - * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data. - * - * Returns the total number of bytes written, or a ZSTD error code. 
- */ - public static nuint ZSTD_writeSkippableFrame( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint magicVariant - ) + MEM_writeLE32(op, 0x184D2A50 + magicVariant); + MEM_writeLE32(op + 4, (uint)srcSize); + memcpy(op + 8, src, (uint)srcSize); + return srcSize + 8; + } + + /* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapacity` is too small ( 0xFFFFFFFF) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + { + /*lastBlock*/ + uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); + MEM_writeLE24(dst, cBlockHeader24); + return ZSTD_blockHeaderSize; + } + } - if (magicVariant > 15) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); - } + /* ZSTD_referenceExternalSequences() : + * Must be called before starting a compression operation. + * seqs must parse a prefix of the source. + * This cannot be used when long range matching is enabled. + * Zstd will use these sequences, and pass the literals to a secondary block + * compressor. + * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory + * access and data corruption. 
+ */ + private static void ZSTD_referenceExternalSequences( + ZSTD_CCtx_s* cctx, + rawSeq* seq, + nuint nbSeq + ) + { + assert(cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init); + assert( + nbSeq == 0 + || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable + ); + cctx->externSeqStore.seq = seq; + cctx->externSeqStore.size = nbSeq; + cctx->externSeqStore.capacity = nbSeq; + cctx->externSeqStore.pos = 0; + cctx->externSeqStore.posInSequence = 0; + } - MEM_writeLE32(op, 0x184D2A50 + magicVariant); - MEM_writeLE32(op + 4, (uint)srcSize); - memcpy(op + 8, src, (uint)srcSize); - return srcSize + 8; + private static nuint ZSTD_compressContinue_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint frame, + uint lastFrameChunk + ) + { + ZSTD_MatchState_t* ms = &cctx->blockState.matchState; + nuint fhSize = 0; + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /* ZSTD_writeLastEmptyBlock() : - * output an empty Block with end-of-frame mark to complete a frame - * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) - * or an error code if `dstCapacity` is too small (stage == ZSTD_compressionStage_e.ZSTDcs_init) { - if (dstCapacity < ZSTD_blockHeaderSize) + fhSize = ZSTD_writeFrameHeader( + dst, + dstCapacity, + &cctx->appliedParams, + cctx->pledgedSrcSizePlusOne - 1, + cctx->dictID + ); { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + nuint err_code = fhSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - { - /*lastBlock*/ - uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); - MEM_writeLE24(dst, cBlockHeader24); - return ZSTD_blockHeaderSize; - } + assert(fhSize <= dstCapacity); + dstCapacity -= fhSize; + dst = (sbyte*)dst + fhSize; + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; } - /* 
ZSTD_referenceExternalSequences() : - * Must be called before starting a compression operation. - * seqs must parse a prefix of the source. - * This cannot be used when long range matching is enabled. - * Zstd will use these sequences, and pass the literals to a secondary block - * compressor. - * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory - * access and data corruption. - */ - private static void ZSTD_referenceExternalSequences( - ZSTD_CCtx_s* cctx, - rawSeq* seq, - nuint nbSeq - ) - { - assert(cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init); - assert( - nbSeq == 0 - || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable - ); - cctx->externSeqStore.seq = seq; - cctx->externSeqStore.size = nbSeq; - cctx->externSeqStore.capacity = nbSeq; - cctx->externSeqStore.pos = 0; - cctx->externSeqStore.posInSequence = 0; - } - - private static nuint ZSTD_compressContinue_internal( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint frame, - uint lastFrameChunk - ) + if (srcSize == 0) + return fhSize; + if (ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous) == 0) { - ZSTD_MatchState_t* ms = &cctx->blockState.matchState; - nuint fhSize = 0; - if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + ms->forceNonContiguous = 0; + ms->nextToUpdate = ms->window.dictLimit; + } - if (frame != 0 && cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) + if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_window_update(&cctx->ldmState.window, src, srcSize, 0); + } + + if (frame == 0) + { + ZSTD_overflowCorrectIfNeeded( + ms, + &cctx->workspace, + &cctx->appliedParams, + src, + (byte*)src + srcSize + ); + } + + { + nuint cSize = + frame != 0 + ? 
ZSTD_compress_frameChunk( + cctx, + dst, + dstCapacity, + src, + srcSize, + lastFrameChunk + ) + : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); { - fhSize = ZSTD_writeFrameHeader( - dst, - dstCapacity, - &cctx->appliedParams, - cctx->pledgedSrcSizePlusOne - 1, - cctx->dictID - ); + nuint err_code = cSize; + if (ERR_isError(err_code)) { - nuint err_code = fhSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - assert(fhSize <= dstCapacity); - dstCapacity -= fhSize; - dst = (sbyte*)dst + fhSize; - cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; } - if (srcSize == 0) - return fhSize; - if (ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous) == 0) + cctx->consumedSrcSize += srcSize; + cctx->producedCSize += cSize + fhSize; + assert( + !( + cctx->appliedParams.fParams.contentSizeFlag != 0 + && cctx->pledgedSrcSizePlusOne == 0 + ) + ); + if (cctx->pledgedSrcSizePlusOne != 0) { - ms->forceNonContiguous = 0; - ms->nextToUpdate = ms->window.dictLimit; + if (cctx->consumedSrcSize + 1 > cctx->pledgedSrcSizePlusOne) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } } - if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + return cSize + fhSize; + } + } + + private static nuint ZSTD_compressContinue_public( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); + } + + /* NOTE: Must just wrap ZSTD_compressContinue_public() */ + public static nuint ZSTD_compressContinue( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); + } + + private static nuint ZSTD_getBlockSize_deprecated(ZSTD_CCtx_s* cctx) + { + ZSTD_compressionParameters cParams = cctx->appliedParams.cParams; + 
assert(ZSTD_checkCParams(cParams) == 0); + return cctx->appliedParams.maxBlockSize < (nuint)1 << (int)cParams.windowLog + ? cctx->appliedParams.maxBlockSize + : (nuint)1 << (int)cParams.windowLog; + } + + /* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ + public static nuint ZSTD_getBlockSize(ZSTD_CCtx_s* cctx) + { + return ZSTD_getBlockSize_deprecated(cctx); + } + + /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ + private static nuint ZSTD_compressBlock_deprecated( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + { + nuint blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); + if (srcSize > blockSizeMax) { - ZSTD_window_update(&cctx->ldmState.window, src, srcSize, 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } + } + + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); + } - if (frame == 0) + /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ + public static nuint ZSTD_compressBlock( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); + } + + /*! ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ + private static nuint ZSTD_loadDictionaryContent( + ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* src, + nuint srcSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) + { + byte* ip = (byte*)src; + byte* iend = ip + srcSize; + int loadLdmDict = + @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null + ? 1 + : 0; + ZSTD_assertEqualCParams(@params->cParams, ms->cParams); + { + /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. + * Dictionaries right at the edge will immediately trigger overflow + * correction, but I don't want to insert extra constraints here. 
+ */ + uint maxDictSize = (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 2; + int CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&@params->cParams); + if (CDictTaggedIndices != 0 && tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) { - ZSTD_overflowCorrectIfNeeded( - ms, - &cctx->workspace, - &cctx->appliedParams, - src, - (byte*)src + srcSize - ); + /* Some dictionary matchfinders in zstd use "short cache", + * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each + * CDict hashtable entry as a tag rather than as part of an index. + * When short cache is used, we need to truncate the dictionary + * so that its indices don't overlap with the tag. */ + const uint shortCacheMaxDictSize = (1U << 32 - 8) - 2; + maxDictSize = + maxDictSize < shortCacheMaxDictSize ? maxDictSize : shortCacheMaxDictSize; + assert(loadLdmDict == 0); } + if (srcSize > maxDictSize) { - nuint cSize = - frame != 0 - ? ZSTD_compress_frameChunk( - cctx, - dst, - dstCapacity, - src, - srcSize, - lastFrameChunk - ) - : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - cctx->consumedSrcSize += srcSize; - cctx->producedCSize += cSize + fhSize; - assert( - !( - cctx->appliedParams.fParams.contentSizeFlag != 0 - && cctx->pledgedSrcSizePlusOne == 0 - ) - ); - if (cctx->pledgedSrcSizePlusOne != 0) - { - if (cctx->consumedSrcSize + 1 > cctx->pledgedSrcSizePlusOne) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - } - - return cSize + fhSize; + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; } } - private static nuint ZSTD_compressContinue_public( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize + if ( + srcSize + > unchecked((uint)-1) - (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) ) { - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); + assert(ZSTD_window_isEmpty(ms->window) != 0); +#if DEBUG + if (loadLdmDict != 0) + assert(ZSTD_window_isEmpty(ls->window) != 0); +#endif } - /* NOTE: Must just wrap ZSTD_compressContinue_public() */ - public static nuint ZSTD_compressContinue( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + ZSTD_window_update(&ms->window, src, srcSize, 0); + if (loadLdmDict != 0) { - return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); + ZSTD_window_update(&ls->window, src, srcSize, 0); + ls->loadedDictEnd = @params->forceWindow != 0 ? 0 : (uint)(iend - ls->window.@base); + ZSTD_ldm_fillHashTable(ls, ip, iend, &@params->ldmParams); } - private static nuint ZSTD_getBlockSize_deprecated(ZSTD_CCtx_s* cctx) { - ZSTD_compressionParameters cParams = cctx->appliedParams.cParams; - assert(ZSTD_checkCParams(cParams) == 0); - return cctx->appliedParams.maxBlockSize < (nuint)1 << (int)cParams.windowLog - ? cctx->appliedParams.maxBlockSize - : (nuint)1 << (int)cParams.windowLog; + uint maxDictSize = + 1U + << (int)( + ( + @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 + ? @params->cParams.hashLog + 3 + : @params->cParams.chainLog + 1 + ) < 31 + ? @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 + ? @params->cParams.hashLog + 3 + : @params->cParams.chainLog + 1 + : 31 + ); + if (srcSize > maxDictSize) + { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } } - /* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ - public static nuint ZSTD_getBlockSize(ZSTD_CCtx_s* cctx) + ms->nextToUpdate = (uint)(ip - ms->window.@base); + ms->loadedDictEnd = @params->forceWindow != 0 ? 
0 : (uint)(iend - ms->window.@base); + ms->forceNonContiguous = @params->deterministicRefPrefix; + if (srcSize <= 8) + return 0; + ZSTD_overflowCorrectIfNeeded(ms, ws, @params, ip, iend); + switch (@params->cParams.strategy) + { + case ZSTD_strategy.ZSTD_fast: + ZSTD_fillHashTable(ms, iend, dtlm, tfp); + break; + case ZSTD_strategy.ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + assert(srcSize >= 8); + if (ms->dedicatedDictSearch != 0) + { + assert(ms->chainTable != null); + ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend - 8); + } + else + { + assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); + if (@params->useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + nuint tagTableSize = (nuint)1 << (int)@params->cParams.hashLog; + memset(ms->tagTable, 0, (uint)tagTableSize); + ZSTD_row_update(ms, iend - 8); + } + else + { + ZSTD_insertAndFindFirstIndex(ms, iend - 8); + } + } + + break; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + assert(srcSize >= 8); + ZSTD_updateTree(ms, iend - 8, iend); + break; + default: + assert(0 != 0); + break; + } + + ms->nextToUpdate = (uint)(iend - ms->window.@base); + return 0; + } + + /* Dictionaries that assign zero probability to symbols that show up causes problems + * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check + * and only dictionaries with 100% valid symbols can be assumed valid. 
+ */ + private static FSE_repeat ZSTD_dictNCountRepeat( + short* normalizedCounter, + uint dictMaxSymbolValue, + uint maxSymbolValue + ) + { + uint s; + if (dictMaxSymbolValue < maxSymbolValue) { - return ZSTD_getBlockSize_deprecated(cctx); + return FSE_repeat.FSE_repeat_check; } - /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ - private static nuint ZSTD_compressBlock_deprecated( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + for (s = 0; s <= maxSymbolValue; ++s) { + if (normalizedCounter[s] == 0) { - nuint blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); - if (srcSize > blockSizeMax) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + return FSE_repeat.FSE_repeat_check; } - - return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); } - /* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ - public static nuint ZSTD_compressBlock( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + return FSE_repeat.FSE_repeat_valid; + } + + /* ZSTD_loadCEntropy() : + * dict : must point at beginning of a valid zstd dictionary. 
+ * return : size of dictionary header (size of magic number + dict ID + entropy tables) + * assumptions : magic number supposed already checked + * and dictSize >= 8 */ + private static nuint ZSTD_loadCEntropy( + ZSTD_compressedBlockState_t* bs, + void* workspace, + void* dict, + nuint dictSize + ) + { + short* offcodeNCount = stackalloc short[32]; + uint offcodeMaxValue = 31; + /* skip magic num and dict ID */ + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + dictPtr += 8; + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_check; { - return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); + uint maxSymbolValue = 255; + uint hasZeroWeights = 1; + nuint hufHeaderSize = HUF_readCTable( + &bs->entropy.huf.CTable.e0, + &maxSymbolValue, + dictPtr, + (nuint)(dictEnd - dictPtr), + &hasZeroWeights + ); + if (hasZeroWeights == 0 && maxSymbolValue == 255) + bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_valid; + if (ERR_isError(hufHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dictPtr += hufHeaderSize; } - /*! ZSTD_loadDictionaryContent() : - * @return : 0, or an error code - */ - private static nuint ZSTD_loadDictionaryContent( - ZSTD_MatchState_t* ms, - ldmState_t* ls, - ZSTD_cwksp* ws, - ZSTD_CCtx_params_s* @params, - void* src, - nuint srcSize, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp - ) { - byte* ip = (byte*)src; - byte* iend = ip + srcSize; - int loadLdmDict = - @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null - ? 1 - : 0; - ZSTD_assertEqualCParams(@params->cParams, ms->cParams); - { - /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. - * Dictionaries right at the edge will immediately trigger overflow - * correction, but I don't want to insert extra constraints here. - */ - uint maxDictSize = (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) - 2; - int CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&@params->cParams); - if (CDictTaggedIndices != 0 && tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) - { - /* Some dictionary matchfinders in zstd use "short cache", - * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each - * CDict hashtable entry as a tag rather than as part of an index. - * When short cache is used, we need to truncate the dictionary - * so that its indices don't overlap with the tag. */ - const uint shortCacheMaxDictSize = (1U << 32 - 8) - 2; - maxDictSize = - maxDictSize < shortCacheMaxDictSize ? maxDictSize : shortCacheMaxDictSize; - assert(loadLdmDict == 0); - } + uint offcodeLog; + nuint offcodeHeaderSize = FSE_readNCount( + offcodeNCount, + &offcodeMaxValue, + &offcodeLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(offcodeHeaderSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } - if (srcSize > maxDictSize) - { - ip = iend - maxDictSize; - src = ip; - srcSize = maxDictSize; - } + if (offcodeLog > 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } if ( - srcSize - > unchecked((uint)-1) - (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20)) + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.offcodeCTable, + offcodeNCount, + 31, + offcodeLog, + workspace, + (8 << 10) + 512 + ) + ) ) { - assert(ZSTD_window_isEmpty(ms->window) != 0); -#if DEBUG - if (loadLdmDict != 0) - assert(ZSTD_window_isEmpty(ls->window) != 0); -#endif + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - ZSTD_window_update(&ms->window, src, srcSize, 0); - if (loadLdmDict != 0) + dictPtr += offcodeHeaderSize; + } + + { + short* matchlengthNCount = stackalloc short[53]; + uint matchlengthMaxValue = 52, + matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount( + matchlengthNCount, + &matchlengthMaxValue, + &matchlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(matchlengthHeaderSize)) { - ZSTD_window_update(&ls->window, src, srcSize, 0); - ls->loadedDictEnd = @params->forceWindow != 0 ? 0 : (uint)(iend - ls->window.@base); - ZSTD_ldm_fillHashTable(ls, ip, iend, &@params->ldmParams); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } + if (matchlengthLog > 9) { - uint maxDictSize = - 1U - << (int)( - ( - @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 - ? @params->cParams.hashLog + 3 - : @params->cParams.chainLog + 1 - ) < 31 - ? @params->cParams.hashLog + 3 > @params->cParams.chainLog + 1 - ? @params->cParams.hashLog + 3 - : @params->cParams.chainLog + 1 - : 31 - ); - if (srcSize > maxDictSize) - { - ip = iend - maxDictSize; - src = ip; - srcSize = maxDictSize; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - ms->nextToUpdate = (uint)(ip - ms->window.@base); - ms->loadedDictEnd = @params->forceWindow != 0 ? 
0 : (uint)(iend - ms->window.@base); - ms->forceNonContiguous = @params->deterministicRefPrefix; - if (srcSize <= 8) - return 0; - ZSTD_overflowCorrectIfNeeded(ms, ws, @params, ip, iend); - switch (@params->cParams.strategy) + if ( + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.matchlengthCTable, + matchlengthNCount, + matchlengthMaxValue, + matchlengthLog, + workspace, + (8 << 10) + 512 + ) + ) + ) { - case ZSTD_strategy.ZSTD_fast: - ZSTD_fillHashTable(ms, iend, dtlm, tfp); - break; - case ZSTD_strategy.ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); - break; - case ZSTD_strategy.ZSTD_greedy: - case ZSTD_strategy.ZSTD_lazy: - case ZSTD_strategy.ZSTD_lazy2: - assert(srcSize >= 8); - if (ms->dedicatedDictSearch != 0) - { - assert(ms->chainTable != null); - ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend - 8); - } - else - { - assert(@params->useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); - if (@params->useRowMatchFinder == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - nuint tagTableSize = (nuint)1 << (int)@params->cParams.hashLog; - memset(ms->tagTable, 0, (uint)tagTableSize); - ZSTD_row_update(ms, iend - 8); - } - else - { - ZSTD_insertAndFindFirstIndex(ms, iend - 8); - } - } - - break; - case ZSTD_strategy.ZSTD_btlazy2: - case ZSTD_strategy.ZSTD_btopt: - case ZSTD_strategy.ZSTD_btultra: - case ZSTD_strategy.ZSTD_btultra2: - assert(srcSize >= 8); - ZSTD_updateTree(ms, iend - 8, iend); - break; - default: - assert(0 != 0); - break; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - ms->nextToUpdate = (uint)(iend - ms->window.@base); - return 0; + bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat( + matchlengthNCount, + matchlengthMaxValue, + 52 + ); + dictPtr += matchlengthHeaderSize; } - /* Dictionaries that assign zero probability to symbols that show up causes problems - * when FSE encoding. 
Mark dictionaries with zero probability symbols as FSE_repeat_check - * and only dictionaries with 100% valid symbols can be assumed valid. - */ - private static FSE_repeat ZSTD_dictNCountRepeat( - short* normalizedCounter, - uint dictMaxSymbolValue, - uint maxSymbolValue - ) { - uint s; - if (dictMaxSymbolValue < maxSymbolValue) + short* litlengthNCount = stackalloc short[36]; + uint litlengthMaxValue = 35, + litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount( + litlengthNCount, + &litlengthMaxValue, + &litlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(litlengthHeaderSize)) { - return FSE_repeat.FSE_repeat_check; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - for (s = 0; s <= maxSymbolValue; ++s) + if (litlengthLog > 9) { - if (normalizedCounter[s] == 0) - { - return FSE_repeat.FSE_repeat_check; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if ( + ERR_isError( + FSE_buildCTable_wksp( + bs->entropy.fse.litlengthCTable, + litlengthNCount, + litlengthMaxValue, + litlengthLog, + workspace, + (8 << 10) + 512 + ) + ) + ) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - return FSE_repeat.FSE_repeat_valid; + bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat( + litlengthNCount, + litlengthMaxValue, + 35 + ); + dictPtr += litlengthHeaderSize; } - /* ZSTD_loadCEntropy() : - * dict : must point at beginning of a valid zstd dictionary. 
- * return : size of dictionary header (size of magic number + dict ID + entropy tables) - * assumptions : magic number supposed already checked - * and dictSize >= 8 */ - private static nuint ZSTD_loadCEntropy( - ZSTD_compressedBlockState_t* bs, - void* workspace, - void* dict, - nuint dictSize - ) + if (dictPtr + 12 > dictEnd) { - short* offcodeNCount = stackalloc short[32]; - uint offcodeMaxValue = 31; - /* skip magic num and dict ID */ - byte* dictPtr = (byte*)dict; - byte* dictEnd = dictPtr + dictSize; - dictPtr += 8; - bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_check; - { - uint maxSymbolValue = 255; - uint hasZeroWeights = 1; - nuint hufHeaderSize = HUF_readCTable( - &bs->entropy.huf.CTable.e0, - &maxSymbolValue, - dictPtr, - (nuint)(dictEnd - dictPtr), - &hasZeroWeights - ); - if (hasZeroWeights == 0 && maxSymbolValue == 255) - bs->entropy.huf.repeatMode = HUF_repeat.HUF_repeat_valid; - if (ERR_isError(hufHeaderSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } - dictPtr += hufHeaderSize; + bs->rep[0] = MEM_readLE32(dictPtr + 0); + bs->rep[1] = MEM_readLE32(dictPtr + 4); + bs->rep[2] = MEM_readLE32(dictPtr + 8); + dictPtr += 12; + { + nuint dictContentSize = (nuint)(dictEnd - dictPtr); + uint offcodeMax = 31; + if (dictContentSize <= unchecked((uint)-1) - 128 * (1 << 10)) + { + /* The maximum offset that must be supported */ + uint maxOffset = (uint)dictContentSize + 128 * (1 << 10); + offcodeMax = ZSTD_highbit32(maxOffset); } + bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat( + offcodeNCount, + offcodeMaxValue, + offcodeMax < 31 ? 
offcodeMax : 31 + ); { - uint offcodeLog; - nuint offcodeHeaderSize = FSE_readNCount( - offcodeNCount, - &offcodeMaxValue, - &offcodeLog, - dictPtr, - (nuint)(dictEnd - dictPtr) - ); - if (ERR_isError(offcodeHeaderSize)) + uint u; + for (u = 0; u < 3; u++) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } + if (bs->rep[u] == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); + } - if (offcodeLog > 8) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + if (bs->rep[u] > dictContentSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); + } } + } + } - if ( - ERR_isError( - FSE_buildCTable_wksp( - bs->entropy.fse.offcodeCTable, - offcodeNCount, - 31, - offcodeLog, - workspace, - (8 << 10) + 512 - ) - ) - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } + return (nuint)(dictPtr - (byte*)dict); + } - dictPtr += offcodeHeaderSize; + /* Dictionary format : + * See : + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format + */ + /*! ZSTD_loadZstdDictionary() : + * @return : dictID, or an error code + * assumptions : magic number supposed already checked + * dictSize supposed >= 8 + */ + private static nuint ZSTD_loadZstdDictionary( + ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* dict, + nuint dictSize, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace + ) + { + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + nuint dictID; + nuint eSize; + assert(dictSize >= 8); + assert(MEM_readLE32(dictPtr) == 0xEC30A437); + dictID = @params->fParams.noDictIDFlag != 0 ? 
0 : MEM_readLE32(dictPtr + 4); + eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize); + { + nuint err_code = eSize; + if (ERR_isError(err_code)) + { + return err_code; } + } + dictPtr += eSize; + { + nuint dictContentSize = (nuint)(dictEnd - dictPtr); { - short* matchlengthNCount = stackalloc short[53]; - uint matchlengthMaxValue = 52, - matchlengthLog; - nuint matchlengthHeaderSize = FSE_readNCount( - matchlengthNCount, - &matchlengthMaxValue, - &matchlengthLog, + nuint err_code = ZSTD_loadDictionaryContent( + ms, + null, + ws, + @params, dictPtr, - (nuint)(dictEnd - dictPtr) + dictContentSize, + dtlm, + tfp ); - if (ERR_isError(matchlengthHeaderSize)) + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return err_code; } + } + } - if (matchlengthLog > 9) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if ( - ERR_isError( - FSE_buildCTable_wksp( - bs->entropy.fse.matchlengthCTable, - matchlengthNCount, - matchlengthMaxValue, - matchlengthLog, - workspace, - (8 << 10) + 512 - ) - ) - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat( - matchlengthNCount, - matchlengthMaxValue, - 52 - ); - dictPtr += matchlengthHeaderSize; - } + return dictID; + } + /** ZSTD_compress_insertDictionary() : + * @return : dictID, or an error code */ + private static nuint ZSTD_compress_insertDictionary( + ZSTD_compressedBlockState_t* bs, + ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params_s* @params, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, + void* workspace + ) + { + if (dict == null || dictSize < 8) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) { - short* litlengthNCount = stackalloc short[36]; - uint 
litlengthMaxValue = 35, - litlengthLog; - nuint litlengthHeaderSize = FSE_readNCount( - litlengthNCount, - &litlengthMaxValue, - &litlengthLog, - dictPtr, - (nuint)(dictEnd - dictPtr) - ); - if (ERR_isError(litlengthHeaderSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (litlengthLog > 9) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + } - if ( - ERR_isError( - FSE_buildCTable_wksp( - bs->entropy.fse.litlengthCTable, - litlengthNCount, - litlengthMaxValue, - litlengthLog, - workspace, - (8 << 10) + 512 - ) - ) - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } + return 0; + } - bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat( - litlengthNCount, - litlengthMaxValue, - 35 + ZSTD_reset_compressedBlockState(bs); + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) + return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); + if (MEM_readLE32(dict) != 0xEC30A437) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_auto) + { + return ZSTD_loadDictionaryContent( + ms, + ls, + ws, + @params, + dict, + dictSize, + dtlm, + tfp ); - dictPtr += litlengthHeaderSize; } - if (dictPtr + 12 > dictEnd) + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); } - bs->rep[0] = MEM_readLE32(dictPtr + 0); - bs->rep[1] = MEM_readLE32(dictPtr + 4); - bs->rep[2] = MEM_readLE32(dictPtr + 8); - dictPtr += 12; - { - nuint dictContentSize = (nuint)(dictEnd - dictPtr); - uint offcodeMax = 31; - if (dictContentSize <= unchecked((uint)-1) - 128 * (1 << 10)) - { - /* The maximum offset that must be supported */ - uint maxOffset = 
(uint)dictContentSize + 128 * (1 << 10); - offcodeMax = ZSTD_highbit32(maxOffset); - } - - bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat( - offcodeNCount, - offcodeMaxValue, - offcodeMax < 31 ? offcodeMax : 31 - ); - { - uint u; - for (u = 0; u < 3; u++) - { - if (bs->rep[u] == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); - } + assert(0 != 0); + } - if (bs->rep[u] > dictContentSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); - } - } - } - } + return ZSTD_loadZstdDictionary( + bs, + ms, + ws, + @params, + dict, + dictSize, + dtlm, + tfp, + workspace + ); + } - return (nuint)(dictPtr - (byte*)dict); + /*! ZSTD_compressBegin_internal() : + * Assumption : either @dict OR @cdict (or none) is non-NULL, never both + * @return : 0, or an error code */ + private static nuint ZSTD_compressBegin_internal( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize, + ZSTD_buffered_policy_e zbuff + ) + { + nuint dictContentSize = cdict != null ? cdict->dictContentSize : dictSize; + assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); + assert(!(dict != null && cdict != null)); + if ( + cdict != null + && cdict->dictContentSize > 0 + && ( + pledgedSrcSize < 128 * (1 << 10) + || pledgedSrcSize < cdict->dictContentSize * 6UL + || pledgedSrcSize == unchecked(0UL - 1) + || cdict->compressionLevel == 0 + ) + && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceLoad + ) + { + return ZSTD_resetCCtx_usingCDict(cctx, cdict, @params, pledgedSrcSize, zbuff); } - /* Dictionary format : - * See : - * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format - */ - /*! 
ZSTD_loadZstdDictionary() : - * @return : dictID, or an error code - * assumptions : magic number supposed already checked - * dictSize supposed >= 8 - */ - private static nuint ZSTD_loadZstdDictionary( - ZSTD_compressedBlockState_t* bs, - ZSTD_MatchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_CCtx_params_s* @params, - void* dict, - nuint dictSize, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp, - void* workspace - ) { - byte* dictPtr = (byte*)dict; - byte* dictEnd = dictPtr + dictSize; - nuint dictID; - nuint eSize; - assert(dictSize >= 8); - assert(MEM_readLE32(dictPtr) == 0xEC30A437); - dictID = @params->fParams.noDictIDFlag != 0 ? 0 : MEM_readLE32(dictPtr + 4); - eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize); + nuint err_code = ZSTD_resetCCtx_internal( + cctx, + @params, + pledgedSrcSize, + dictContentSize, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + zbuff + ); + if (ERR_isError(err_code)) { - nuint err_code = eSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - dictPtr += eSize; - { - nuint dictContentSize = (nuint)(dictEnd - dictPtr); - { - nuint err_code = ZSTD_loadDictionaryContent( - ms, - null, - ws, - @params, - dictPtr, - dictContentSize, + { + nuint dictID = + cdict != null + ? 
ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, + &cctx->blockState.matchState, + &cctx->ldmState, + &cctx->workspace, + &cctx->appliedParams, + cdict->dictContent, + cdict->dictContentSize, + cdict->dictContentType, dtlm, - tfp + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, + cctx->tmpWorkspace + ) + : ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, + &cctx->blockState.matchState, + &cctx->ldmState, + &cctx->workspace, + &cctx->appliedParams, + dict, + dictSize, + dictContentType, + dtlm, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, + cctx->tmpWorkspace ); - if (ERR_isError(err_code)) - { - return err_code; - } + { + nuint err_code = dictID; + if (ERR_isError(err_code)) + { + return err_code; } } - return dictID; + assert(dictID <= 0xffffffff); + cctx->dictID = (uint)dictID; + cctx->dictContentSize = dictContentSize; } - /** ZSTD_compress_insertDictionary() : - * @return : dictID, or an error code */ - private static nuint ZSTD_compress_insertDictionary( - ZSTD_compressedBlockState_t* bs, - ZSTD_MatchState_t* ms, - ldmState_t* ls, - ZSTD_cwksp* ws, - ZSTD_CCtx_params_s* @params, - void* dict, - nuint dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp, - void* workspace - ) + return 0; + } + + /* ZSTD_compressBegin_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. 
*/ + private static nuint ZSTD_compressBegin_advanced_internal( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) + { { - if (dict == null || dictSize < 8) + /* compression parameters verification and optimization */ + nuint err_code = ZSTD_checkCParams(@params->cParams); + if (ERR_isError(err_code)) { - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); - } - - return 0; + return err_code; } + } - ZSTD_reset_compressedBlockState(bs); - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) - return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); - if (MEM_readLE32(dict) != 0xEC30A437) - { - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_auto) - { - return ZSTD_loadDictionaryContent( - ms, - ls, - ws, - @params, - dict, - dictSize, - dtlm, - tfp - ); - } - - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); - } + return ZSTD_compressBegin_internal( + cctx, + dict, + dictSize, + dictContentType, + dtlm, + cdict, + @params, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); + } - assert(0 != 0); - } + /*! 
ZSTD_compressBegin_advanced() : + * @return : 0, or an error code */ + public static nuint ZSTD_compressBegin_advanced( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + ZSTD_parameters @params, + ulong pledgedSrcSize + ) + { + ZSTD_CCtx_params_s cctxParams; + ZSTD_CCtxParams_init_internal(&cctxParams, &@params, 0); + return ZSTD_compressBegin_advanced_internal( + cctx, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + &cctxParams, + pledgedSrcSize + ); + } - return ZSTD_loadZstdDictionary( - bs, - ms, - ws, - @params, - dict, + private static nuint ZSTD_compressBegin_usingDict_deprecated( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + int compressionLevel + ) + { + ZSTD_CCtx_params_s cctxParams; + { + ZSTD_parameters @params = ZSTD_getParams_internal( + compressionLevel, + unchecked(0UL - 1), dictSize, - dtlm, - tfp, - workspace + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + ZSTD_CCtxParams_init_internal( + &cctxParams, + &@params, + compressionLevel == 0 ? 3 : compressionLevel ); } - /*! ZSTD_compressBegin_internal() : - * Assumption : either @dict OR @cdict (or none) is non-NULL, never both - * @return : 0, or an error code */ - private static nuint ZSTD_compressBegin_internal( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize, - ZSTD_buffered_policy_e zbuff - ) - { - nuint dictContentSize = cdict != null ? 
cdict->dictContentSize : dictSize; - assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); - assert(!(dict != null && cdict != null)); - if ( - cdict != null - && cdict->dictContentSize > 0 - && ( - pledgedSrcSize < 128 * (1 << 10) - || pledgedSrcSize < cdict->dictContentSize * 6UL - || pledgedSrcSize == unchecked(0UL - 1) - || cdict->compressionLevel == 0 - ) - && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceLoad - ) - { - return ZSTD_resetCCtx_usingCDict(cctx, cdict, @params, pledgedSrcSize, zbuff); - } - - { - nuint err_code = ZSTD_resetCCtx_internal( - cctx, - @params, - pledgedSrcSize, - dictContentSize, - ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, - zbuff - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return ZSTD_compressBegin_internal( + cctx, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + &cctxParams, + unchecked(0UL - 1), + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); + } - { - nuint dictID = - cdict != null - ? 
ZSTD_compress_insertDictionary( - cctx->blockState.prevCBlock, - &cctx->blockState.matchState, - &cctx->ldmState, - &cctx->workspace, - &cctx->appliedParams, - cdict->dictContent, - cdict->dictContentSize, - cdict->dictContentType, - dtlm, - ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, - cctx->tmpWorkspace - ) - : ZSTD_compress_insertDictionary( - cctx->blockState.prevCBlock, - &cctx->blockState.matchState, - &cctx->ldmState, - &cctx->workspace, - &cctx->appliedParams, - dict, - dictSize, - dictContentType, - dtlm, - ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx, - cctx->tmpWorkspace - ); - { - nuint err_code = dictID; - if (ERR_isError(err_code)) - { - return err_code; - } - } + public static nuint ZSTD_compressBegin_usingDict( + ZSTD_CCtx_s* cctx, + void* dict, + nuint dictSize, + int compressionLevel + ) + { + return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); + } - assert(dictID <= 0xffffffff); - cctx->dictID = (uint)dictID; - cctx->dictContentSize = dictContentSize; - } + /*===== Buffer-less streaming compression functions =====*/ + public static nuint ZSTD_compressBegin(ZSTD_CCtx_s* cctx, int compressionLevel) + { + return ZSTD_compressBegin_usingDict_deprecated(cctx, null, 0, compressionLevel); + } - return 0; + /*! ZSTD_writeEpilogue() : + * Ends a frame. + * @return : nb of bytes written into dst (or an error code) */ + private static nuint ZSTD_writeEpilogue(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity) + { + byte* ostart = (byte*)dst; + byte* op = ostart; + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /* ZSTD_compressBegin_advanced_internal() : - * Private use only. To be called from zstdmt_compress.c. 
*/ - private static nuint ZSTD_compressBegin_advanced_internal( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize - ) + if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) { + nuint fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); { - /* compression parameters verification and optimization */ - nuint err_code = ZSTD_checkCParams(@params->cParams); + nuint err_code = fhSize; if (ERR_isError(err_code)) { return err_code; } } - return ZSTD_compressBegin_internal( - cctx, - dict, - dictSize, - dictContentType, - dtlm, - cdict, - @params, - pledgedSrcSize, - ZSTD_buffered_policy_e.ZSTDb_not_buffered - ); + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; } - /*! ZSTD_compressBegin_advanced() : - * @return : 0, or an error code */ - public static nuint ZSTD_compressBegin_advanced( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - ZSTD_parameters @params, - ulong pledgedSrcSize - ) + if (cctx->stage != ZSTD_compressionStage_e.ZSTDcs_ending) { - ZSTD_CCtx_params_s cctxParams; - ZSTD_CCtxParams_init_internal(&cctxParams, &@params, 0); - return ZSTD_compressBegin_advanced_internal( - cctx, - dict, - dictSize, - ZSTD_dictContentType_e.ZSTD_dct_auto, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - null, - &cctxParams, - pledgedSrcSize - ); + /* last block */ + uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1) + 0; + if (dstCapacity < 3) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; } - private static nuint ZSTD_compressBegin_usingDict_deprecated( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - int compressionLevel - ) + if (cctx->appliedParams.fParams.checksumFlag != 0) { - 
ZSTD_CCtx_params_s cctxParams; + uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState); + if (dstCapacity < 4) { - ZSTD_parameters @params = ZSTD_getParams_internal( - compressionLevel, - unchecked(0UL - 1), - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - ZSTD_CCtxParams_init_internal( - &cctxParams, - &@params, - compressionLevel == 0 ? 3 : compressionLevel - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - return ZSTD_compressBegin_internal( - cctx, - dict, - dictSize, - ZSTD_dictContentType_e.ZSTD_dct_auto, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - null, - &cctxParams, - unchecked(0UL - 1), - ZSTD_buffered_policy_e.ZSTDb_not_buffered - ); + MEM_writeLE32(op, checksum); + op += 4; } - public static nuint ZSTD_compressBegin_usingDict( - ZSTD_CCtx_s* cctx, - void* dict, - nuint dictSize, - int compressionLevel - ) - { - return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); - } + cctx->stage = ZSTD_compressionStage_e.ZSTDcs_created; + return (nuint)(op - ostart); + } - /*===== Buffer-less streaming compression functions =====*/ - public static nuint ZSTD_compressBegin(ZSTD_CCtx_s* cctx, int compressionLevel) + /** ZSTD_CCtx_trace() : + * Trace the end of a compression call. + */ + private static void ZSTD_CCtx_trace(ZSTD_CCtx_s* cctx, nuint extraCSize) { } + + private static nuint ZSTD_compressEnd_public( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + nuint endResult; + nuint cSize = ZSTD_compressContinue_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + 1, + 1 + ); { - return ZSTD_compressBegin_usingDict_deprecated(cctx, null, 0, compressionLevel); + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - /*! ZSTD_writeEpilogue() : - * Ends a frame. 
- * @return : nb of bytes written into dst (or an error code) */ - private static nuint ZSTD_writeEpilogue(ZSTD_CCtx_s* cctx, void* dst, nuint dstCapacity) + endResult = ZSTD_writeEpilogue(cctx, (sbyte*)dst + cSize, dstCapacity - cSize); { - byte* ostart = (byte*)dst; - byte* op = ostart; - if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_created) + nuint err_code = endResult; + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + return err_code; } + } - if (cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init) + assert( + !( + cctx->appliedParams.fParams.contentSizeFlag != 0 + && cctx->pledgedSrcSizePlusOne == 0 + ) + ); + if (cctx->pledgedSrcSizePlusOne != 0) + { + if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize + 1) { - nuint fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); - { - nuint err_code = fhSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - dstCapacity -= fhSize; - op += fhSize; - cctx->stage = ZSTD_compressionStage_e.ZSTDcs_ongoing; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } + } - if (cctx->stage != ZSTD_compressionStage_e.ZSTDcs_ending) - { - /* last block */ - uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1) + 0; - if (dstCapacity < 3) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + ZSTD_CCtx_trace(cctx, endResult); + return cSize + endResult; + } - MEM_writeLE24(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - } + /* NOTE: Must just wrap ZSTD_compressEnd_public() */ + public static nuint ZSTD_compressEnd( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); + } - if (cctx->appliedParams.fParams.checksumFlag != 0) + /*! ZSTD_compress_advanced() : + * Note : this function is now DEPRECATED. 
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. + * This prototype will generate compilation warnings. */ + public static nuint ZSTD_compress_advanced( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_parameters @params + ) + { + { + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) { - uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState); - if (dstCapacity < 4) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - MEM_writeLE32(op, checksum); - op += 4; + return err_code; } - - cctx->stage = ZSTD_compressionStage_e.ZSTDcs_created; - return (nuint)(op - ostart); } - /** ZSTD_CCtx_trace() : - * Trace the end of a compression call. - */ - private static void ZSTD_CCtx_trace(ZSTD_CCtx_s* cctx, nuint extraCSize) { } - - private static nuint ZSTD_compressEnd_public( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, 0); + return ZSTD_compress_advanced_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + &cctx->simpleApiParams + ); + } + + /* Internal */ + private static nuint ZSTD_compress_advanced_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_CCtx_params_s* @params + ) + { { - nuint endResult; - nuint cSize = ZSTD_compressContinue_internal( + nuint err_code = ZSTD_compressBegin_internal( cctx, - dst, - dstCapacity, - src, + dict, + dictSize, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + null, + @params, srcSize, - 1, - 1 + ZSTD_buffered_policy_e.ZSTDb_not_buffered ); + if (ERR_isError(err_code)) { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + 
} - endResult = ZSTD_writeEpilogue(cctx, (sbyte*)dst + cSize, dstCapacity - cSize); - { - nuint err_code = endResult; - if (ERR_isError(err_code)) - { - return err_code; - } - } + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); + } - assert( - !( - cctx->appliedParams.fParams.contentSizeFlag != 0 - && cctx->pledgedSrcSizePlusOne == 0 - ) + /************************** + * Simple dictionary API + ***************************/ + /*! ZSTD_compress_usingDict() : + * Compression at an explicit compression level using a Dictionary. + * A dictionary can be any arbitrary data segment (also called a prefix), + * or a buffer with specified information (see zdict.h). + * Note : This function loads the dictionary, resulting in significant startup delay. + * It's intended for a dictionary used only once. + * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ + public static nuint ZSTD_compress_usingDict( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + int compressionLevel + ) + { + { + ZSTD_parameters @params = ZSTD_getParams_internal( + compressionLevel, + srcSize, + dict != null ? dictSize : 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + assert(@params.fParams.contentSizeFlag == 1); + ZSTD_CCtxParams_init_internal( + &cctx->simpleApiParams, + &@params, + compressionLevel == 0 ? 
3 : compressionLevel ); - if (cctx->pledgedSrcSizePlusOne != 0) - { - if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize + 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - } - - ZSTD_CCtx_trace(cctx, endResult); - return cSize + endResult; } - /* NOTE: Must just wrap ZSTD_compressEnd_public() */ - public static nuint ZSTD_compressEnd( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize + return ZSTD_compress_advanced_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + &cctx->simpleApiParams + ); + } + + /*! ZSTD_compressCCtx() : + * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. + * Important : in order to mirror `ZSTD_compress()` behavior, + * this function compresses at the requested compression level, + * __ignoring any other advanced parameter__ . + * If any advanced parameter was set using the advanced API, + * they will all be reset. Only @compressionLevel remains. + */ + public static nuint ZSTD_compressCCtx( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int compressionLevel + ) + { + assert(cctx != null); + return ZSTD_compress_usingDict( + cctx, + dst, + dstCapacity, + src, + srcSize, + null, + 0, + compressionLevel + ); + } + + /*************************************** + * Simple Core API + ***************************************/ + /*! ZSTD_compress() : + * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data. + * @return : compressed size written into `dst` (<= `dstCapacity), + * or an error code if it fails (which can be tested using ZSTD_isError()). 
*/ + public static nuint ZSTD_compress( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int compressionLevel + ) + { + nuint result; + ZSTD_CCtx_s ctxBody; + ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); + result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_freeCCtxContent(&ctxBody); + return result; + } + + /*! ZSTD_estimateCDictSize_advanced() : + * Estimate amount of memory that will be needed to create a dictionary with following arguments */ + public static nuint ZSTD_estimateCDictSize_advanced( + nuint dictSize, + ZSTD_compressionParameters cParams, + ZSTD_dictLoadMethod_e dictLoadMethod + ) + { + return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + ZSTD_sizeof_matchState( + &cParams, + ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), + 1, + 0 + ) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) + ); + } + + /*! ZSTD_estimate?DictSize() : + * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). + * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). + * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. + */ + public static nuint ZSTD_estimateCDictSize(nuint dictSize, int compressionLevel) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + return ZSTD_estimateCDictSize_advanced( + dictSize, + cParams, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy + ); + } + + public static nuint ZSTD_sizeof_CDict(ZSTD_CDict_s* cdict) + { + if (cdict == null) + return 0; + return (nuint)(cdict->workspace.workspace == cdict ? 
0 : sizeof(ZSTD_CDict_s)) + + ZSTD_cwksp_sizeof(&cdict->workspace); + } + + private static nuint ZSTD_initCDict_internal( + ZSTD_CDict_s* cdict, + void* dictBuffer, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_CCtx_params_s @params + ) + { + assert(ZSTD_checkCParams(@params.cParams) == 0); + cdict->matchState.cParams = @params.cParams; + cdict->matchState.dedicatedDictSearch = @params.enableDedicatedDictSearch; + if ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + || dictBuffer == null + || dictSize == 0 ) { - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); - } - - /*! ZSTD_compress_advanced() : - * Note : this function is now DEPRECATED. - * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. - * This prototype will generate compilation warnings. */ - public static nuint ZSTD_compress_advanced( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* dict, - nuint dictSize, - ZSTD_parameters @params - ) + cdict->dictContent = dictBuffer; + } + else { + void* internalBuffer = ZSTD_cwksp_reserve_object( + &cdict->workspace, + ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) + ); + if (internalBuffer == null) { - nuint err_code = ZSTD_checkCParams(@params.cParams); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &@params, 0); - return ZSTD_compress_advanced_internal( - cctx, - dst, - dstCapacity, - src, - srcSize, - dict, - dictSize, - &cctx->simpleApiParams - ); + cdict->dictContent = internalBuffer; + memcpy(internalBuffer, dictBuffer, (uint)dictSize); } - /* Internal */ - private static nuint ZSTD_compress_advanced_internal( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* dict, - nuint 
dictSize, - ZSTD_CCtx_params_s* @params - ) + cdict->dictContentSize = dictSize; + cdict->dictContentType = dictContentType; + cdict->entropyWorkspace = (uint*)ZSTD_cwksp_reserve_object( + &cdict->workspace, + (8 << 10) + 512 + ); + ZSTD_reset_compressedBlockState(&cdict->cBlockState); { + nuint err_code = ZSTD_reset_matchState( + &cdict->matchState, + &cdict->workspace, + &@params.cParams, + @params.useRowMatchFinder, + ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, + ZSTD_indexResetPolicy_e.ZSTDirp_reset, + ZSTD_resetTarget_e.ZSTD_resetTarget_CDict + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_compressBegin_internal( - cctx, - dict, - dictSize, - ZSTD_dictContentType_e.ZSTD_dct_auto, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - null, - @params, - srcSize, - ZSTD_buffered_policy_e.ZSTDb_not_buffered - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); - } - - /************************** - * Simple dictionary API - ***************************/ - /*! ZSTD_compress_usingDict() : - * Compression at an explicit compression level using a Dictionary. - * A dictionary can be any arbitrary data segment (also called a prefix), - * or a buffer with specified information (see zdict.h). - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ - public static nuint ZSTD_compress_usingDict( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* dict, - nuint dictSize, - int compressionLevel - ) { + @params.compressionLevel = 3; + @params.fParams.contentSizeFlag = 1; { - ZSTD_parameters @params = ZSTD_getParams_internal( - compressionLevel, - srcSize, - dict != null ? 
dictSize : 0, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - assert(@params.fParams.contentSizeFlag == 1); - ZSTD_CCtxParams_init_internal( - &cctx->simpleApiParams, + nuint dictID = ZSTD_compress_insertDictionary( + &cdict->cBlockState, + &cdict->matchState, + null, + &cdict->workspace, &@params, - compressionLevel == 0 ? 3 : compressionLevel + cdict->dictContent, + cdict->dictContentSize, + dictContentType, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict, + cdict->entropyWorkspace ); - } - - return ZSTD_compress_advanced_internal( - cctx, - dst, - dstCapacity, - src, - srcSize, - dict, - dictSize, - &cctx->simpleApiParams - ); - } + { + nuint err_code = dictID; + if (ERR_isError(err_code)) + { + return err_code; + } + } - /*! ZSTD_compressCCtx() : - * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. - * Important : in order to mirror `ZSTD_compress()` behavior, - * this function compresses at the requested compression level, - * __ignoring any other advanced parameter__ . - * If any advanced parameter was set using the advanced API, - * they will all be reset. Only @compressionLevel remains. - */ - public static nuint ZSTD_compressCCtx( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - int compressionLevel - ) - { - assert(cctx != null); - return ZSTD_compress_usingDict( - cctx, - dst, - dstCapacity, - src, - srcSize, - null, - 0, - compressionLevel - ); + assert(dictID <= unchecked((uint)-1)); + cdict->dictID = (uint)dictID; + } } - /*************************************** - * Simple Core API - ***************************************/ - /*! ZSTD_compress() : - * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have - * enough space to successfully compress the data. 
- * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_compress( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - int compressionLevel - ) - { - nuint result; - ZSTD_CCtx_s ctxBody; - ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); - result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); - ZSTD_freeCCtxContent(&ctxBody); - return result; - } + return 0; + } - /*! ZSTD_estimateCDictSize_advanced() : - * Estimate amount of memory that will be needed to create a dictionary with following arguments */ - public static nuint ZSTD_estimateCDictSize_advanced( - nuint dictSize, - ZSTD_compressionParameters cParams, - ZSTD_dictLoadMethod_e dictLoadMethod + private static ZSTD_CDict_s* ZSTD_createCDict_advanced_internal( + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_compressionParameters cParams, + ZSTD_paramSwitch_e useRowMatchFinder, + int enableDedicatedDictSearch, + ZSTD_customMem customMem + ) + { + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 ) + return null; { - return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + nuint workspaceSize = + ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) + ZSTD_sizeof_matchState( &cParams, - ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), - 1, + useRowMatchFinder, + enableDedicatedDictSearch, 0 ) + ( dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 
0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) + : ZSTD_cwksp_alloc_size( + ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) + ) ); + void* workspace = ZSTD_customMalloc(workspaceSize, customMem); + ZSTD_cwksp ws; + ZSTD_CDict_s* cdict; + if (workspace == null) + { + ZSTD_customFree(workspace, customMem); + return null; + } + + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc + ); + cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); + assert(cdict != null); + ZSTD_cwksp_move(&cdict->workspace, &ws); + cdict->customMem = customMem; + cdict->compressionLevel = 0; + cdict->useRowMatchFinder = useRowMatchFinder; + return cdict; } + } - /*! ZSTD_estimate?DictSize() : - * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). - * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). - * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. - */ - public static nuint ZSTD_estimateCDictSize(nuint dictSize, int compressionLevel) + public static ZSTD_CDict_s* ZSTD_createCDict_advanced( + void* dictBuffer, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams, + ZSTD_customMem customMem + ) + { + ZSTD_CCtx_params_s cctxParams; + cctxParams = new ZSTD_CCtx_params_s(); + ZSTD_CCtxParams_init(&cctxParams, 0); + cctxParams.cParams = cParams; + cctxParams.customMem = customMem; + return ZSTD_createCDict_advanced2( + dictBuffer, + dictSize, + dictLoadMethod, + dictContentType, + &cctxParams, + customMem + ); + } + + /* + * This API is temporary and is expected to change or disappear in the future! 
+ */ + public static ZSTD_CDict_s* ZSTD_createCDict_advanced2( + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_CCtx_params_s* originalCctxParams, + ZSTD_customMem customMem + ) + { + ZSTD_CCtx_params_s cctxParams = *originalCctxParams; + ZSTD_compressionParameters cParams; + ZSTD_CDict_s* cdict; + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) + return null; + if (cctxParams.enableDedicatedDictSearch != 0) + { + cParams = ZSTD_dedicatedDictSearch_getCParams( + cctxParams.compressionLevel, + dictSize + ); + ZSTD_overrideCParams(&cParams, &cctxParams.cParams); + } + else { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, + cParams = ZSTD_getCParamsFromCCtxParams( + &cctxParams, unchecked(0UL - 1), dictSize, ZSTD_CParamMode_e.ZSTD_cpm_createCDict ); - return ZSTD_estimateCDictSize_advanced( - dictSize, - cParams, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy - ); } - public static nuint ZSTD_sizeof_CDict(ZSTD_CDict_s* cdict) + if (ZSTD_dedicatedDictSearch_isSupported(&cParams) == 0) { - if (cdict == null) - return 0; - return (nuint)(cdict->workspace.workspace == cdict ? 
0 : sizeof(ZSTD_CDict_s)) - + ZSTD_cwksp_sizeof(&cdict->workspace); + cctxParams.enableDedicatedDictSearch = 0; + cParams = ZSTD_getCParamsFromCCtxParams( + &cctxParams, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); } - private static nuint ZSTD_initCDict_internal( - ZSTD_CDict_s* cdict, - void* dictBuffer, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_CCtx_params_s @params + cctxParams.cParams = cParams; + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + cctxParams.useRowMatchFinder, + &cParams + ); + cdict = ZSTD_createCDict_advanced_internal( + dictSize, + dictLoadMethod, + cctxParams.cParams, + cctxParams.useRowMatchFinder, + cctxParams.enableDedicatedDictSearch, + customMem + ); + if ( + cdict == null + || ERR_isError( + ZSTD_initCDict_internal( + cdict, + dict, + dictSize, + dictLoadMethod, + dictContentType, + cctxParams + ) + ) ) { - assert(ZSTD_checkCParams(@params.cParams) == 0); - cdict->matchState.cParams = @params.cParams; - cdict->matchState.dedicatedDictSearch = @params.enableDedicatedDictSearch; - if ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - || dictBuffer == null - || dictSize == 0 - ) - { - cdict->dictContent = dictBuffer; - } - else - { - void* internalBuffer = ZSTD_cwksp_reserve_object( - &cdict->workspace, - ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) - ); - if (internalBuffer == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + ZSTD_freeCDict(cdict); + return null; + } - cdict->dictContent = internalBuffer; - memcpy(internalBuffer, dictBuffer, (uint)dictSize); - } + return cdict; + } - cdict->dictContentSize = dictSize; - cdict->dictContentType = dictContentType; - cdict->entropyWorkspace = (uint*)ZSTD_cwksp_reserve_object( - &cdict->workspace, - (8 << 10) + 512 - ); - ZSTD_reset_compressedBlockState(&cdict->cBlockState); - { - nuint err_code = ZSTD_reset_matchState( 
- &cdict->matchState, - &cdict->workspace, - &@params.cParams, - @params.useRowMatchFinder, - ZSTD_compResetPolicy_e.ZSTDcrp_makeClean, - ZSTD_indexResetPolicy_e.ZSTDirp_reset, - ZSTD_resetTarget_e.ZSTD_resetTarget_CDict - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + /*! ZSTD_createCDict() : + * When compressing multiple messages or blocks using the same dictionary, + * it's recommended to digest the dictionary only once, since it's a costly operation. + * ZSTD_createCDict() will create a state from digesting a dictionary. + * The resulting state can be used for future compression operations with very limited startup cost. + * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. + * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. + * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, + * in which case the only thing that it transports is the @compressionLevel. + * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, + * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ + public static ZSTD_CDict_s* ZSTD_createCDict( + void* dict, + nuint dictSize, + int compressionLevel + ) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto, + cParams, + ZSTD_defaultCMem + ); + if (cdict != null) + cdict->compressionLevel = compressionLevel == 0 ? 
3 : compressionLevel; + return cdict; + } - { - @params.compressionLevel = 3; - @params.fParams.contentSizeFlag = 1; - { - nuint dictID = ZSTD_compress_insertDictionary( - &cdict->cBlockState, - &cdict->matchState, - null, - &cdict->workspace, - &@params, - cdict->dictContent, - cdict->dictContentSize, - dictContentType, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full, - ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict, - cdict->entropyWorkspace - ); - { - nuint err_code = dictID; - if (ERR_isError(err_code)) - { - return err_code; - } - } + /*! ZSTD_createCDict_byReference() : + * Create a digested dictionary for compression + * Dictionary content is just referenced, not duplicated. + * As a consequence, `dictBuffer` **must** outlive CDict, + * and its content must remain unmodified throughout the lifetime of CDict. + * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ + public static ZSTD_CDict_s* ZSTD_createCDict_byReference( + void* dict, + nuint dictSize, + int compressionLevel + ) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + unchecked(0UL - 1), + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto, + cParams, + ZSTD_defaultCMem + ); + if (cdict != null) + cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; + return cdict; + } - assert(dictID <= unchecked((uint)-1)); - cdict->dictID = (uint)dictID; - } + /*! ZSTD_freeCDict() : + * Function frees memory allocated by ZSTD_createCDict(). + * If a NULL pointer is passed, no operation is performed. 
*/ + public static nuint ZSTD_freeCDict(ZSTD_CDict_s* cdict) + { + if (cdict == null) + return 0; + { + ZSTD_customMem cMem = cdict->customMem; + int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); + ZSTD_cwksp_free(&cdict->workspace, cMem); + if (cdictInWorkspace == 0) + { + ZSTD_customFree(cdict, cMem); } return 0; } + } - private static ZSTD_CDict_s* ZSTD_createCDict_advanced_internal( - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_compressionParameters cParams, - ZSTD_paramSwitch_e useRowMatchFinder, - int enableDedicatedDictSearch, - ZSTD_customMem customMem - ) - { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 + /*! ZSTD_initStaticCDict_advanced() : + * Generate a digested dictionary in provided memory area. + * workspace: The memory area to emplace the dictionary into. + * Provided pointer must 8-bytes aligned. + * It must outlive dictionary usage. + * workspaceSize: Use ZSTD_estimateCDictSize() + * to determine how large workspace must be. + * cParams : use ZSTD_getCParams() to transform a compression level + * into its relevant cParams. + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) + * Note : there is no corresponding "free" function. + * Since workspace was allocated externally, it must be freed externally. + */ + public static ZSTD_CDict_s* ZSTD_initStaticCDict( + void* workspace, + nuint workspaceSize, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams + ) + { + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + ZSTD_paramSwitch_e.ZSTD_ps_auto, + &cParams + ); + /* enableDedicatedDictSearch */ + nuint matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, 1, 0); + nuint neededSize = + ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 
0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) ) - return null; - { - nuint workspaceSize = - ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) - + ZSTD_cwksp_alloc_size((8 << 10) + 512) - + ZSTD_sizeof_matchState( - &cParams, - useRowMatchFinder, - enableDedicatedDictSearch, - 0 - ) - + ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - ? 0 - : ZSTD_cwksp_alloc_size( - ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) - ) - ); - void* workspace = ZSTD_customMalloc(workspaceSize, customMem); - ZSTD_cwksp ws; - ZSTD_CDict_s* cdict; - if (workspace == null) - { - ZSTD_customFree(workspace, customMem); - return null; - } - - ZSTD_cwksp_init( - &ws, - workspace, - workspaceSize, - ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc - ); - cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); - assert(cdict != null); - ZSTD_cwksp_move(&cdict->workspace, &ws); - cdict->customMem = customMem; - cdict->compressionLevel = 0; - cdict->useRowMatchFinder = useRowMatchFinder; - return cdict; - } - } - - public static ZSTD_CDict_s* ZSTD_createCDict_advanced( - void* dictBuffer, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams, - ZSTD_customMem customMem - ) + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + matchStateSize; + ZSTD_CDict_s* cdict; + ZSTD_CCtx_params_s @params; + if (((nuint)workspace & 7) != 0) + return null; { - ZSTD_CCtx_params_s cctxParams; - cctxParams = new ZSTD_CCtx_params_s(); - ZSTD_CCtxParams_init(&cctxParams, 0); - cctxParams.cParams = cParams; - cctxParams.customMem = customMem; - return ZSTD_createCDict_advanced2( - dictBuffer, - dictSize, - dictLoadMethod, - dictContentType, - &cctxParams, - customMem + ZSTD_cwksp ws; + ZSTD_cwksp_init( + &ws, + workspace, + workspaceSize, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc ); - } - - /* - * This API is temporary and is expected to change or 
disappear in the future! - */ - public static ZSTD_CDict_s* ZSTD_createCDict_advanced2( - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_CCtx_params_s* originalCctxParams, - ZSTD_customMem customMem - ) - { - ZSTD_CCtx_params_s cctxParams = *originalCctxParams; - ZSTD_compressionParameters cParams; - ZSTD_CDict_s* cdict; - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); + if (cdict == null) return null; - if (cctxParams.enableDedicatedDictSearch != 0) - { - cParams = ZSTD_dedicatedDictSearch_getCParams( - cctxParams.compressionLevel, - dictSize - ); - ZSTD_overrideCParams(&cParams, &cctxParams.cParams); - } - else - { - cParams = ZSTD_getCParamsFromCCtxParams( - &cctxParams, - unchecked(0UL - 1), - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_createCDict - ); - } - - if (ZSTD_dedicatedDictSearch_isSupported(&cParams) == 0) - { - cctxParams.enableDedicatedDictSearch = 0; - cParams = ZSTD_getCParamsFromCCtxParams( - &cctxParams, - unchecked(0UL - 1), + ZSTD_cwksp_move(&cdict->workspace, &ws); + } + + if (workspaceSize < neededSize) + return null; + ZSTD_CCtxParams_init(&@params, 0); + @params.cParams = cParams; + @params.useRowMatchFinder = useRowMatchFinder; + cdict->useRowMatchFinder = useRowMatchFinder; + cdict->compressionLevel = 0; + if ( + ERR_isError( + ZSTD_initCDict_internal( + cdict, + dict, dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_createCDict - ); - } - - cctxParams.cParams = cParams; - cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - cctxParams.useRowMatchFinder, - &cParams - ); - cdict = ZSTD_createCDict_advanced_internal( - dictSize, - dictLoadMethod, - cctxParams.cParams, - cctxParams.useRowMatchFinder, - cctxParams.enableDedicatedDictSearch, - customMem - ); - if ( - cdict == null - || ERR_isError( - ZSTD_initCDict_internal( - cdict, - 
dict, - dictSize, - dictLoadMethod, - dictContentType, - cctxParams - ) + dictLoadMethod, + dictContentType, + @params ) ) - { - ZSTD_freeCDict(cdict); - return null; - } + ) + return null; + return cdict; + } - return cdict; - } + /*! ZSTD_getCParamsFromCDict() : + * as the name implies */ + private static ZSTD_compressionParameters ZSTD_getCParamsFromCDict(ZSTD_CDict_s* cdict) + { + assert(cdict != null); + return cdict->matchState.cParams; + } - /*! ZSTD_createCDict() : - * When compressing multiple messages or blocks using the same dictionary, - * it's recommended to digest the dictionary only once, since it's a costly operation. - * ZSTD_createCDict() will create a state from digesting a dictionary. - * The resulting state can be used for future compression operations with very limited startup cost. - * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. - * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. - * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, - * in which case the only thing that it transports is the @compressionLevel. - * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, - * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. 
*/ - public static ZSTD_CDict_s* ZSTD_createCDict( - void* dict, - nuint dictSize, - int compressionLevel - ) - { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - unchecked(0UL - 1), - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_createCDict - ); - ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, - ZSTD_dictContentType_e.ZSTD_dct_auto, - cParams, - ZSTD_defaultCMem - ); - if (cdict != null) - cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; - return cdict; - } + /*! ZSTD_getDictID_fromCDict() : + * Provides the dictID of the dictionary loaded into `cdict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ + public static uint ZSTD_getDictID_fromCDict(ZSTD_CDict_s* cdict) + { + if (cdict == null) + return 0; + return cdict->dictID; + } - /*! ZSTD_createCDict_byReference() : - * Create a digested dictionary for compression - * Dictionary content is just referenced, not duplicated. - * As a consequence, `dictBuffer` **must** outlive CDict, - * and its content must remain unmodified throughout the lifetime of CDict. - * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ - public static ZSTD_CDict_s* ZSTD_createCDict_byReference( - void* dict, - nuint dictSize, - int compressionLevel - ) + /* ZSTD_compressBegin_usingCDict_internal() : + * Implementation of various ZSTD_compressBegin_usingCDict* functions. 
+ */ + private static nuint ZSTD_compressBegin_usingCDict_internal( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) + { + ZSTD_CCtx_params_s cctxParams; + if (cdict == null) { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - unchecked(0UL - 1), - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_createCDict - ); - ZSTD_CDict_s* cdict = ZSTD_createCDict_advanced( - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - ZSTD_dictContentType_e.ZSTD_dct_auto, - cParams, - ZSTD_defaultCMem - ); - if (cdict != null) - cdict->compressionLevel = compressionLevel == 0 ? 3 : compressionLevel; - return cdict; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); } - /*! ZSTD_freeCDict() : - * Function frees memory allocated by ZSTD_createCDict(). - * If a NULL pointer is passed, no operation is performed. */ - public static nuint ZSTD_freeCDict(ZSTD_CDict_s* cdict) { - if (cdict == null) - return 0; - { - ZSTD_customMem cMem = cdict->customMem; - int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); - ZSTD_cwksp_free(&cdict->workspace, cMem); - if (cdictInWorkspace == 0) - { - ZSTD_customFree(cdict, cMem); - } + ZSTD_parameters @params; + @params.fParams = fParams; + @params.cParams = + pledgedSrcSize < 128 * (1 << 10) + || pledgedSrcSize < cdict->dictContentSize * 6UL + || pledgedSrcSize == unchecked(0UL - 1) + || cdict->compressionLevel == 0 + ? ZSTD_getCParamsFromCDict(cdict) + : ZSTD_getCParams( + cdict->compressionLevel, + pledgedSrcSize, + cdict->dictContentSize + ); + ZSTD_CCtxParams_init_internal(&cctxParams, &@params, cdict->compressionLevel); + } + + if (pledgedSrcSize != unchecked(0UL - 1)) + { + uint limitedSrcSize = (uint)(pledgedSrcSize < 1U << 19 ? pledgedSrcSize : 1U << 19); + uint limitedSrcLog = + limitedSrcSize > 1 ? 
ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; + cctxParams.cParams.windowLog = + cctxParams.cParams.windowLog > limitedSrcLog + ? cctxParams.cParams.windowLog + : limitedSrcLog; + } + + return ZSTD_compressBegin_internal( + cctx, + null, + 0, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + cdict, + &cctxParams, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_not_buffered + ); + } - return 0; - } - } + /* ZSTD_compressBegin_usingCDict_advanced() : + * This function is DEPRECATED. + * cdict must be != NULL */ + public static nuint ZSTD_compressBegin_usingCDict_advanced( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) + { + return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); + } - /*! ZSTD_initStaticCDict_advanced() : - * Generate a digested dictionary in provided memory area. - * workspace: The memory area to emplace the dictionary into. - * Provided pointer must 8-bytes aligned. - * It must outlive dictionary usage. - * workspaceSize: Use ZSTD_estimateCDictSize() - * to determine how large workspace must be. - * cParams : use ZSTD_getCParams() to transform a compression level - * into its relevant cParams. - * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) - * Note : there is no corresponding "free" function. - * Since workspace was allocated externally, it must be freed externally. 
- */ - public static ZSTD_CDict_s* ZSTD_initStaticCDict( - void* workspace, - nuint workspaceSize, - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams - ) + /* ZSTD_compressBegin_usingCDict() : + * cdict must be != NULL */ + private static nuint ZSTD_compressBegin_usingCDict_deprecated( + ZSTD_CCtx_s* cctx, + ZSTD_CDict_s* cdict + ) + { + /*content*/ + ZSTD_frameParameters fParams = new ZSTD_frameParameters + { + contentSizeFlag = 0, + checksumFlag = 0, + noDictIDFlag = 0, + }; + return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, unchecked(0UL - 1)); + } + + public static nuint ZSTD_compressBegin_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) + { + return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); + } + + /*! ZSTD_compress_usingCDict_internal(): + * Implementation of various ZSTD_compress_usingCDict* functions. + */ + private static nuint ZSTD_compress_usingCDict_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams + ) + { { - ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - ZSTD_paramSwitch_e.ZSTD_ps_auto, - &cParams + /* will check if cdict != NULL */ + nuint err_code = ZSTD_compressBegin_usingCDict_internal( + cctx, + cdict, + fParams, + srcSize ); - /* enableDedicatedDictSearch */ - nuint matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, 1, 0); - nuint neededSize = - ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) - + ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - ? 
0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) - ) - + ZSTD_cwksp_alloc_size((8 << 10) + 512) - + matchStateSize; - ZSTD_CDict_s* cdict; - ZSTD_CCtx_params_s @params; - if (((nuint)workspace & 7) != 0) - return null; + if (ERR_isError(err_code)) { - ZSTD_cwksp ws; - ZSTD_cwksp_init( - &ws, - workspace, - workspaceSize, - ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_static_alloc - ); - cdict = (ZSTD_CDict_s*)ZSTD_cwksp_reserve_object(&ws, (nuint)sizeof(ZSTD_CDict_s)); - if (cdict == null) - return null; - ZSTD_cwksp_move(&cdict->workspace, &ws); + return err_code; } - - if (workspaceSize < neededSize) - return null; - ZSTD_CCtxParams_init(&@params, 0); - @params.cParams = cParams; - @params.useRowMatchFinder = useRowMatchFinder; - cdict->useRowMatchFinder = useRowMatchFinder; - cdict->compressionLevel = 0; - if ( - ERR_isError( - ZSTD_initCDict_internal( - cdict, - dict, - dictSize, - dictLoadMethod, - dictContentType, - @params - ) - ) - ) - return null; - return cdict; } - /*! ZSTD_getCParamsFromCDict() : - * as the name implies */ - private static ZSTD_compressionParameters ZSTD_getCParamsFromCDict(ZSTD_CDict_s* cdict) - { - assert(cdict != null); - return cdict->matchState.cParams; - } + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); + } - /*! ZSTD_getDictID_fromCDict() : - * Provides the dictID of the dictionary loaded into `cdict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ - public static uint ZSTD_getDictID_fromCDict(ZSTD_CDict_s* cdict) - { - if (cdict == null) - return 0; - return cdict->dictID; - } + /*! ZSTD_compress_usingCDict_advanced(): + * This function is DEPRECATED. 
+ */ + public static nuint ZSTD_compress_usingCDict_advanced( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams + ) + { + return ZSTD_compress_usingCDict_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + cdict, + fParams + ); + } - /* ZSTD_compressBegin_usingCDict_internal() : - * Implementation of various ZSTD_compressBegin_usingCDict* functions. - */ - private static nuint ZSTD_compressBegin_usingCDict_internal( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict, - ZSTD_frameParameters fParams, - ulong pledgedSrcSize - ) - { - ZSTD_CCtx_params_s cctxParams; - if (cdict == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); - } + /*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. + * Note that compression parameters are decided at CDict creation time + * while frame parameters are hardcoded */ + public static nuint ZSTD_compress_usingCDict( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_CDict_s* cdict + ) + { + /*content*/ + ZSTD_frameParameters fParams = new ZSTD_frameParameters + { + contentSizeFlag = 1, + checksumFlag = 0, + noDictIDFlag = 0, + }; + return ZSTD_compress_usingCDict_internal( + cctx, + dst, + dstCapacity, + src, + srcSize, + cdict, + fParams + ); + } - { - ZSTD_parameters @params; - @params.fParams = fParams; - @params.cParams = - pledgedSrcSize < 128 * (1 << 10) - || pledgedSrcSize < cdict->dictContentSize * 6UL - || pledgedSrcSize == unchecked(0UL - 1) - || cdict->compressionLevel == 0 - ? 
ZSTD_getCParamsFromCDict(cdict) - : ZSTD_getCParams( - cdict->compressionLevel, - pledgedSrcSize, - cdict->dictContentSize - ); - ZSTD_CCtxParams_init_internal(&cctxParams, &@params, cdict->compressionLevel); - } + /* ****************************************************************** + * Streaming + ********************************************************************/ + public static ZSTD_CCtx_s* ZSTD_createCStream() + { + return ZSTD_createCStream_advanced(ZSTD_defaultCMem); + } - if (pledgedSrcSize != unchecked(0UL - 1)) - { - uint limitedSrcSize = (uint)(pledgedSrcSize < 1U << 19 ? pledgedSrcSize : 1U << 19); - uint limitedSrcLog = - limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; - cctxParams.cParams.windowLog = - cctxParams.cParams.windowLog > limitedSrcLog - ? cctxParams.cParams.windowLog - : limitedSrcLog; - } + public static ZSTD_CCtx_s* ZSTD_initStaticCStream(void* workspace, nuint workspaceSize) + { + return ZSTD_initStaticCCtx(workspace, workspaceSize); + } - return ZSTD_compressBegin_internal( - cctx, - null, - 0, - ZSTD_dictContentType_e.ZSTD_dct_auto, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - cdict, - &cctxParams, - pledgedSrcSize, - ZSTD_buffered_policy_e.ZSTDb_not_buffered - ); - } + public static ZSTD_CCtx_s* ZSTD_createCStream_advanced(ZSTD_customMem customMem) + { + return ZSTD_createCCtx_advanced(customMem); + } - /* ZSTD_compressBegin_usingCDict_advanced() : - * This function is DEPRECATED. 
- * cdict must be != NULL */ - public static nuint ZSTD_compressBegin_usingCDict_advanced( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict, - ZSTD_frameParameters fParams, - ulong pledgedSrcSize - ) - { - return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize); - } + public static nuint ZSTD_freeCStream(ZSTD_CCtx_s* zcs) + { + return ZSTD_freeCCtx(zcs); + } - /* ZSTD_compressBegin_usingCDict() : - * cdict must be != NULL */ - private static nuint ZSTD_compressBegin_usingCDict_deprecated( - ZSTD_CCtx_s* cctx, - ZSTD_CDict_s* cdict - ) - { - /*content*/ - ZSTD_frameParameters fParams = new ZSTD_frameParameters - { - contentSizeFlag = 0, - checksumFlag = 0, - noDictIDFlag = 0, - }; - return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, unchecked(0UL - 1)); - } + /*====== Initialization ======*/ + public static nuint ZSTD_CStreamInSize() + { + return 1 << 17; + } - public static nuint ZSTD_compressBegin_usingCDict(ZSTD_CCtx_s* cctx, ZSTD_CDict_s* cdict) - { - return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); - } + public static nuint ZSTD_CStreamOutSize() + { + return ZSTD_compressBound(1 << 17) + ZSTD_blockHeaderSize + 4; + } + + private static ZSTD_CParamMode_e ZSTD_getCParamMode( + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) + { + if (cdict != null && ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) + return ZSTD_CParamMode_e.ZSTD_cpm_attachDict; + else + return ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict; + } - /*! ZSTD_compress_usingCDict_internal(): - * Implementation of various ZSTD_compress_usingCDict* functions. + /* ZSTD_resetCStream(): + * pledgedSrcSize == 0 means "unknown" */ + public static nuint ZSTD_resetCStream(ZSTD_CCtx_s* zcs, ulong pss) + { + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. 
*/ - private static nuint ZSTD_compress_usingCDict_internal( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - ZSTD_CDict_s* cdict, - ZSTD_frameParameters fParams - ) + ulong pledgedSrcSize = pss == 0 ? unchecked(0UL - 1) : pss; { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - /* will check if cdict != NULL */ - nuint err_code = ZSTD_compressBegin_usingCDict_internal( - cctx, - cdict, - fParams, - srcSize - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); - } - - /*! ZSTD_compress_usingCDict_advanced(): - * This function is DEPRECATED. - */ - public static nuint ZSTD_compress_usingCDict_advanced( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - ZSTD_CDict_s* cdict, - ZSTD_frameParameters fParams - ) - { - return ZSTD_compress_usingCDict_internal( - cctx, - dst, - dstCapacity, - src, - srcSize, - cdict, - fParams - ); } - /*! ZSTD_compress_usingCDict() : - * Compression using a digested Dictionary. - * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
- * Note that compression parameters are decided at CDict creation time - * while frame parameters are hardcoded */ - public static nuint ZSTD_compress_usingCDict( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - ZSTD_CDict_s* cdict - ) { - /*content*/ - ZSTD_frameParameters fParams = new ZSTD_frameParameters + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) { - contentSizeFlag = 1, - checksumFlag = 0, - noDictIDFlag = 0, - }; - return ZSTD_compress_usingCDict_internal( - cctx, - dst, - dstCapacity, - src, - srcSize, - cdict, - fParams - ); - } - - /* ****************************************************************** - * Streaming - ********************************************************************/ - public static ZSTD_CCtx_s* ZSTD_createCStream() - { - return ZSTD_createCStream_advanced(ZSTD_defaultCMem); + return err_code; + } } - public static ZSTD_CCtx_s* ZSTD_initStaticCStream(void* workspace, nuint workspaceSize) - { - return ZSTD_initStaticCCtx(workspace, workspaceSize); - } + return 0; + } - public static ZSTD_CCtx_s* ZSTD_createCStream_advanced(ZSTD_customMem customMem) + /*! ZSTD_initCStream_internal() : + * Note : for lib/compress only. Used by zstdmt_compress.c. 
+ * Assumption 1 : params are valid + * Assumption 2 : either dict, or cdict, is defined, not both */ + private static nuint ZSTD_initCStream_internal( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s* @params, + ulong pledgedSrcSize + ) + { { - return ZSTD_createCCtx_advanced(customMem); + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } } - public static nuint ZSTD_freeCStream(ZSTD_CCtx_s* zcs) { - return ZSTD_freeCCtx(zcs); + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } } - /*====== Initialization ======*/ - public static nuint ZSTD_CStreamInSize() + assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); + zcs->requestedParams = *@params; + assert(!(dict != null && cdict != null)); + if (dict != null) { - return 1 << 17; + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } } - - public static nuint ZSTD_CStreamOutSize() + else { - return ZSTD_compressBound(1 << 17) + ZSTD_blockHeaderSize + 4; + /* Dictionary is cleared if !cdict */ + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) + { + return err_code; + } } - private static ZSTD_CParamMode_e ZSTD_getCParamMode( - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize - ) - { - if (cdict != null && ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) - return ZSTD_CParamMode_e.ZSTD_cpm_attachDict; - else - return ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict; - } + return 0; + } - /* ZSTD_resetCStream(): - * pledgedSrcSize == 0 means "unknown" */ - public static nuint ZSTD_resetCStream(ZSTD_CCtx_s* zcs, ulong pss) + /* ZSTD_initCStream_usingCDict_advanced() : + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ + public static nuint 
ZSTD_initCStream_usingCDict_advanced( + ZSTD_CCtx_s* zcs, + ZSTD_CDict_s* cdict, + ZSTD_frameParameters fParams, + ulong pledgedSrcSize + ) + { { - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. - * 0 will be interpreted as "empty" in the future. - */ - ulong pledgedSrcSize = pss == 0 ? unchecked(0UL - 1) : pss; + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /*! ZSTD_initCStream_internal() : - * Note : for lib/compress only. Used by zstdmt_compress.c. - * Assumption 1 : params are valid - * Assumption 2 : either dict, or cdict, is defined, not both */ - private static nuint ZSTD_initCStream_internal( - ZSTD_CCtx_s* zcs, - void* dict, - nuint dictSize, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s* @params, - ulong pledgedSrcSize - ) - { - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(!ERR_isError(ZSTD_checkCParams(@params->cParams))); - zcs->requestedParams = *@params; - assert(!(dict != null && cdict != null)); - if (dict != null) - { - nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - else - { - /* Dictionary is cleared if !cdict */ - nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /* 
ZSTD_initCStream_usingCDict_advanced() : - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ - public static nuint ZSTD_initCStream_usingCDict_advanced( - ZSTD_CCtx_s* zcs, - ZSTD_CDict_s* cdict, - ZSTD_frameParameters fParams, - ulong pledgedSrcSize - ) - { - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - zcs->requestedParams.fParams = fParams; - { - nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /* note : cdict must outlive compression session */ - public static nuint ZSTD_initCStream_usingCDict(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* cdict) - { - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /* ZSTD_initCStream_advanced() : - * pledgedSrcSize must be exact. - * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ - public static nuint ZSTD_initCStream_advanced( - ZSTD_CCtx_s* zcs, - void* dict, - nuint dictSize, - ZSTD_parameters @params, - ulong pss - ) - { - /* for compatibility with older programs relying on this behavior. - * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. - * This line will be removed in the future. - */ - ulong pledgedSrcSize = - pss == 0 && @params.fParams.contentSizeFlag == 0 ? 
unchecked(0UL - 1) : pss; - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_checkCParams(@params.cParams); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &@params); - { - nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /*! ZSTD_initCStream_usingDict() : - * This function is DEPRECATED, and is equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - * - * Creates of an internal CDict (incompatible with static CCtx), except if - * dict == NULL or dictSize < 8, in which case no dict is used. - * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if - * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. - * This prototype will generate compilation warnings. - */ - public static nuint ZSTD_initCStream_usingDict( - ZSTD_CCtx_s* zcs, - void* dict, - nuint dictSize, - int compressionLevel - ) - { - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setParameter( - zcs, - ZSTD_cParameter.ZSTD_c_compressionLevel, - compressionLevel - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /*! 
ZSTD_initCStream_srcSize() : - * This function is DEPRECATED, and equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * - * pledgedSrcSize must be correct. If it is not known at init time, use - * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, - * "0" also disables frame content size field. It may be enabled in the future. - * This prototype will generate compilation warnings. - */ - public static nuint ZSTD_initCStream_srcSize( - ZSTD_CCtx_s* zcs, - int compressionLevel, - ulong pss - ) - { - /* temporary : 0 interpreted as "unknown" during transition period. - * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. - * 0 will be interpreted as "empty" in the future. - */ - ulong pledgedSrcSize = pss == 0 ? unchecked(0UL - 1) : pss; - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_refCDict(zcs, null); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setParameter( - zcs, - ZSTD_cParameter.ZSTD_c_compressionLevel, - compressionLevel - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /*! - * Equivalent to: - * - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * - * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API - * to compress with a dictionary. 
- */ - public static nuint ZSTD_initCStream(ZSTD_CCtx_s* zcs, int compressionLevel) - { - { - nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_refCDict(zcs, null); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_CCtx_setParameter( - zcs, - ZSTD_cParameter.ZSTD_c_compressionLevel, - compressionLevel - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; - } - - /*====== Compression ======*/ - private static nuint ZSTD_nextInputSizeHint(ZSTD_CCtx_s* cctx) - { - if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) - { - return cctx->blockSizeMax - cctx->stableIn_notConsumed; - } - - assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); - { - nuint hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize == 0) - hintInSize = cctx->blockSizeMax; - return hintInSize; - } - } - - /** ZSTD_compressStream_generic(): - * internal function for all *compressStream*() variants - * @return : hint size for next input to complete ongoing block */ - private static nuint ZSTD_compressStream_generic( - ZSTD_CCtx_s* zcs, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input, - ZSTD_EndDirective flushMode - ) - { - assert(input != null); - sbyte* istart = (sbyte*)input->src; - sbyte* iend = istart != null ? istart + input->size : istart; - sbyte* ip = istart != null ? istart + input->pos : istart; - assert(output != null); - sbyte* ostart = (sbyte*)output->dst; - sbyte* oend = ostart != null ? ostart + output->size : ostart; - sbyte* op = ostart != null ? 
ostart + output->pos : ostart; - uint someMoreWork = 1; - assert(zcs != null); - if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) - { - assert(input->pos >= zcs->stableIn_notConsumed); - input->pos -= zcs->stableIn_notConsumed; - if (ip != null) - ip -= zcs->stableIn_notConsumed; - zcs->stableIn_notConsumed = 0; - } - -#if DEBUG - if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) - { - assert(zcs->inBuff != null); - assert(zcs->inBuffSize > 0); - } -#endif - -#if DEBUG - if (zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) - { - assert(zcs->outBuff != null); - assert(zcs->outBuffSize > 0); - } -#endif - -#if DEBUG - if (input->src == null) - assert(input->size == 0); -#endif - assert(input->pos <= input->size); -#if DEBUG - if (output->dst == null) - assert(output->size == 0); -#endif - assert(output->pos <= output->size); - assert((uint)flushMode <= (uint)ZSTD_EndDirective.ZSTD_e_end); - while (someMoreWork != 0) - { - switch (zcs->streamStage) - { - case ZSTD_cStreamStage.zcss_init: - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_init_missing)); - case ZSTD_cStreamStage.zcss_load: - if ( - flushMode == ZSTD_EndDirective.ZSTD_e_end - && ( - (nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) - || zcs->appliedParams.outBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_stable - ) - && zcs->inBuffPos == 0 - ) - { - /* shortcut to compression pass directly into output buffer */ - nuint cSize = ZSTD_compressEnd_public( - zcs, - op, - (nuint)(oend - op), - ip, - (nuint)(iend - ip) - ); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - ip = iend; - op += cSize; - zcs->frameEnded = 1; - ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - someMoreWork = 0; - break; - } - - if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) - { - nuint toLoad = zcs->inBuffTarget - zcs->inBuffPos; - nuint loaded = 
ZSTD_limitCopy( - zcs->inBuff + zcs->inBuffPos, - toLoad, - ip, - (nuint)(iend - ip) - ); - zcs->inBuffPos += loaded; - if (ip != null) - ip += loaded; - if ( - flushMode == ZSTD_EndDirective.ZSTD_e_continue - && zcs->inBuffPos < zcs->inBuffTarget - ) - { - someMoreWork = 0; - break; - } - - if ( - flushMode == ZSTD_EndDirective.ZSTD_e_flush - && zcs->inBuffPos == zcs->inToCompress - ) - { - someMoreWork = 0; - break; - } - } - else - { - assert( - zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable - ); - if ( - flushMode == ZSTD_EndDirective.ZSTD_e_continue - && (nuint)(iend - ip) < zcs->blockSizeMax - ) - { - zcs->stableIn_notConsumed = (nuint)(iend - ip); - ip = iend; - someMoreWork = 0; - break; - } - - if (flushMode == ZSTD_EndDirective.ZSTD_e_flush && ip == iend) - { - someMoreWork = 0; - break; - } - } - - { - int inputBuffered = - zcs->appliedParams.inBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? 1 - : 0; - void* cDst; - nuint cSize; - nuint oSize = (nuint)(oend - op); - nuint iSize = - inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress - : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) - : zcs->blockSizeMax; - if ( - oSize >= ZSTD_compressBound(iSize) - || zcs->appliedParams.outBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_stable - ) - cDst = op; - else - { - cDst = zcs->outBuff; - oSize = zcs->outBuffSize; - } - - if (inputBuffered != 0) - { - uint lastBlock = - flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend - ? 1U - : 0U; - cSize = - lastBlock != 0 - ? 
ZSTD_compressEnd_public( - zcs, - cDst, - oSize, - zcs->inBuff + zcs->inToCompress, - iSize - ) - : ZSTD_compressContinue_public( - zcs, - cDst, - oSize, - zcs->inBuff + zcs->inToCompress, - iSize - ); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - zcs->frameEnded = lastBlock; - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; - if (zcs->inBuffTarget > zcs->inBuffSize) - { - zcs->inBuffPos = 0; - zcs->inBuffTarget = zcs->blockSizeMax; - } - -#if DEBUG - if (lastBlock == 0) - assert(zcs->inBuffTarget <= zcs->inBuffSize); -#endif - zcs->inToCompress = zcs->inBuffPos; - } - else - { - uint lastBlock = - flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend - ? 1U - : 0U; - cSize = - lastBlock != 0 - ? ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) - : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); - if (ip != null) - ip += iSize; - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - zcs->frameEnded = lastBlock; -#if DEBUG - if (lastBlock != 0) - assert(ip == iend); -#endif - } - - if (cDst == op) - { - op += cSize; - if (zcs->frameEnded != 0) - { - someMoreWork = 0; - ZSTD_CCtx_reset( - zcs, - ZSTD_ResetDirective.ZSTD_reset_session_only - ); - } - - break; - } - - zcs->outBuffContentSize = cSize; - zcs->outBuffFlushedSize = 0; - zcs->streamStage = ZSTD_cStreamStage.zcss_flush; - } - - goto case ZSTD_cStreamStage.zcss_flush; - case ZSTD_cStreamStage.zcss_flush: - assert( - zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ); - - { - nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - nuint flushed = ZSTD_limitCopy( - op, - (nuint)(oend - op), - zcs->outBuff + zcs->outBuffFlushedSize, - toFlush - ); - if (flushed != 0) - op += flushed; - zcs->outBuffFlushedSize += flushed; - if (toFlush != flushed) - { - assert(op == oend); - someMoreWork = 0; - break; - } - - zcs->outBuffContentSize = zcs->outBuffFlushedSize = 
0; - if (zcs->frameEnded != 0) - { - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); - break; - } - - zcs->streamStage = ZSTD_cStreamStage.zcss_load; - break; - } - - default: - assert(0 != 0); - break; - } + return err_code; } - - input->pos = (nuint)(ip - istart); - output->pos = (nuint)(op - ostart); - if (zcs->frameEnded != 0) - return 0; - return ZSTD_nextInputSizeHint(zcs); } - private static nuint ZSTD_nextInputSizeHint_MTorST(ZSTD_CCtx_s* cctx) { - if (cctx->appliedParams.nbWorkers >= 1) + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) { - assert(cctx->mtctx != null); - return ZSTDMT_nextInputSizeHint(cctx->mtctx); + return err_code; } - - return ZSTD_nextInputSizeHint(cctx); } - /*! - * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). - * NOTE: The return value is different. ZSTD_compressStream() returns a hint for - * the next read size (if non-zero and not an error). ZSTD_compressStream2() - * returns the minimum nb of bytes left to flush (if non-zero and not an error). - */ - public static nuint ZSTD_compressStream( - ZSTD_CCtx_s* zcs, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input - ) + zcs->requestedParams.fParams = fParams; { + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_compressStream2( - zcs, - output, - input, - ZSTD_EndDirective.ZSTD_e_continue - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - return ZSTD_nextInputSizeHint_MTorST(zcs); } - /* After a compression call set the expected input/output buffer. - * This is validated at the start of the next compression call. 
- */ - private static void ZSTD_setBufferExpectations( - ZSTD_CCtx_s* cctx, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input - ) + return 0; + } + + /* note : cdict must outlive compression session */ + public static nuint ZSTD_initCStream_usingCDict(ZSTD_CCtx_s* zcs, ZSTD_CDict_s* cdict) + { { - if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - cctx->expectedInBuffer = *input; + return err_code; } + } - if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, cdict); + if (ERR_isError(err_code)) { - cctx->expectedOutBufferSize = output->size - output->pos; + return err_code; } } - /* Validate that the input/output buffers match the expectations set by - * ZSTD_setBufferExpectations. + return 0; + } + + /* ZSTD_initCStream_advanced() : + * pledgedSrcSize must be exact. + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ + public static nuint ZSTD_initCStream_advanced( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + ZSTD_parameters @params, + ulong pss + ) + { + /* for compatibility with older programs relying on this behavior. + * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. + * This line will be removed in the future. */ - private static nuint ZSTD_checkBufferStability( - ZSTD_CCtx_s* cctx, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input, - ZSTD_EndDirective endOp - ) + ulong pledgedSrcSize = + pss == 0 && @params.fParams.contentSizeFlag == 0 ? 
unchecked(0UL - 1) : pss; { - if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - ZSTD_inBuffer_s expect = cctx->expectedInBuffer; - if (expect.src != input->src || expect.pos != input->pos) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) - ); + return err_code; } + } - if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) { - nuint outBufferSize = output->size - output->pos; - if (cctx->expectedOutBufferSize != outBufferSize) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) - ); + return err_code; } - - return 0; } - /* - * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. - * Otherwise, it's ignored. - * @return: 0 on success, or a ZSTD_error code otherwise. - */ - private static nuint ZSTD_CCtx_init_compressStream2( - ZSTD_CCtx_s* cctx, - ZSTD_EndDirective endOp, - nuint inSize - ) { - ZSTD_CCtx_params_s @params = cctx->requestedParams; - ZSTD_prefixDict_s prefixDict = cctx->prefixDict; + nuint err_code = ZSTD_checkCParams(@params.cParams); + if (ERR_isError(err_code)) { - /* Init the local dict if present. 
*/ - nuint err_code = ZSTD_initLocalDict(cctx); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - cctx->prefixDict = new ZSTD_prefixDict_s(); - assert(prefixDict.dict == null || cctx->cdict == null); - if (cctx->cdict != null && cctx->localDict.cdict == null) + ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &@params); + { + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) { - @params.compressionLevel = cctx->cdict->compressionLevel; + return err_code; } + } - if (endOp == ZSTD_EndDirective.ZSTD_e_end) - cctx->pledgedSrcSizePlusOne = inSize + 1; + return 0; + } + + /*! ZSTD_initCStream_usingDict() : + * This function is DEPRECATED, and is equivalent to: + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + * + * Creates of an internal CDict (incompatible with static CCtx), except if + * dict == NULL or dictSize < 8, in which case no dict is used. + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. + * This prototype will generate compilation warnings. + */ + public static nuint ZSTD_initCStream_usingDict( + ZSTD_CCtx_s* zcs, + void* dict, + nuint dictSize, + int compressionLevel + ) + { + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - nuint dictSize = - prefixDict.dict != null ? prefixDict.dictSize - : cctx->cdict != null ? 
cctx->cdict->dictContentSize - : 0; - ZSTD_CParamMode_e mode = ZSTD_getCParamMode( - cctx->cdict, - &@params, - cctx->pledgedSrcSizePlusOne - 1 - ); - @params.cParams = ZSTD_getCParamsFromCCtxParams( - &@params, - cctx->pledgedSrcSizePlusOne - 1, - dictSize, - mode - ); + return err_code; } + } - @params.postBlockSplitter = ZSTD_resolveBlockSplitterMode( - @params.postBlockSplitter, - &@params.cParams - ); - @params.ldmParams.enableLdm = ZSTD_resolveEnableLdm( - @params.ldmParams.enableLdm, - &@params.cParams - ); - @params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( - @params.useRowMatchFinder, - &@params.cParams - ); - @params.validateSequences = ZSTD_resolveExternalSequenceValidation( - @params.validateSequences - ); - @params.maxBlockSize = ZSTD_resolveMaxBlockSize(@params.maxBlockSize); - @params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( - @params.searchForExternalRepcodes, - @params.compressionLevel + { + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel ); - if (ZSTD_hasExtSeqProd(&@params) != 0 && @params.nbWorkers >= 1) + if (ERR_isError(err_code)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported) - ); + return err_code; } + } - if (cctx->pledgedSrcSizePlusOne - 1 <= 512 * (1 << 10)) + { + nuint err_code = ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); + if (ERR_isError(err_code)) { - @params.nbWorkers = 0; + return err_code; } + } - if (@params.nbWorkers > 0) - { - if (cctx->mtctx == null) - { - cctx->mtctx = ZSTDMT_createCCtx_advanced( - (uint)@params.nbWorkers, - cctx->customMem, - cctx->pool - ); - if (cctx->mtctx == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - } - - { - nuint err_code = ZSTDMT_initCStream_internal( - cctx->mtctx, - prefixDict.dict, - prefixDict.dictSize, - prefixDict.dictContentType, - cctx->cdict, - @params, - cctx->pledgedSrcSizePlusOne - 1 
- ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 0; + } - cctx->dictID = cctx->cdict != null ? cctx->cdict->dictID : 0; - cctx->dictContentSize = - cctx->cdict != null ? cctx->cdict->dictContentSize : prefixDict.dictSize; - cctx->consumedSrcSize = 0; - cctx->producedCSize = 0; - cctx->streamStage = ZSTD_cStreamStage.zcss_load; - cctx->appliedParams = @params; - } - else + /*! ZSTD_initCStream_srcSize() : + * This function is DEPRECATED, and equivalent to: + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + * + * pledgedSrcSize must be correct. If it is not known at init time, use + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, + * "0" also disables frame content size field. It may be enabled in the future. + * This prototype will generate compilation warnings. + */ + public static nuint ZSTD_initCStream_srcSize( + ZSTD_CCtx_s* zcs, + int compressionLevel, + ulong pss + ) + { + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. + */ + ulong pledgedSrcSize = pss == 0 ? 
unchecked(0UL - 1) : pss; + { + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - ulong pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; - assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); - { - nuint err_code = ZSTD_compressBegin_internal( - cctx, - prefixDict.dict, - prefixDict.dictSize, - prefixDict.dictContentType, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - cctx->cdict, - &@params, - pledgedSrcSize, - ZSTD_buffered_policy_e.ZSTDb_buffered - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return err_code; + } + } - assert(cctx->appliedParams.nbWorkers == 0); - cctx->inToCompress = 0; - cctx->inBuffPos = 0; - if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) - { - cctx->inBuffTarget = - cctx->blockSizeMax + (nuint)(cctx->blockSizeMax == pledgedSrcSize ? 1 : 0); - } - else - { - cctx->inBuffTarget = 0; - } + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, null); + if (ERR_isError(err_code)) + { + return err_code; + } + } - cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; - cctx->streamStage = ZSTD_cStreamStage.zcss_load; - cctx->frameEnded = 0; + { + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel + ); + if (ERR_isError(err_code)) + { + return err_code; } + } - return 0; + { + nuint err_code = ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + if (ERR_isError(err_code)) + { + return err_code; + } } - /* @return provides a minimum amount of data remaining to be flushed from internal buffers - */ - public static nuint ZSTD_compressStream2( - ZSTD_CCtx_s* cctx, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input, - ZSTD_EndDirective endOp - ) + return 0; + } + + /*! 
+ * Equivalent to: + * + * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) + * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. + */ + public static nuint ZSTD_initCStream(ZSTD_CCtx_s* zcs, int compressionLevel) + { { - if (output->pos > output->size) + nuint err_code = ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return err_code; } + } - if (input->pos > input->size) + { + nuint err_code = ZSTD_CCtx_refCDict(zcs, null); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return err_code; } + } - if ((uint)endOp > (uint)ZSTD_EndDirective.ZSTD_e_end) + { + nuint err_code = ZSTD_CCtx_setParameter( + zcs, + ZSTD_cParameter.ZSTD_c_compressionLevel, + compressionLevel + ); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return err_code; } + } + + return 0; + } + + /*====== Compression ======*/ + private static nuint ZSTD_nextInputSizeHint(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + return cctx->blockSizeMax - cctx->stableIn_notConsumed; + } + + assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); + { + nuint hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize == 0) + hintInSize = cctx->blockSizeMax; + return hintInSize; + } + } + + /** ZSTD_compressStream_generic(): + * internal function for all *compressStream*() variants + * @return : hint size for next input to complete ongoing block */ + private static nuint ZSTD_compressStream_generic( + ZSTD_CCtx_s* zcs, + ZSTD_outBuffer_s* output, + 
ZSTD_inBuffer_s* input, + ZSTD_EndDirective flushMode + ) + { + assert(input != null); + sbyte* istart = (sbyte*)input->src; + sbyte* iend = istart != null ? istart + input->size : istart; + sbyte* ip = istart != null ? istart + input->pos : istart; + assert(output != null); + sbyte* ostart = (sbyte*)output->dst; + sbyte* oend = ostart != null ? ostart + output->size : ostart; + sbyte* op = ostart != null ? ostart + output->pos : ostart; + uint someMoreWork = 1; + assert(zcs != null); + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + assert(input->pos >= zcs->stableIn_notConsumed); + input->pos -= zcs->stableIn_notConsumed; + if (ip != null) + ip -= zcs->stableIn_notConsumed; + zcs->stableIn_notConsumed = 0; + } + +#if DEBUG + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + assert(zcs->inBuff != null); + assert(zcs->inBuffSize > 0); + } +#endif + +#if DEBUG + if (zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + assert(zcs->outBuff != null); + assert(zcs->outBuffSize > 0); + } +#endif - assert(cctx != null); - if (cctx->streamStage == ZSTD_cStreamStage.zcss_init) +#if DEBUG + if (input->src == null) + assert(input->size == 0); +#endif + assert(input->pos <= input->size); +#if DEBUG + if (output->dst == null) + assert(output->size == 0); +#endif + assert(output->pos <= output->size); + assert((uint)flushMode <= (uint)ZSTD_EndDirective.ZSTD_e_end); + while (someMoreWork != 0) + { + switch (zcs->streamStage) { - /* no obligation to start from pos==0 */ - nuint inputSize = input->size - input->pos; - nuint totalInputSize = inputSize + cctx->stableIn_notConsumed; - if ( - cctx->requestedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable - && endOp == ZSTD_EndDirective.ZSTD_e_continue - && totalInputSize < 1 << 17 - ) - { - if (cctx->stableIn_notConsumed != 0) + case ZSTD_cStreamStage.zcss_init: + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_init_missing)); + case 
ZSTD_cStreamStage.zcss_load: + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_end + && ( + (nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) + || zcs->appliedParams.outBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_stable + ) + && zcs->inBuffPos == 0 + ) { - if (input->src != cctx->expectedInBuffer.src) + /* shortcut to compression pass directly into output buffer */ + nuint cSize = ZSTD_compressEnd_public( + zcs, + op, + (nuint)(oend - op), + ip, + (nuint)(iend - ip) + ); { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected - ) - ); + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - if (input->pos != cctx->expectedInBuffer.size) + ip = iend; + op += cSize; + zcs->frameEnded = 1; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + someMoreWork = 0; + break; + } + + if (zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + nuint toLoad = zcs->inBuffTarget - zcs->inBuffPos; + nuint loaded = ZSTD_limitCopy( + zcs->inBuff + zcs->inBuffPos, + toLoad, + ip, + (nuint)(iend - ip) + ); + zcs->inBuffPos += loaded; + if (ip != null) + ip += loaded; + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_continue + && zcs->inBuffPos < zcs->inBuffTarget + ) { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected - ) - ); + someMoreWork = 0; + break; + } + + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_flush + && zcs->inBuffPos == zcs->inToCompress + ) + { + someMoreWork = 0; + break; } } + else + { + assert( + zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + ); + if ( + flushMode == ZSTD_EndDirective.ZSTD_e_continue + && (nuint)(iend - ip) < zcs->blockSizeMax + ) + { + zcs->stableIn_notConsumed = (nuint)(iend - ip); + ip = iend; + someMoreWork = 0; + break; + } - input->pos = input->size; - cctx->expectedInBuffer = *input; - cctx->stableIn_notConsumed += inputSize; - return (nuint)( - 
cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 - ); - } + if (flushMode == ZSTD_EndDirective.ZSTD_e_flush && ip == iend) + { + someMoreWork = 0; + break; + } + } { - nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize); - if (ERR_isError(err_code)) + int inputBuffered = + zcs->appliedParams.inBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? 1 + : 0; + void* cDst; + nuint cSize; + nuint oSize = (nuint)(oend - op); + nuint iSize = + inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress + : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) + : zcs->blockSizeMax; + if ( + oSize >= ZSTD_compressBound(iSize) + || zcs->appliedParams.outBufferMode + == ZSTD_bufferMode_e.ZSTD_bm_stable + ) + cDst = op; + else { - return err_code; + cDst = zcs->outBuff; + oSize = zcs->outBuffSize; + } + + if (inputBuffered != 0) + { + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend + ? 1U + : 0U; + cSize = + lastBlock != 0 + ? ZSTD_compressEnd_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ) + : ZSTD_compressContinue_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ); + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zcs->frameEnded = lastBlock; + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; + if (zcs->inBuffTarget > zcs->inBuffSize) + { + zcs->inBuffPos = 0; + zcs->inBuffTarget = zcs->blockSizeMax; + } + +#if DEBUG + if (lastBlock == 0) + assert(zcs->inBuffTarget <= zcs->inBuffSize); +#endif + zcs->inToCompress = zcs->inBuffPos; + } + else + { + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend + ? 1U + : 0U; + cSize = + lastBlock != 0 + ? 
ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) + : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); + if (ip != null) + ip += iSize; + { + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + zcs->frameEnded = lastBlock; +#if DEBUG + if (lastBlock != 0) + assert(ip == iend); +#endif } - } - ZSTD_setBufferExpectations(cctx, output, input); - } + if (cDst == op) + { + op += cSize; + if (zcs->frameEnded != 0) + { + someMoreWork = 0; + ZSTD_CCtx_reset( + zcs, + ZSTD_ResetDirective.ZSTD_reset_session_only + ); + } - { - /* end of transparent initialization stage */ - nuint err_code = ZSTD_checkBufferStability(cctx, output, input, endOp); - if (ERR_isError(err_code)) - { - return err_code; - } - } + break; + } - if (cctx->appliedParams.nbWorkers > 0) - { - nuint flushMin; - if (cctx->cParamsChanged != 0) - { - ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); - cctx->cParamsChanged = 0; + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->streamStage = ZSTD_cStreamStage.zcss_flush; } - if (cctx->stableIn_notConsumed != 0) - { - assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); - assert(input->pos >= cctx->stableIn_notConsumed); - input->pos -= cctx->stableIn_notConsumed; - cctx->stableIn_notConsumed = 0; - } + goto case ZSTD_cStreamStage.zcss_flush; + case ZSTD_cStreamStage.zcss_flush: + assert( + zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ); - for (; ; ) { - nuint ipos = input->pos; - nuint opos = output->pos; - flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); - cctx->consumedSrcSize += input->pos - ipos; - cctx->producedCSize += output->pos - opos; - if ( - ERR_isError(flushMin) - || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0 - ) + nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + nuint flushed = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zcs->outBuff + 
zcs->outBuffFlushedSize, + toFlush + ); + if (flushed != 0) + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush != flushed) { - if (flushMin == 0) - ZSTD_CCtx_trace(cctx, 0); - ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + assert(op == oend); + someMoreWork = 0; + break; } + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + if (zcs->frameEnded != 0) { - nuint err_code = flushMin; - if (ERR_isError(err_code)) - { - return err_code; - } + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + break; } - if (endOp == ZSTD_EndDirective.ZSTD_e_continue) - { - if ( - input->pos != ipos - || output->pos != opos - || input->pos == input->size - || output->pos == output->size - ) - break; - } - else - { - assert( - endOp == ZSTD_EndDirective.ZSTD_e_flush - || endOp == ZSTD_EndDirective.ZSTD_e_end - ); - if (flushMin == 0 || output->pos == output->size) - break; - } + zcs->streamStage = ZSTD_cStreamStage.zcss_load; + break; } - assert( - endOp == ZSTD_EndDirective.ZSTD_e_continue - || flushMin == 0 - || output->pos == output->size - ); - ZSTD_setBufferExpectations(cctx, output, input); - return flushMin; + default: + assert(0 != 0); + break; } + } + + input->pos = (nuint)(ip - istart); + output->pos = (nuint)(op - ostart); + if (zcs->frameEnded != 0) + return 0; + return ZSTD_nextInputSizeHint(zcs); + } + + private static nuint ZSTD_nextInputSizeHint_MTorST(ZSTD_CCtx_s* cctx) + { + if (cctx->appliedParams.nbWorkers >= 1) + { + assert(cctx->mtctx != null); + return ZSTDMT_nextInputSizeHint(cctx->mtctx); + } + + return ZSTD_nextInputSizeHint(cctx); + } + /*! + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for + * the next read size (if non-zero and not an error). ZSTD_compressStream2() + * returns the minimum nb of bytes left to flush (if non-zero and not an error). 
+ */ + public static nuint ZSTD_compressStream( + ZSTD_CCtx_s* zcs, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) + { + { + nuint err_code = ZSTD_compressStream2( + zcs, + output, + input, + ZSTD_EndDirective.ZSTD_e_continue + ); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_compressStream_generic(cctx, output, input, endOp); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - ZSTD_setBufferExpectations(cctx, output, input); - return cctx->outBuffContentSize - cctx->outBuffFlushedSize; + return ZSTD_nextInputSizeHint_MTorST(zcs); + } + + /* After a compression call set the expected input/output buffer. + * This is validated at the start of the next compression call. + */ + private static void ZSTD_setBufferExpectations( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + cctx->expectedInBuffer = *input; } - /*! ZSTD_compressStream2_simpleArgs() : - * Same as ZSTD_compressStream2(), - * but using only integral types as arguments. - * This variant might be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. - */ - public static nuint ZSTD_compressStream2_simpleArgs( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - nuint* dstPos, - void* src, - nuint srcSize, - nuint* srcPos, - ZSTD_EndDirective endOp - ) + if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) { - ZSTD_outBuffer_s output; - ZSTD_inBuffer_s input; - output.dst = dst; - output.size = dstCapacity; - output.pos = *dstPos; - input.src = src; - input.size = srcSize; - input.pos = *srcPos; - { - nuint cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; - } - } - - /*! ZSTD_compress2() : - * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. 
- * (note that this entry point doesn't even expose a compression level parameter). - * ZSTD_compress2() always starts a new frame. - * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. - * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() - * - The function is always blocking, returns when compression is completed. - * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have - * enough space to successfully compress the data, though it is possible it fails for other reasons. - * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). - */ - public static nuint ZSTD_compress2( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + cctx->expectedOutBufferSize = output->size - output->pos; + } + } + + /* Validate that the input/output buffers match the expectations set by + * ZSTD_setBufferExpectations. 
+ */ + private static nuint ZSTD_checkBufferStability( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) + { + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) { - ZSTD_bufferMode_e originalInBufferMode = cctx->requestedParams.inBufferMode; - ZSTD_bufferMode_e originalOutBufferMode = cctx->requestedParams.outBufferMode; - ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_session_only); - cctx->requestedParams.inBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable; - cctx->requestedParams.outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable; - { - nuint oPos = 0; - nuint iPos = 0; - nuint result = ZSTD_compressStream2_simpleArgs( - cctx, - dst, - dstCapacity, - &oPos, - src, - srcSize, - &iPos, - ZSTD_EndDirective.ZSTD_e_end + ZSTD_inBuffer_s expect = cctx->expectedInBuffer; + if (expect.src != input->src || expect.pos != input->pos) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) ); - cctx->requestedParams.inBufferMode = originalInBufferMode; - cctx->requestedParams.outBufferMode = originalOutBufferMode; - { - nuint err_code = result; - if (ERR_isError(err_code)) - { - return err_code; - } - } + } - if (result != 0) - { - assert(oPos == dstCapacity); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + if (cctx->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable) + { + nuint outBufferSize = output->size - output->pos; + if (cctx->expectedOutBufferSize != outBufferSize) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) + ); + } + + return 0; + } - assert(iPos == srcSize); - return oPos; + /* + * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. + * Otherwise, it's ignored. + * @return: 0 on success, or a ZSTD_error code otherwise. 
+ */ + private static nuint ZSTD_CCtx_init_compressStream2( + ZSTD_CCtx_s* cctx, + ZSTD_EndDirective endOp, + nuint inSize + ) + { + ZSTD_CCtx_params_s @params = cctx->requestedParams; + ZSTD_prefixDict_s prefixDict = cctx->prefixDict; + { + /* Init the local dict if present. */ + nuint err_code = ZSTD_initLocalDict(cctx); + if (ERR_isError(err_code)) + { + return err_code; } } - /* ZSTD_validateSequence() : - * @offBase : must use the format required by ZSTD_storeSeq() - * @returns a ZSTD error code if sequence is not valid - */ - private static nuint ZSTD_validateSequence( - uint offBase, - uint matchLength, - uint minMatch, - nuint posInSrc, - uint windowLog, - nuint dictSize, - int useSequenceProducer - ) + cctx->prefixDict = new ZSTD_prefixDict_s(); + assert(prefixDict.dict == null || cctx->cdict == null); + if (cctx->cdict != null && cctx->localDict.cdict == null) { - uint windowSize = 1U << (int)windowLog; - /* posInSrc represents the amount of data the decoder would decode up to this point. - * As long as the amount of data decoded is less than or equal to window size, offsets may be - * larger than the total length of output decoded in order to reference the dict, even larger than - * window size. After output surpasses windowSize, we're limited to windowSize offsets again. - */ - nuint offsetBound = posInSrc > windowSize ? windowSize : posInSrc + dictSize; - nuint matchLenLowerBound = (nuint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4); + @params.compressionLevel = cctx->cdict->compressionLevel; + } + + if (endOp == ZSTD_EndDirective.ZSTD_e_end) + cctx->pledgedSrcSizePlusOne = inSize + 1; + { + nuint dictSize = + prefixDict.dict != null ? prefixDict.dictSize + : cctx->cdict != null ? 
cctx->cdict->dictContentSize + : 0; + ZSTD_CParamMode_e mode = ZSTD_getCParamMode( + cctx->cdict, + &@params, + cctx->pledgedSrcSizePlusOne - 1 + ); + @params.cParams = ZSTD_getCParamsFromCCtxParams( + &@params, + cctx->pledgedSrcSizePlusOne - 1, + dictSize, + mode + ); + } + + @params.postBlockSplitter = ZSTD_resolveBlockSplitterMode( + @params.postBlockSplitter, + &@params.cParams + ); + @params.ldmParams.enableLdm = ZSTD_resolveEnableLdm( + @params.ldmParams.enableLdm, + &@params.cParams + ); + @params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode( + @params.useRowMatchFinder, + &@params.cParams + ); + @params.validateSequences = ZSTD_resolveExternalSequenceValidation( + @params.validateSequences + ); + @params.maxBlockSize = ZSTD_resolveMaxBlockSize(@params.maxBlockSize); + @params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch( + @params.searchForExternalRepcodes, + @params.compressionLevel + ); + if (ZSTD_hasExtSeqProd(&@params) != 0 && @params.nbWorkers >= 1) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported) + ); + } + + if (cctx->pledgedSrcSizePlusOne - 1 <= 512 * (1 << 10)) + { + @params.nbWorkers = 0; + } + + if (@params.nbWorkers > 0) + { + if (cctx->mtctx == null) { - assert(offsetBound > 0); - if (offBase > offsetBound + 3) + cctx->mtctx = ZSTDMT_createCCtx_advanced( + (uint)@params.nbWorkers, + cctx->customMem, + cctx->pool + ); + if (cctx->mtctx == null) { return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) ); } } - if (matchLength < matchLenLowerBound) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + nuint err_code = ZSTDMT_initCStream_internal( + cctx->mtctx, + prefixDict.dict, + prefixDict.dictSize, + prefixDict.dictContentType, + cctx->cdict, + @params, + cctx->pledgedSrcSizePlusOne - 1 ); + if (ERR_isError(err_code)) + { + return 
err_code; + } } - return 0; + cctx->dictID = cctx->cdict != null ? cctx->cdict->dictID : 0; + cctx->dictContentSize = + cctx->cdict != null ? cctx->cdict->dictContentSize : prefixDict.dictSize; + cctx->consumedSrcSize = 0; + cctx->producedCSize = 0; + cctx->streamStage = ZSTD_cStreamStage.zcss_load; + cctx->appliedParams = @params; } - - /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ - private static uint ZSTD_finalizeOffBase(uint rawOffset, uint* rep, uint ll0) + else { - assert(rawOffset > 0); - uint offBase = rawOffset + 3; - if (ll0 == 0 && rawOffset == rep[0]) - { - assert(1 >= 1); - assert(1 <= 3); - offBase = 1; - } - else if (rawOffset == rep[1]) + ulong pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; + assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); { - assert(2 - ll0 >= 1); - assert(2 - ll0 <= 3); - offBase = 2 - ll0; + nuint err_code = ZSTD_compressBegin_internal( + cctx, + prefixDict.dict, + prefixDict.dictSize, + prefixDict.dictContentType, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + cctx->cdict, + &@params, + pledgedSrcSize, + ZSTD_buffered_policy_e.ZSTDb_buffered + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - else if (rawOffset == rep[2]) + + assert(cctx->appliedParams.nbWorkers == 0); + cctx->inToCompress = 0; + cctx->inBuffPos = 0; + if (cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) { - assert(3 - ll0 >= 1); - assert(3 - ll0 <= 3); - offBase = 3 - ll0; + cctx->inBuffTarget = + cctx->blockSizeMax + (nuint)(cctx->blockSizeMax == pledgedSrcSize ? 
1 : 0); } - else if (ll0 != 0 && rawOffset == rep[0] - 1) + else { - assert(3 >= 1); - assert(3 <= 3); - offBase = 3; + cctx->inBuffTarget = 0; } - return offBase; + cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; + cctx->streamStage = ZSTD_cStreamStage.zcss_load; + cctx->frameEnded = 0; } - /* This function scans through an array of ZSTD_Sequence, - * storing the sequences it reads, until it reaches a block delimiter. - * Note that the block delimiter includes the last literals of the block. - * @blockSize must be == sum(sequence_lengths). - * @returns @blockSize on success, and a ZSTD_error otherwise. - */ - private static nuint ZSTD_transferSequences_wBlockDelim( - ZSTD_CCtx_s* cctx, - ZSTD_SequencePosition* seqPos, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - void* src, - nuint blockSize, - ZSTD_paramSwitch_e externalRepSearch - ) + return 0; + } + + /* @return provides a minimum amount of data remaining to be flushed from internal buffers + */ + public static nuint ZSTD_compressStream2( + ZSTD_CCtx_s* cctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) + { + if (output->pos > output->size) { - uint idx = seqPos->idx; - uint startIdx = idx; - byte* ip = (byte*)src; - byte* iend = ip + blockSize; - repcodes_s updatedRepcodes; - uint dictSize; - if (cctx->cdict != null) - { - dictSize = (uint)cctx->cdict->dictContentSize; - } - else if (cctx->prefixDict.dict != null) - { - dictSize = (uint)cctx->prefixDict.dictSize; - } - else - { - dictSize = 0; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (input->pos > input->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if ((uint)endOp > (uint)ZSTD_EndDirective.ZSTD_e_end) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + } - memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - for ( - ; - idx < inSeqsSize 
&& (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); - ++idx + assert(cctx != null); + if (cctx->streamStage == ZSTD_cStreamStage.zcss_init) + { + /* no obligation to start from pos==0 */ + nuint inputSize = input->size - input->pos; + nuint totalInputSize = inputSize + cctx->stableIn_notConsumed; + if ( + cctx->requestedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + && endOp == ZSTD_EndDirective.ZSTD_e_continue + && totalInputSize < 1 << 17 ) { - uint litLength = inSeqs[idx].litLength; - uint matchLength = inSeqs[idx].matchLength; - uint offBase; - if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable) - { - assert(inSeqs[idx].offset > 0); - offBase = inSeqs[idx].offset + 3; - } - else - { - uint ll0 = litLength == 0 ? 1U : 0U; - offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); - } - - if (cctx->appliedParams.validateSequences != 0) + if (cctx->stableIn_notConsumed != 0) { - seqPos->posInSrc += litLength + matchLength; + if (input->src != cctx->expectedInBuffer.src) { - nuint err_code = ZSTD_validateSequence( - offBase, - matchLength, - cctx->appliedParams.cParams.minMatch, - seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, - dictSize, - ZSTD_hasExtSeqProd(&cctx->appliedParams) + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected + ) ); - if (ERR_isError(err_code)) - { - return err_code; - } } - } - if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + if (input->pos != cctx->expectedInBuffer.size) + { + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected + ) + ); + } } - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); - ip += matchLength + litLength; - } - - if (idx == inSeqsSize) - { - return unchecked( - 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + input->pos = input->size; + cctx->expectedInBuffer = *input; + cctx->stableIn_notConsumed += inputSize; + return (nuint)( + cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 ); } - assert(externalRepSearch != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert(idx >= startIdx); - if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable && idx != startIdx) { - uint* rep = updatedRepcodes.rep; - /* index of last non-block-delimiter sequence */ - uint lastSeqIdx = idx - 1; - if (lastSeqIdx >= startIdx + 2) - { - rep[2] = inSeqs[lastSeqIdx - 2].offset; - rep[1] = inSeqs[lastSeqIdx - 1].offset; - rep[0] = inSeqs[lastSeqIdx].offset; - } - else if (lastSeqIdx == startIdx + 1) - { - rep[2] = rep[0]; - rep[1] = inSeqs[lastSeqIdx - 1].offset; - rep[0] = inSeqs[lastSeqIdx].offset; - } - else + nuint err_code = ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize); + if (ERR_isError(err_code)) { - assert(lastSeqIdx == startIdx); - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = inSeqs[lastSeqIdx].offset; + return err_code; } } - memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); - if (inSeqs[idx].litLength != 0) - { - ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength); - ip += inSeqs[idx].litLength; - seqPos->posInSrc += inSeqs[idx].litLength; - } - - if (ip != iend) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); - } - - seqPos->idx = idx + 1; - return blockSize; + ZSTD_setBufferExpectations(cctx, output, input); } - /* - * This function attempts to scan through @blockSize bytes in @src - * represented by the sequences in @inSeqs, - * storing any (partial) sequences. - * - * Occasionally, we may want to reduce the actual number of bytes consumed from @src - * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. 
- * - * @returns the number of bytes consumed from @src, necessarily <= @blockSize. - * Otherwise, it may return a ZSTD error if something went wrong. - */ - private static nuint ZSTD_transferSequences_noDelim( - ZSTD_CCtx_s* cctx, - ZSTD_SequencePosition* seqPos, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - void* src, - nuint blockSize, - ZSTD_paramSwitch_e externalRepSearch - ) { - uint idx = seqPos->idx; - uint startPosInSequence = seqPos->posInSequence; - uint endPosInSequence = seqPos->posInSequence + (uint)blockSize; - nuint dictSize; - byte* istart = (byte*)src; - byte* ip = istart; - /* May be adjusted if we decide to process fewer than blockSize bytes */ - byte* iend = istart + blockSize; - repcodes_s updatedRepcodes; - uint bytesAdjustment = 0; - uint finalMatchSplit = 0; - if (cctx->cdict != null) - { - dictSize = cctx->cdict->dictContentSize; - } - else if (cctx->prefixDict.dict != null) - { - dictSize = cctx->prefixDict.dictSize; - } - else + /* end of transparent initialization stage */ + nuint err_code = ZSTD_checkBufferStability(cctx, output, input, endOp); + if (ERR_isError(err_code)) { - dictSize = 0; + return err_code; } + } - memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - while (endPosInSequence != 0 && idx < inSeqsSize && finalMatchSplit == 0) - { - ZSTD_Sequence currSeq = inSeqs[idx]; - uint litLength = currSeq.litLength; - uint matchLength = currSeq.matchLength; - uint rawOffset = currSeq.offset; - uint offBase; - if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) - { - if (startPosInSequence >= litLength) - { - startPosInSequence -= litLength; - litLength = 0; - matchLength -= startPosInSequence; - } - else - { - litLength -= startPosInSequence; - } - - endPosInSequence -= currSeq.litLength + currSeq.matchLength; - startPosInSequence = 0; - } - else - { - if (endPosInSequence > litLength) - { - uint firstHalfMatchLength; - litLength = - startPosInSequence >= litLength ? 
0 : litLength - startPosInSequence; - firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; - if ( - matchLength > blockSize - && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch - ) - { - /* Only ever split the match if it is larger than the block size */ - uint secondHalfMatchLength = - currSeq.matchLength + currSeq.litLength - endPosInSequence; - if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) - { - endPosInSequence -= - cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; - bytesAdjustment = - cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; - firstHalfMatchLength -= bytesAdjustment; - } + if (cctx->appliedParams.nbWorkers > 0) + { + nuint flushMin; + if (cctx->cParamsChanged != 0) + { + ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); + cctx->cParamsChanged = 0; + } - matchLength = firstHalfMatchLength; - finalMatchSplit = 1; - } - else - { - bytesAdjustment = endPosInSequence - currSeq.litLength; - endPosInSequence = currSeq.litLength; - break; - } - } - else - { - break; - } - } + if (cctx->stableIn_notConsumed != 0) + { + assert(cctx->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + assert(input->pos >= cctx->stableIn_notConsumed); + input->pos -= cctx->stableIn_notConsumed; + cctx->stableIn_notConsumed = 0; + } + for (; ; ) + { + nuint ipos = input->pos; + nuint opos = output->pos; + flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); + cctx->consumedSrcSize += input->pos - ipos; + cctx->producedCSize += output->pos - opos; + if ( + ERR_isError(flushMin) + || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0 + ) { - uint ll0 = litLength == 0 ? 
1U : 0U; - offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + if (flushMin == 0) + ZSTD_CCtx_trace(cctx, 0); + ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_session_only); } - if (cctx->appliedParams.validateSequences != 0) { - seqPos->posInSrc += litLength + matchLength; + nuint err_code = flushMin; + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_validateSequence( - offBase, - matchLength, - cctx->appliedParams.cParams.minMatch, - seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, - dictSize, - ZSTD_hasExtSeqProd(&cctx->appliedParams) - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } } - if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) + if (endOp == ZSTD_EndDirective.ZSTD_e_continue) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + if ( + input->pos != ipos + || output->pos != opos + || input->pos == input->size + || output->pos == output->size + ) + break; + } + else + { + assert( + endOp == ZSTD_EndDirective.ZSTD_e_flush + || endOp == ZSTD_EndDirective.ZSTD_e_end ); + if (flushMin == 0 || output->pos == output->size) + break; } - - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); - ip += matchLength + litLength; - if (finalMatchSplit == 0) - idx++; } assert( - idx == inSeqsSize - || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength + endOp == ZSTD_EndDirective.ZSTD_e_continue + || flushMin == 0 + || output->pos == output->size ); - seqPos->idx = idx; - seqPos->posInSequence = endPosInSequence; - memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); - iend -= bytesAdjustment; - if (ip != iend) - { - /* Store any last literals */ - uint lastLLSize = (uint)(iend - ip); - assert(ip <= iend); - ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); - seqPos->posInSrc += lastLLSize; - } - - return (nuint)(iend - 
istart); + ZSTD_setBufferExpectations(cctx, output, input); + return flushMin; } - private static void* ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { - assert( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, (int)mode) != 0 - ); - if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters) + nuint err_code = ZSTD_compressStream_generic(cctx, output, input, endOp); + if (ERR_isError(err_code)) { - return (delegate* managed< - ZSTD_CCtx_s*, - ZSTD_SequencePosition*, - ZSTD_Sequence*, - nuint, - void*, - nuint, - ZSTD_paramSwitch_e, - nuint>)(&ZSTD_transferSequences_wBlockDelim); + return err_code; } + } - assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters); - return (delegate* managed< - ZSTD_CCtx_s*, - ZSTD_SequencePosition*, - ZSTD_Sequence*, - nuint, - void*, - nuint, - ZSTD_paramSwitch_e, - nuint>)(&ZSTD_transferSequences_noDelim); + ZSTD_setBufferExpectations(cctx, output, input); + return cctx->outBuffContentSize - cctx->outBuffFlushedSize; + } + + /*! ZSTD_compressStream2_simpleArgs() : + * Same as ZSTD_compressStream2(), + * but using only integral types as arguments. + * This variant might be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. + */ + public static nuint ZSTD_compressStream2_simpleArgs( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + nuint* dstPos, + void* src, + nuint srcSize, + nuint* srcPos, + ZSTD_EndDirective endOp + ) + { + ZSTD_outBuffer_s output; + ZSTD_inBuffer_s input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { + nuint cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; } + } - /* Discover the size of next block by searching for the delimiter. - * Note that a block delimiter **must** exist in this mode, - * otherwise it's an input error. 
- * The block size retrieved will be later compared to ensure it remains within bounds */ - private static nuint blockSize_explicitDelimiter( - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - ZSTD_SequencePosition seqPos - ) - { - int end = 0; - nuint blockSize = 0; - nuint spos = seqPos.idx; - assert(spos <= inSeqsSize); - while (spos < inSeqsSize) + /*! ZSTD_compress2() : + * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. + * (note that this entry point doesn't even expose a compression level parameter). + * ZSTD_compress2() always starts a new frame. + * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. + * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() + * - The function is always blocking, returns when compression is completed. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data, though it is possible it fails for other reasons. + * @return : compressed size written into `dst` (<= `dstCapacity), + * or an error code if it fails (which can be tested using ZSTD_isError()). 
+ */ + public static nuint ZSTD_compress2( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + ZSTD_bufferMode_e originalInBufferMode = cctx->requestedParams.inBufferMode; + ZSTD_bufferMode_e originalOutBufferMode = cctx->requestedParams.outBufferMode; + ZSTD_CCtx_reset(cctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + cctx->requestedParams.inBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable; + cctx->requestedParams.outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_stable; + { + nuint oPos = 0; + nuint iPos = 0; + nuint result = ZSTD_compressStream2_simpleArgs( + cctx, + dst, + dstCapacity, + &oPos, + src, + srcSize, + &iPos, + ZSTD_EndDirective.ZSTD_e_end + ); + cctx->requestedParams.inBufferMode = originalInBufferMode; + cctx->requestedParams.outBufferMode = originalOutBufferMode; { - end = inSeqs[spos].offset == 0 ? 1 : 0; - blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; - if (end != 0) + nuint err_code = result; + if (ERR_isError(err_code)) { - if (inSeqs[spos].matchLength != 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); - break; + return err_code; } + } - spos++; + if (result != 0) + { + assert(oPos == dstCapacity); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - if (end == 0) + assert(iPos == srcSize); + return oPos; + } + } + + /* ZSTD_validateSequence() : + * @offBase : must use the format required by ZSTD_storeSeq() + * @returns a ZSTD error code if sequence is not valid + */ + private static nuint ZSTD_validateSequence( + uint offBase, + uint matchLength, + uint minMatch, + nuint posInSrc, + uint windowLog, + nuint dictSize, + int useSequenceProducer + ) + { + uint windowSize = 1U << (int)windowLog; + /* posInSrc represents the amount of data the decoder would decode up to this point. 
+ * As long as the amount of data decoded is less than or equal to window size, offsets may be + * larger than the total length of output decoded in order to reference the dict, even larger than + * window size. After output surpasses windowSize, we're limited to windowSize offsets again. + */ + nuint offsetBound = posInSrc > windowSize ? windowSize : posInSrc + dictSize; + nuint matchLenLowerBound = (nuint)(minMatch == 3 || useSequenceProducer != 0 ? 3 : 4); + { + assert(offsetBound > 0); + if (offBase > offsetBound + 3) + { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) ); - return blockSize; + } + } + + if (matchLength < matchLenLowerBound) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + } + + return 0; + } + + /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ + private static uint ZSTD_finalizeOffBase(uint rawOffset, uint* rep, uint ll0) + { + assert(rawOffset > 0); + uint offBase = rawOffset + 3; + if (ll0 == 0 && rawOffset == rep[0]) + { + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + } + else if (rawOffset == rep[1]) + { + assert(2 - ll0 >= 1); + assert(2 - ll0 <= 3); + offBase = 2 - ll0; + } + else if (rawOffset == rep[2]) + { + assert(3 - ll0 >= 1); + assert(3 - ll0 <= 3); + offBase = 3 - ll0; + } + else if (ll0 != 0 && rawOffset == rep[0] - 1) + { + assert(3 >= 1); + assert(3 <= 3); + offBase = 3; + } + + return offBase; + } + + /* This function scans through an array of ZSTD_Sequence, + * storing the sequences it reads, until it reaches a block delimiter. + * Note that the block delimiter includes the last literals of the block. + * @blockSize must be == sum(sequence_lengths). + * @returns @blockSize on success, and a ZSTD_error otherwise. 
+ */ + private static nuint ZSTD_transferSequences_wBlockDelim( + ZSTD_CCtx_s* cctx, + ZSTD_SequencePosition* seqPos, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint blockSize, + ZSTD_paramSwitch_e externalRepSearch + ) + { + uint idx = seqPos->idx; + uint startIdx = idx; + byte* ip = (byte*)src; + byte* iend = ip + blockSize; + repcodes_s updatedRepcodes; + uint dictSize; + if (cctx->cdict != null) + { + dictSize = (uint)cctx->cdict->dictContentSize; + } + else if (cctx->prefixDict.dict != null) + { + dictSize = (uint)cctx->prefixDict.dictSize; + } + else + { + dictSize = 0; } - private static nuint determine_blockSize( - ZSTD_sequenceFormat_e mode, - nuint blockSize, - nuint remaining, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - ZSTD_SequencePosition seqPos + memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + for ( + ; + idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); + ++idx ) { - if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters) + uint litLength = inSeqs[idx].litLength; + uint matchLength = inSeqs[idx].matchLength; + uint offBase; + if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable) { - return remaining < blockSize ? remaining : blockSize; + assert(inSeqs[idx].offset > 0); + offBase = inSeqs[idx].offset + 3; + } + else + { + uint ll0 = litLength == 0 ? 
1U : 0U; + offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } - assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters); + if (cctx->appliedParams.validateSequences != 0) { - nuint explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); + seqPos->posInSrc += litLength + matchLength; { - nuint err_code = explicitBlockSize; + nuint err_code = ZSTD_validateSequence( + offBase, + matchLength, + cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, + dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams) + ); if (ERR_isError(err_code)) { return err_code; } } + } - if (explicitBlockSize > blockSize) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); - if (explicitBlockSize > remaining) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); - return explicitBlockSize; + if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } + + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); + ip += matchLength + litLength; } - /* Compress all provided sequences, block-by-block. - * - * Returns the cumulative size of all compressed blocks (including their headers), - * otherwise a ZSTD error. 
- */ - private static nuint ZSTD_compressSequences_internal( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - void* src, - nuint srcSize - ) + if (idx == inSeqsSize) { - nuint cSize = 0; - nuint remaining = srcSize; - ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition - { - idx = 0, - posInSequence = 0, - posInSrc = 0, - }; - byte* ip = (byte*)src; - byte* op = (byte*)dst; - void* sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); - if (remaining == 0) - { - /* last block */ - uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); - if (dstCapacity < 4) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + } - MEM_writeLE32(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - cSize += ZSTD_blockHeaderSize; + assert(externalRepSearch != ZSTD_paramSwitch_e.ZSTD_ps_auto); + assert(idx >= startIdx); + if (externalRepSearch == ZSTD_paramSwitch_e.ZSTD_ps_disable && idx != startIdx) + { + uint* rep = updatedRepcodes.rep; + /* index of last non-block-delimiter sequence */ + uint lastSeqIdx = idx - 1; + if (lastSeqIdx >= startIdx + 2) + { + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; } - - while (remaining != 0) + else if (lastSeqIdx == startIdx + 1) { - nuint compressedSeqsSize; - nuint cBlockSize; - nuint blockSize = determine_blockSize( - cctx->appliedParams.blockDelimiters, - cctx->blockSizeMax, - remaining, - inSeqs, - inSeqsSize, - seqPos - ); - uint lastBlock = blockSize == remaining ? 
1U : 0U; - { - nuint err_code = blockSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + rep[2] = rep[0]; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } + else + { + assert(lastSeqIdx == startIdx); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[lastSeqIdx].offset; + } + } - assert(blockSize <= remaining); - ZSTD_resetSeqStore(&cctx->seqStore); - blockSize = ( - (delegate* managed< - ZSTD_CCtx_s*, - ZSTD_SequencePosition*, - ZSTD_Sequence*, - nuint, - void*, - nuint, - ZSTD_paramSwitch_e, - nuint>)sequenceCopier - )( - cctx, - &seqPos, - inSeqs, - inSeqsSize, - ip, - blockSize, - cctx->appliedParams.searchForExternalRepcodes - ); - { - nuint err_code = blockSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); + if (inSeqs[idx].litLength != 0) + { + ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength); + ip += inSeqs[idx].litLength; + seqPos->posInSrc += inSeqs[idx].litLength; + } - if (blockSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1) - { - cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - { - nuint err_code = cBlockSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + if (ip != iend) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + } - cSize += cBlockSize; - ip += blockSize; - op += cBlockSize; - remaining -= blockSize; - dstCapacity -= cBlockSize; - continue; - } + seqPos->idx = idx + 1; + return blockSize; + } - if (dstCapacity < ZSTD_blockHeaderSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + /* + * This function attempts to scan through @blockSize bytes in @src + * represented by the sequences in @inSeqs, + * storing any (partial) sequences. 
+ * + * Occasionally, we may want to reduce the actual number of bytes consumed from @src + * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. + * + * @returns the number of bytes consumed from @src, necessarily <= @blockSize. + * Otherwise, it may return a ZSTD error if something went wrong. + */ + private static nuint ZSTD_transferSequences_noDelim( + ZSTD_CCtx_s* cctx, + ZSTD_SequencePosition* seqPos, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint blockSize, + ZSTD_paramSwitch_e externalRepSearch + ) + { + uint idx = seqPos->idx; + uint startPosInSequence = seqPos->posInSequence; + uint endPosInSequence = seqPos->posInSequence + (uint)blockSize; + nuint dictSize; + byte* istart = (byte*)src; + byte* ip = istart; + /* May be adjusted if we decide to process fewer than blockSize bytes */ + byte* iend = istart + blockSize; + repcodes_s updatedRepcodes; + uint bytesAdjustment = 0; + uint finalMatchSplit = 0; + if (cctx->cdict != null) + { + dictSize = cctx->cdict->dictContentSize; + } + else if (cctx->prefixDict.dict != null) + { + dictSize = cctx->prefixDict.dictSize; + } + else + { + dictSize = 0; + } - compressedSeqsSize = ZSTD_entropyCompressSeqStore( - &cctx->seqStore, - &cctx->blockState.prevCBlock->entropy, - &cctx->blockState.nextCBlock->entropy, - &cctx->appliedParams, - op + ZSTD_blockHeaderSize, - dstCapacity - ZSTD_blockHeaderSize, - blockSize, - cctx->tmpWorkspace, - cctx->tmpWkspSize, - cctx->bmi2 - ); + memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + while (endPosInSequence != 0 && idx < inSeqsSize && finalMatchSplit == 0) + { + ZSTD_Sequence currSeq = inSeqs[idx]; + uint litLength = currSeq.litLength; + uint matchLength = currSeq.matchLength; + uint rawOffset = currSeq.offset; + uint offBase; + if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) + { + if (startPosInSequence >= litLength) { - nuint err_code = compressedSeqsSize; - if 
(ERR_isError(err_code)) - { - return err_code; - } + startPosInSequence -= litLength; + litLength = 0; + matchLength -= startPosInSequence; } - - if ( - cctx->isFirstBlock == 0 - && ZSTD_maybeRLE(&cctx->seqStore) != 0 - && ZSTD_isRLE(ip, blockSize) != 0 - ) + else { - compressedSeqsSize = 1; + litLength -= startPosInSequence; } - if (compressedSeqsSize == 0) - { - cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - { - nuint err_code = cBlockSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - } - else if (compressedSeqsSize == 1) + endPosInSequence -= currSeq.litLength + currSeq.matchLength; + startPosInSequence = 0; + } + else + { + if (endPosInSequence > litLength) { - cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); + uint firstHalfMatchLength; + litLength = + startPosInSequence >= litLength ? 0 : litLength - startPosInSequence; + firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength; + if ( + matchLength > blockSize + && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch + ) { - nuint err_code = cBlockSize; - if (ERR_isError(err_code)) + /* Only ever split the match if it is larger than the block size */ + uint secondHalfMatchLength = + currSeq.matchLength + currSeq.litLength - endPosInSequence; + if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) { - return err_code; + endPosInSequence -= + cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + bytesAdjustment = + cctx->appliedParams.cParams.minMatch - secondHalfMatchLength; + firstHalfMatchLength -= bytesAdjustment; } - } - } - else - { - uint cBlockHeader; - ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); - if ( - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - cBlockHeader = - lastBlock - + 
((uint)blockType_e.bt_compressed << 1) - + (uint)(compressedSeqsSize << 3); - MEM_writeLE24(op, cBlockHeader); - cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; - } - - cSize += cBlockSize; - if (lastBlock != 0) - { - break; - } - else - { - ip += blockSize; - op += cBlockSize; - remaining -= blockSize; - dstCapacity -= cBlockSize; - cctx->isFirstBlock = 0; - } - } - - return cSize; - } - /*! ZSTD_compressSequences() : - * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. - * @src contains the entire input (not just the literals). - * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals - * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). - * The entire source is compressed into a single frame. - * - * The compression behavior changes based on cctx params. In particular: - * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain - * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on - * the block size derived from the cctx, and sequences may be split. This is the default setting. - * - * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain - * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. - * - * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes - * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit - * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. - * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). 
- * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction. - * - * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined - * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for - * specifics regarding offset/matchlength requirements) and then bail out and return an error. - * - * In addition to the two adjustable experimental params, there are other important cctx params. - * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. - * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression. - * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset - * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md - * - * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused. - * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly, - * and cannot emit an RLE block that disagrees with the repcode history. - * @return : final compressed size, or a ZSTD error code. 
- */ - public static nuint ZSTD_compressSequences( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - void* src, - nuint srcSize - ) - { - byte* op = (byte*)dst; - nuint cSize = 0; - assert(cctx != null); - { - nuint err_code = ZSTD_CCtx_init_compressStream2( - cctx, - ZSTD_EndDirective.ZSTD_e_end, - srcSize - ); - if (ERR_isError(err_code)) + matchLength = firstHalfMatchLength; + finalMatchSplit = 1; + } + else + { + bytesAdjustment = endPosInSequence - currSeq.litLength; + endPosInSequence = currSeq.litLength; + break; + } + } + else { - return err_code; + break; } } { - nuint frameHeaderSize = ZSTD_writeFrameHeader( - op, - dstCapacity, - &cctx->appliedParams, - srcSize, - cctx->dictID - ); - op += frameHeaderSize; - assert(frameHeaderSize <= dstCapacity); - dstCapacity -= frameHeaderSize; - cSize += frameHeaderSize; - } - - if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0) - { - ZSTD_XXH64_update(&cctx->xxhState, src, srcSize); + uint ll0 = litLength == 0 ? 
1U : 0U; + offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } + if (cctx->appliedParams.validateSequences != 0) { - nuint cBlocksSize = ZSTD_compressSequences_internal( - cctx, - op, - dstCapacity, - inSeqs, - inSeqsSize, - src, - srcSize - ); + seqPos->posInSrc += litLength + matchLength; { - nuint err_code = cBlocksSize; + nuint err_code = ZSTD_validateSequence( + offBase, + matchLength, + cctx->appliedParams.cParams.minMatch, + seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, + dictSize, + ZSTD_hasExtSeqProd(&cctx->appliedParams) + ); if (ERR_isError(err_code)) { return err_code; } } - - cSize += cBlocksSize; - assert(cBlocksSize <= dstCapacity); - dstCapacity -= cBlocksSize; } - if (cctx->appliedParams.fParams.checksumFlag != 0) + if (idx - seqPos->idx >= cctx->seqStore.maxNbSeq) { - uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState); - if (dstCapacity < 4) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - MEM_writeLE32((sbyte*)dst + cSize, checksum); - cSize += 4; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } - return cSize; + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); + ip += matchLength + litLength; + if (finalMatchSplit == 0) + idx++; } - private static nuint convertSequences_noRepcodes( - SeqDef_s* dstSeqs, - ZSTD_Sequence* inSeqs, - nuint nbSequences - ) + assert( + idx == inSeqsSize + || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength + ); + seqPos->idx = idx; + seqPos->posInSequence = endPosInSequence; + memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); + iend -= bytesAdjustment; + if (ip != iend) { - nuint longLen = 0; - nuint n; - for (n = 0; n < nbSequences; n++) - { - assert(inSeqs[n].offset > 0); - dstSeqs[n].offBase = inSeqs[n].offset + 3; - dstSeqs[n].litLength = 
(ushort)inSeqs[n].litLength; - dstSeqs[n].mlBase = (ushort)(inSeqs[n].matchLength - 3); - if (inSeqs[n].matchLength > 65535 + 3) - { - assert(longLen == 0); - longLen = n + 1; - } - - if (inSeqs[n].litLength > 65535) - { - assert(longLen == 0); - longLen = n + nbSequences + 1; - } - } - - return longLen; + /* Store any last literals */ + uint lastLLSize = (uint)(iend - ip); + assert(ip <= iend); + ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); + seqPos->posInSrc += lastLLSize; } - /* - * Precondition: Sequences must end on an explicit Block Delimiter - * @return: 0 on success, or an error code. - * Note: Sequence validation functionality has been disabled (removed). - * This is helpful to generate a lean main pipeline, improving performance. - * It may be re-inserted later. - */ - private static nuint ZSTD_convertBlockSequences( - ZSTD_CCtx_s* cctx, - ZSTD_Sequence* inSeqs, - nuint nbSequences, - int repcodeResolution - ) + return (nuint)(iend - istart); + } + + private static void* ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) + { + assert( + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, (int)mode) != 0 + ); + if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters) { - repcodes_s updatedRepcodes; - nuint seqNb = 0; - if (nbSequences >= cctx->seqStore.maxNbSeq) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); - } + return (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)(&ZSTD_transferSequences_wBlockDelim); + } + + assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters); + return (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)(&ZSTD_transferSequences_noDelim); + } - memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - assert(nbSequences >= 1); 
- assert(inSeqs[nbSequences - 1].matchLength == 0); - assert(inSeqs[nbSequences - 1].offset == 0); - if (repcodeResolution == 0) - { - nuint longl = convertSequences_noRepcodes( - cctx->seqStore.sequencesStart, - inSeqs, - nbSequences - 1 - ); - cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences - 1; - if (longl != 0) - { - assert(cctx->seqStore.longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); - if (longl <= nbSequences - 1) - { - cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; - cctx->seqStore.longLengthPos = (uint)(longl - 1); - } - else - { - assert(longl <= 2 * (nbSequences - 1)); - cctx->seqStore.longLengthType = - ZSTD_longLengthType_e.ZSTD_llt_literalLength; - cctx->seqStore.longLengthPos = (uint)(longl - (nbSequences - 1) - 1); - } - } - } - else + /* Discover the size of next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. + * The block size retrieved will be later compared to ensure it remains within bounds */ + private static nuint blockSize_explicitDelimiter( + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + ZSTD_SequencePosition seqPos + ) + { + int end = 0; + nuint blockSize = 0; + nuint spos = seqPos.idx; + assert(spos <= inSeqsSize); + while (spos < inSeqsSize) + { + end = inSeqs[spos].offset == 0 ? 1 : 0; + blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; + if (end != 0) { - for (seqNb = 0; seqNb < nbSequences - 1; seqNb++) - { - uint litLength = inSeqs[seqNb].litLength; - uint matchLength = inSeqs[seqNb].matchLength; - uint ll0 = litLength == 0 ? 
1U : 0U; - uint offBase = ZSTD_finalizeOffBase( - inSeqs[seqNb].offset, - updatedRepcodes.rep, - ll0 + if (inSeqs[spos].matchLength != 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) ); - ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); - ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); - } + break; } - if (repcodeResolution == 0 && nbSequences > 1) - { - uint* rep = updatedRepcodes.rep; - if (nbSequences >= 4) - { - /* index of last full sequence */ - uint lastSeqIdx = (uint)nbSequences - 2; - rep[2] = inSeqs[lastSeqIdx - 2].offset; - rep[1] = inSeqs[lastSeqIdx - 1].offset; - rep[0] = inSeqs[lastSeqIdx].offset; - } - else if (nbSequences == 3) - { - rep[2] = rep[0]; - rep[1] = inSeqs[0].offset; - rep[0] = inSeqs[1].offset; - } - else - { - assert(nbSequences == 2); - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = inSeqs[0].offset; - } - } + spos++; + } - memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); - return 0; + if (end == 0) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + return blockSize; + } + + private static nuint determine_blockSize( + ZSTD_sequenceFormat_e mode, + nuint blockSize, + nuint remaining, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + ZSTD_SequencePosition seqPos + ) + { + if (mode == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters) + { + return remaining < blockSize ? 
remaining : blockSize; } - private static BlockSummary ZSTD_get1BlockSummary(ZSTD_Sequence* seqs, nuint nbSeqs) + assert(mode == ZSTD_sequenceFormat_e.ZSTD_sf_explicitBlockDelimiters); { - nuint totalMatchSize = 0; - nuint litSize = 0; - nuint n; - assert(seqs != null); - for (n = 0; n < nbSeqs; n++) + nuint explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); { - totalMatchSize += seqs[n].matchLength; - litSize += seqs[n].litLength; - if (seqs[n].matchLength == 0) + nuint err_code = explicitBlockSize; + if (ERR_isError(err_code)) { - assert(seqs[n].offset == 0); - break; + return err_code; } } - if (n == nbSeqs) - { - BlockSummary bs; - System.Runtime.CompilerServices.Unsafe.SkipInit(out bs); - bs.nbSequences = unchecked( + if (explicitBlockSize > blockSize) + return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) ); - return bs; - } + if (explicitBlockSize > remaining) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + return explicitBlockSize; + } + } + /* Compress all provided sequences, block-by-block. + * + * Returns the cumulative size of all compressed blocks (including their headers), + * otherwise a ZSTD error. 
+ */ + private static nuint ZSTD_compressSequences_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint srcSize + ) + { + nuint cSize = 0; + nuint remaining = srcSize; + ZSTD_SequencePosition seqPos = new ZSTD_SequencePosition + { + idx = 0, + posInSequence = 0, + posInSrc = 0, + }; + byte* ip = (byte*)src; + byte* op = (byte*)dst; + void* sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); + if (remaining == 0) + { + /* last block */ + uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); + if (dstCapacity < 4) { - BlockSummary bs; - bs.nbSequences = n + 1; - bs.blockSize = litSize + totalMatchSize; - bs.litSize = litSize; - return bs; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + + MEM_writeLE32(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + cSize += ZSTD_blockHeaderSize; } - private static nuint ZSTD_compressSequencesAndLiterals_internal( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - ZSTD_Sequence* inSeqs, - nuint nbSequences, - void* literals, - nuint litSize, - nuint srcSize - ) + while (remaining != 0) { - nuint remaining = srcSize; - nuint cSize = 0; - byte* op = (byte*)dst; - int repcodeResolution = - cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable - ? 1 - : 0; - assert( - cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto + nuint compressedSeqsSize; + nuint cBlockSize; + nuint blockSize = determine_blockSize( + cctx->appliedParams.blockDelimiters, + cctx->blockSizeMax, + remaining, + inSeqs, + inSeqsSize, + seqPos ); - if (nbSequences == 0) + uint lastBlock = blockSize == remaining ? 
1U : 0U; { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + nuint err_code = blockSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - if (nbSequences == 1 && inSeqs[0].litLength == 0) + assert(blockSize <= remaining); + ZSTD_resetSeqStore(&cctx->seqStore); + blockSize = ( + (delegate* managed< + ZSTD_CCtx_s*, + ZSTD_SequencePosition*, + ZSTD_Sequence*, + nuint, + void*, + nuint, + ZSTD_paramSwitch_e, + nuint>)sequenceCopier + )( + cctx, + &seqPos, + inSeqs, + inSeqsSize, + ip, + blockSize, + cctx->appliedParams.searchForExternalRepcodes + ); { - /* last block */ - uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); - if (dstCapacity < 3) + nuint err_code = blockSize; + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return err_code; } - - MEM_writeLE24(op, cBlockHeader24); - op += ZSTD_blockHeaderSize; - dstCapacity -= ZSTD_blockHeaderSize; - cSize += ZSTD_blockHeaderSize; } - while (nbSequences != 0) + if (blockSize < (nuint)(1 + 1) + ZSTD_blockHeaderSize + 1 + 1) { - nuint compressedSeqsSize, - cBlockSize, - conversionStatus; - BlockSummary block = ZSTD_get1BlockSummary(inSeqs, nbSequences); - uint lastBlock = block.nbSequences == nbSequences ? 
1U : 0U; + cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); { - nuint err_code = block.nbSequences; + nuint err_code = cBlockSize; if (ERR_isError(err_code)) { return err_code; } } - assert(block.nbSequences <= nbSequences); - if (block.litSize > litSize) + cSize += cBlockSize; + ip += blockSize; + op += cBlockSize; + remaining -= blockSize; + dstCapacity -= cBlockSize; + continue; + } + + if (dstCapacity < ZSTD_blockHeaderSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + compressedSeqsSize = ZSTD_entropyCompressSeqStore( + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, + &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + blockSize, + cctx->tmpWorkspace, + cctx->tmpWkspSize, + cctx->bmi2 + ); + { + nuint err_code = compressedSeqsSize; + if (ERR_isError(err_code)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return err_code; } + } - ZSTD_resetSeqStore(&cctx->seqStore); - conversionStatus = ZSTD_convertBlockSequences( - cctx, - inSeqs, - block.nbSequences, - repcodeResolution - ); + if ( + cctx->isFirstBlock == 0 + && ZSTD_maybeRLE(&cctx->seqStore) != 0 + && ZSTD_isRLE(ip, blockSize) != 0 + ) + { + compressedSeqsSize = 1; + } + + if (compressedSeqsSize == 0) + { + cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); { - nuint err_code = conversionStatus; + nuint err_code = cBlockSize; if (ERR_isError(err_code)) { return err_code; } } - - inSeqs += block.nbSequences; - nbSequences -= block.nbSequences; - remaining -= block.blockSize; - if (dstCapacity < ZSTD_blockHeaderSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( - op + ZSTD_blockHeaderSize, - dstCapacity - ZSTD_blockHeaderSize, - literals, - block.litSize, - 
&cctx->seqStore, - &cctx->blockState.prevCBlock->entropy, - &cctx->blockState.nextCBlock->entropy, - &cctx->appliedParams, - cctx->tmpWorkspace, - cctx->tmpWkspSize, - cctx->bmi2 - ); + } + else if (compressedSeqsSize == 1) + { + cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); { - nuint err_code = compressedSeqsSize; + nuint err_code = cBlockSize; if (ERR_isError(err_code)) { return err_code; } } + } + else + { + uint cBlockHeader; + ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); + if ( + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + cBlockHeader = + lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(compressedSeqsSize << 3); + MEM_writeLE24(op, cBlockHeader); + cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + } + + cSize += cBlockSize; + if (lastBlock != 0) + { + break; + } + else + { + ip += blockSize; + op += cBlockSize; + remaining -= blockSize; + dstCapacity -= cBlockSize; + cctx->isFirstBlock = 0; + } + } + + return cSize; + } + + /*! ZSTD_compressSequences() : + * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + * @src contains the entire input (not just the literals). + * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals + * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). + * The entire source is compressed into a single frame. + * + * The compression behavior changes based on cctx params. In particular: + * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain + * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on + * the block size derived from the cctx, and sequences may be split. 
This is the default setting. + * + * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain + * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. + * + * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes + * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit + * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. + * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). + * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction. + * + * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined + * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for + * specifics regarding offset/matchlength requirements) and then bail out and return an error. + * + * In addition to the two adjustable experimental params, there are other important cctx params. + * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. + * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression. + * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset + * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md + * + * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused. 
+ * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly, + * and cannot emit an RLE block that disagrees with the repcode history. + * @return : final compressed size, or a ZSTD error code. + */ + public static nuint ZSTD_compressSequences( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* src, + nuint srcSize + ) + { + byte* op = (byte*)dst; + nuint cSize = 0; + assert(cctx != null); + { + nuint err_code = ZSTD_CCtx_init_compressStream2( + cctx, + ZSTD_EndDirective.ZSTD_e_end, + srcSize + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } + + { + nuint frameHeaderSize = ZSTD_writeFrameHeader( + op, + dstCapacity, + &cctx->appliedParams, + srcSize, + cctx->dictID + ); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; + } + + if (cctx->appliedParams.fParams.checksumFlag != 0 && srcSize != 0) + { + ZSTD_XXH64_update(&cctx->xxhState, src, srcSize); + } - if (compressedSeqsSize > cctx->blockSizeMax) - compressedSeqsSize = 0; - litSize -= block.litSize; - literals = (sbyte*)literals + block.litSize; - if (compressedSeqsSize == 0) + { + nuint cBlocksSize = ZSTD_compressSequences_internal( + cctx, + op, + dstCapacity, + inSeqs, + inSeqsSize, + src, + srcSize + ); + { + nuint err_code = cBlocksSize; + if (ERR_isError(err_code)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock) - ); + return err_code; } - else + } + + cSize += cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; + } + + if (cctx->appliedParams.fParams.checksumFlag != 0) + { + uint checksum = (uint)ZSTD_XXH64_digest(&cctx->xxhState); + if (dstCapacity < 4) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + MEM_writeLE32((sbyte*)dst + cSize, checksum); + cSize += 4; + } + + 
return cSize; + } + + private static nuint convertSequences_noRepcodes( + SeqDef_s* dstSeqs, + ZSTD_Sequence* inSeqs, + nuint nbSequences + ) + { + nuint longLen = 0; + nuint n; + for (n = 0; n < nbSequences; n++) + { + assert(inSeqs[n].offset > 0); + dstSeqs[n].offBase = inSeqs[n].offset + 3; + dstSeqs[n].litLength = (ushort)inSeqs[n].litLength; + dstSeqs[n].mlBase = (ushort)(inSeqs[n].matchLength - 3); + if (inSeqs[n].matchLength > 65535 + 3) + { + assert(longLen == 0); + longLen = n + 1; + } + + if (inSeqs[n].litLength > 65535) + { + assert(longLen == 0); + longLen = n + nbSequences + 1; + } + } + + return longLen; + } + + /* + * Precondition: Sequences must end on an explicit Block Delimiter + * @return: 0 on success, or an error code. + * Note: Sequence validation functionality has been disabled (removed). + * This is helpful to generate a lean main pipeline, improving performance. + * It may be re-inserted later. + */ + private static nuint ZSTD_convertBlockSequences( + ZSTD_CCtx_s* cctx, + ZSTD_Sequence* inSeqs, + nuint nbSequences, + int repcodeResolution + ) + { + repcodes_s updatedRepcodes; + nuint seqNb = 0; + if (nbSequences >= cctx->seqStore.maxNbSeq) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + } + + memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); + assert(nbSequences >= 1); + assert(inSeqs[nbSequences - 1].matchLength == 0); + assert(inSeqs[nbSequences - 1].offset == 0); + if (repcodeResolution == 0) + { + nuint longl = convertSequences_noRepcodes( + cctx->seqStore.sequencesStart, + inSeqs, + nbSequences - 1 + ); + cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences - 1; + if (longl != 0) + { + assert(cctx->seqStore.longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); + if (longl <= nbSequences - 1) { - uint cBlockHeader; - assert(compressedSeqsSize > 1); - ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); - if ( - 
cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid - ) - cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; - cBlockHeader = - lastBlock - + ((uint)blockType_e.bt_compressed << 1) - + (uint)(compressedSeqsSize << 3); - MEM_writeLE24(op, cBlockHeader); - cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; + cctx->seqStore.longLengthPos = (uint)(longl - 1); } - - cSize += cBlockSize; - op += cBlockSize; - dstCapacity -= cBlockSize; - cctx->isFirstBlock = 0; - if (lastBlock != 0) + else { - assert(nbSequences == 0); - break; + assert(longl <= 2 * (nbSequences - 1)); + cctx->seqStore.longLengthType = + ZSTD_longLengthType_e.ZSTD_llt_literalLength; + cctx->seqStore.longLengthPos = (uint)(longl - (nbSequences - 1) - 1); } } - - if (litSize != 0) + } + else + { + for (seqNb = 0; seqNb < nbSequences - 1; seqNb++) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + uint litLength = inSeqs[seqNb].litLength; + uint matchLength = inSeqs[seqNb].matchLength; + uint ll0 = litLength == 0 ? 
1U : 0U; + uint offBase = ZSTD_finalizeOffBase( + inSeqs[seqNb].offset, + updatedRepcodes.rep, + ll0 ); + ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } + } - if (remaining != 0) + if (repcodeResolution == 0 && nbSequences > 1) + { + uint* rep = updatedRepcodes.rep; + if (nbSequences >= 4) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + /* index of last full sequence */ + uint lastSeqIdx = (uint)nbSequences - 2; + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } + else if (nbSequences == 3) + { + rep[2] = rep[0]; + rep[1] = inSeqs[0].offset; + rep[0] = inSeqs[1].offset; + } + else + { + assert(nbSequences == 2); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[0].offset; + } + } + + memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, (uint)sizeof(repcodes_s)); + return 0; + } + + private static BlockSummary ZSTD_get1BlockSummary(ZSTD_Sequence* seqs, nuint nbSeqs) + { + nuint totalMatchSize = 0; + nuint litSize = 0; + nuint n; + assert(seqs != null); + for (n = 0; n < nbSeqs; n++) + { + totalMatchSize += seqs[n].matchLength; + litSize += seqs[n].litLength; + if (seqs[n].matchLength == 0) + { + assert(seqs[n].offset == 0); + break; } + } - return cSize; - } - - /*! ZSTD_compressSequencesAndLiterals() : - * This is a variant of ZSTD_compressSequences() which, - * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), - * aka all the literals, already extracted and laid out into a single continuous buffer. - * This can be useful if the process generating the sequences also happens to generate the buffer of literals, - * thus skipping an extraction + caching stage. 
- * It's a speed optimization, useful when the right conditions are met, - * but it also features the following limitations: - * - Only supports explicit delimiter mode - * - Currently does not support Sequences validation (so input Sequences are trusted) - * - Not compatible with frame checksum, which must be disabled - * - If any block is incompressible, will fail and return an error - * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error. - * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. - * @litBufCapacity must be at least 8 bytes larger than @litSize. - * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. - * @return : final compressed size, or a ZSTD error code. - */ - public static nuint ZSTD_compressSequencesAndLiterals( - ZSTD_CCtx_s* cctx, - void* dst, - nuint dstCapacity, - ZSTD_Sequence* inSeqs, - nuint inSeqsSize, - void* literals, - nuint litSize, - nuint litCapacity, - nuint decompressedSize - ) + if (n == nbSeqs) + { + BlockSummary bs; + System.Runtime.CompilerServices.Unsafe.SkipInit(out bs); + bs.nbSequences = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + return bs; + } + + { + BlockSummary bs; + bs.nbSequences = n + 1; + bs.blockSize = litSize + totalMatchSize; + bs.litSize = litSize; + return bs; + } + } + + private static nuint ZSTD_compressSequencesAndLiterals_internal( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint nbSequences, + void* literals, + nuint litSize, + nuint srcSize + ) + { + nuint remaining = srcSize; + nuint cSize = 0; + byte* op = (byte*)dst; + int repcodeResolution = + cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 
1 + : 0; + assert( + cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto + ); + if (nbSequences == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); + } + + if (nbSequences == 1 && inSeqs[0].litLength == 0) { - byte* op = (byte*)dst; - nuint cSize = 0; - assert(cctx != null); - if (litCapacity < litSize) + /* last block */ + uint cBlockHeader24 = 1 + ((uint)blockType_e.bt_raw << 1); + if (dstCapacity < 3) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + MEM_writeLE24(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + cSize += ZSTD_blockHeaderSize; + } + + while (nbSequences != 0) + { + nuint compressedSeqsSize, + cBlockSize, + conversionStatus; + BlockSummary block = ZSTD_get1BlockSummary(inSeqs, nbSequences); + uint lastBlock = block.nbSequences == nbSequences ? 
1U : 0U; { - nuint err_code = ZSTD_CCtx_init_compressStream2( - cctx, - ZSTD_EndDirective.ZSTD_e_end, - decompressedSize - ); + nuint err_code = block.nbSequences; if (ERR_isError(err_code)) { return err_code; } } - if ( - cctx->appliedParams.blockDelimiters - == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters - ) + assert(block.nbSequences <= nbSequences); + if (block.litSize > litSize) { return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) ); } - if (cctx->appliedParams.validateSequences != 0) + ZSTD_resetSeqStore(&cctx->seqStore); + conversionStatus = ZSTD_convertBlockSequences( + cctx, + inSeqs, + block.nbSequences, + repcodeResolution + ); { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + nuint err_code = conversionStatus; + if (ERR_isError(err_code)) + { + return err_code; + } } - if (cctx->appliedParams.fParams.checksumFlag != 0) + inSeqs += block.nbSequences; + nbSequences -= block.nbSequences; + remaining -= block.blockSize; + if (dstCapacity < ZSTD_blockHeaderSize) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( + op + ZSTD_blockHeaderSize, + dstCapacity - ZSTD_blockHeaderSize, + literals, + block.litSize, + &cctx->seqStore, + &cctx->blockState.prevCBlock->entropy, + &cctx->blockState.nextCBlock->entropy, + &cctx->appliedParams, + cctx->tmpWorkspace, + cctx->tmpWkspSize, + cctx->bmi2 + ); { - nuint frameHeaderSize = ZSTD_writeFrameHeader( - op, - dstCapacity, - &cctx->appliedParams, - decompressedSize, - cctx->dictID - ); - op += frameHeaderSize; - assert(frameHeaderSize <= dstCapacity); - dstCapacity -= frameHeaderSize; - cSize += frameHeaderSize; + nuint err_code = compressedSeqsSize; + if (ERR_isError(err_code)) + { + return 
err_code; + } } + if (compressedSeqsSize > cctx->blockSizeMax) + compressedSeqsSize = 0; + litSize -= block.litSize; + literals = (sbyte*)literals + block.litSize; + if (compressedSeqsSize == 0) { - nuint cBlocksSize = ZSTD_compressSequencesAndLiterals_internal( - cctx, - op, - dstCapacity, - inSeqs, - inSeqsSize, - literals, - litSize, - decompressedSize + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_cannotProduce_uncompressedBlock) ); - { - nuint err_code = cBlocksSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + } + else + { + uint cBlockHeader; + assert(compressedSeqsSize > 1); + ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); + if ( + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode + == FSE_repeat.FSE_repeat_valid + ) + cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = + FSE_repeat.FSE_repeat_check; + cBlockHeader = + lastBlock + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(compressedSeqsSize << 3); + MEM_writeLE24(op, cBlockHeader); + cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; + } - cSize += cBlocksSize; - assert(cBlocksSize <= dstCapacity); - dstCapacity -= cBlocksSize; + cSize += cBlockSize; + op += cBlockSize; + dstCapacity -= cBlockSize; + cctx->isFirstBlock = 0; + if (lastBlock != 0) + { + assert(nbSequences == 0); + break; } + } - return cSize; + if (litSize != 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } - /*====== Finalize ======*/ - private static ZSTD_inBuffer_s inBuffer_forEndFlush(ZSTD_CCtx_s* zcs) + if (remaining != 0) { - ZSTD_inBuffer_s nullInput = new ZSTD_inBuffer_s - { - src = null, - size = 0, - pos = 0, - }; - int stableInput = - zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ? 1 : 0; - return stableInput != 0 ? zcs->expectedInBuffer : nullInput; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) + ); } - /*! 
ZSTD_flushStream() : - * @return : amount of data remaining to flush */ - public static nuint ZSTD_flushStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) + return cSize; + } + + /*! ZSTD_compressSequencesAndLiterals() : + * This is a variant of ZSTD_compressSequences() which, + * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), + * aka all the literals, already extracted and laid out into a single continuous buffer. + * This can be useful if the process generating the sequences also happens to generate the buffer of literals, + * thus skipping an extraction + caching stage. + * It's a speed optimization, useful when the right conditions are met, + * but it also features the following limitations: + * - Only supports explicit delimiter mode + * - Currently does not support Sequences validation (so input Sequences are trusted) + * - Not compatible with frame checksum, which must be disabled + * - If any block is incompressible, will fail and return an error + * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error. + * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. + * @litBufCapacity must be at least 8 bytes larger than @litSize. + * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. + * @return : final compressed size, or a ZSTD error code. 
+ */ + public static nuint ZSTD_compressSequencesAndLiterals( + ZSTD_CCtx_s* cctx, + void* dst, + nuint dstCapacity, + ZSTD_Sequence* inSeqs, + nuint inSeqsSize, + void* literals, + nuint litSize, + nuint litCapacity, + nuint decompressedSize + ) + { + byte* op = (byte*)dst; + nuint cSize = 0; + assert(cctx != null); + if (litCapacity < litSize) { - ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs); - input.size = input.pos; - return ZSTD_compressStream2(zcs, output, &input, ZSTD_EndDirective.ZSTD_e_flush); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)); } - /*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */ - public static nuint ZSTD_endStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) { - ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs); - nuint remainingToFlush = ZSTD_compressStream2( - zcs, - output, - &input, - ZSTD_EndDirective.ZSTD_e_end + nuint err_code = ZSTD_CCtx_init_compressStream2( + cctx, + ZSTD_EndDirective.ZSTD_e_end, + decompressedSize ); + if (ERR_isError(err_code)) { - nuint err_code = remainingToFlush; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - if (zcs->appliedParams.nbWorkers > 0) - return remainingToFlush; - { - nuint lastBlockSize = (nuint)(zcs->frameEnded != 0 ? 0 : 3); - nuint checksumSize = (nuint)( - zcs->frameEnded != 0 ? 
0 : zcs->appliedParams.fParams.checksumFlag * 4 - ); - nuint toFlush = remainingToFlush + lastBlockSize + checksumSize; - return toFlush; - } + if ( + cctx->appliedParams.blockDelimiters + == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } - public static int ZSTD_maxCLevel() + if (cctx->appliedParams.validateSequences != 0) { - return 22; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } - public static int ZSTD_minCLevel() + if (cctx->appliedParams.fParams.checksumFlag != 0) { - return -(1 << 17); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } - public static int ZSTD_defaultCLevel() { - return 3; + nuint frameHeaderSize = ZSTD_writeFrameHeader( + op, + dstCapacity, + &cctx->appliedParams, + decompressedSize, + cctx->dictID + ); + op += frameHeaderSize; + assert(frameHeaderSize <= dstCapacity); + dstCapacity -= frameHeaderSize; + cSize += frameHeaderSize; } - private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( - int compressionLevel, - nuint dictSize - ) { - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - 0, - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_createCDict + nuint cBlocksSize = ZSTD_compressSequencesAndLiterals_internal( + cctx, + op, + dstCapacity, + inSeqs, + inSeqsSize, + literals, + litSize, + decompressedSize ); - switch (cParams.strategy) { - case ZSTD_strategy.ZSTD_fast: - case ZSTD_strategy.ZSTD_dfast: - break; - case ZSTD_strategy.ZSTD_greedy: - case ZSTD_strategy.ZSTD_lazy: - case ZSTD_strategy.ZSTD_lazy2: - cParams.hashLog += 2; - break; - case ZSTD_strategy.ZSTD_btlazy2: - case ZSTD_strategy.ZSTD_btopt: - case ZSTD_strategy.ZSTD_btultra: - case ZSTD_strategy.ZSTD_btultra2: - break; + nuint err_code = cBlocksSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - return cParams; + cSize 
+= cBlocksSize; + assert(cBlocksSize <= dstCapacity); + dstCapacity -= cBlocksSize; } - private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParameters* cParams) - { - return - cParams->strategy >= ZSTD_strategy.ZSTD_greedy - && cParams->strategy <= ZSTD_strategy.ZSTD_lazy2 - && cParams->hashLog > cParams->chainLog - && cParams->chainLog <= 24 - ? 1 - : 0; - } + return cSize; + } - /** - * Reverses the adjustment applied to cparams when enabling dedicated dict - * search. This is used to recover the params set to be used in the working - * context. (Otherwise, those tables would also grow.) - */ - private static void ZSTD_dedicatedDictSearch_revertCParams( - ZSTD_compressionParameters* cParams - ) - { - switch (cParams->strategy) - { - case ZSTD_strategy.ZSTD_fast: - case ZSTD_strategy.ZSTD_dfast: - break; - case ZSTD_strategy.ZSTD_greedy: - case ZSTD_strategy.ZSTD_lazy: - case ZSTD_strategy.ZSTD_lazy2: - cParams->hashLog -= 2; - if (cParams->hashLog < 6) - { - cParams->hashLog = 6; - } + /*====== Finalize ======*/ + private static ZSTD_inBuffer_s inBuffer_forEndFlush(ZSTD_CCtx_s* zcs) + { + ZSTD_inBuffer_s nullInput = new ZSTD_inBuffer_s + { + src = null, + size = 0, + pos = 0, + }; + int stableInput = + zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ? 1 : 0; + return stableInput != 0 ? zcs->expectedInBuffer : nullInput; + } - break; - case ZSTD_strategy.ZSTD_btlazy2: - case ZSTD_strategy.ZSTD_btopt: - case ZSTD_strategy.ZSTD_btultra: - case ZSTD_strategy.ZSTD_btultra2: - break; - } - } + /*! 
ZSTD_flushStream() : + * @return : amount of data remaining to flush */ + public static nuint ZSTD_flushStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) + { + ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs); + input.size = input.pos; + return ZSTD_compressStream2(zcs, output, &input, ZSTD_EndDirective.ZSTD_e_flush); + } - private static ulong ZSTD_getCParamRowSize( - ulong srcSizeHint, - nuint dictSize, - ZSTD_CParamMode_e mode - ) + /*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */ + public static nuint ZSTD_endStream(ZSTD_CCtx_s* zcs, ZSTD_outBuffer_s* output) + { + ZSTD_inBuffer_s input = inBuffer_forEndFlush(zcs); + nuint remainingToFlush = ZSTD_compressStream2( + zcs, + output, + &input, + ZSTD_EndDirective.ZSTD_e_end + ); { - switch (mode) - { - case ZSTD_CParamMode_e.ZSTD_cpm_unknown: - case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: - case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: - break; - case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: - dictSize = 0; - break; - default: - assert(0 != 0); - break; - } - + nuint err_code = remainingToFlush; + if (ERR_isError(err_code)) { - int unknown = srcSizeHint == unchecked(0UL - 1) ? 1 : 0; - nuint addedSize = (nuint)(unknown != 0 && dictSize > 0 ? 500 : 0); - return unknown != 0 && dictSize == 0 - ? unchecked(0UL - 1) - : srcSizeHint + dictSize + addedSize; + return err_code; } } - /*! ZSTD_getCParams_internal() : - * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. - * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. - * Use dictSize == 0 for unknown or unused. - * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. 
*/ - private static ZSTD_compressionParameters ZSTD_getCParams_internal( - int compressionLevel, - ulong srcSizeHint, - nuint dictSize, - ZSTD_CParamMode_e mode - ) + if (zcs->appliedParams.nbWorkers > 0) + return remainingToFlush; { - ulong rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); - uint tableID = (uint)( - (rSize <= 256 * (1 << 10) ? 1 : 0) - + (rSize <= 128 * (1 << 10) ? 1 : 0) - + (rSize <= 16 * (1 << 10) ? 1 : 0) + nuint lastBlockSize = (nuint)(zcs->frameEnded != 0 ? 0 : 3); + nuint checksumSize = (nuint)( + zcs->frameEnded != 0 ? 0 : zcs->appliedParams.fParams.checksumFlag * 4 ); - int row; - if (compressionLevel == 0) - row = 3; - else if (compressionLevel < 0) - row = 0; - else if (compressionLevel > 22) - row = 22; - else - row = compressionLevel; - { - ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; - if (compressionLevel < 0) + nuint toFlush = remainingToFlush + lastBlockSize + checksumSize; + return toFlush; + } + } + + public static int ZSTD_maxCLevel() + { + return 22; + } + + public static int ZSTD_minCLevel() + { + return -(1 << 17); + } + + public static int ZSTD_defaultCLevel() + { + return 3; + } + + private static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams( + int compressionLevel, + nuint dictSize + ) + { + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + 0, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_createCDict + ); + switch (cParams.strategy) + { + case ZSTD_strategy.ZSTD_fast: + case ZSTD_strategy.ZSTD_dfast: + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + cParams.hashLog += 2; + break; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + break; + } + + return cParams; + } + + private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParameters* cParams) + { + return + cParams->strategy 
>= ZSTD_strategy.ZSTD_greedy + && cParams->strategy <= ZSTD_strategy.ZSTD_lazy2 + && cParams->hashLog > cParams->chainLog + && cParams->chainLog <= 24 + ? 1 + : 0; + } + + /** + * Reverses the adjustment applied to cparams when enabling dedicated dict + * search. This is used to recover the params set to be used in the working + * context. (Otherwise, those tables would also grow.) + */ + private static void ZSTD_dedicatedDictSearch_revertCParams( + ZSTD_compressionParameters* cParams + ) + { + switch (cParams->strategy) + { + case ZSTD_strategy.ZSTD_fast: + case ZSTD_strategy.ZSTD_dfast: + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + cParams->hashLog -= 2; + if (cParams->hashLog < 6) { - int clampedCompressionLevel = - ZSTD_minCLevel() > compressionLevel ? ZSTD_minCLevel() : compressionLevel; - cp.targetLength = (uint)-clampedCompressionLevel; + cParams->hashLog = 6; } - return ZSTD_adjustCParams_internal( - cp, - srcSizeHint, - dictSize, - mode, - ZSTD_paramSwitch_e.ZSTD_ps_auto - ); - } + break; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + break; } + } - /*! ZSTD_getCParams() : - * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. 
- * Size values are optional, provide 0 if not known or unused */ - public static ZSTD_compressionParameters ZSTD_getCParams( - int compressionLevel, - ulong srcSizeHint, - nuint dictSize - ) + private static ulong ZSTD_getCParamRowSize( + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) + { + switch (mode) { - if (srcSizeHint == 0) - srcSizeHint = unchecked(0UL - 1); - return ZSTD_getCParams_internal( - compressionLevel, - srcSizeHint, - dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_unknown - ); + case ZSTD_CParamMode_e.ZSTD_cpm_unknown: + case ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict: + case ZSTD_CParamMode_e.ZSTD_cpm_createCDict: + break; + case ZSTD_CParamMode_e.ZSTD_cpm_attachDict: + dictSize = 0; + break; + default: + assert(0 != 0); + break; } - /*! ZSTD_getParams() : - * same idea as ZSTD_getCParams() - * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). - * Fields of `ZSTD_frameParameters` are set to default values */ - private static ZSTD_parameters ZSTD_getParams_internal( - int compressionLevel, - ulong srcSizeHint, - nuint dictSize, - ZSTD_CParamMode_e mode - ) { - ZSTD_parameters @params; - ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( - compressionLevel, - srcSizeHint, - dictSize, - mode - ); - @params = new ZSTD_parameters { cParams = cParams }; - @params.fParams.contentSizeFlag = 1; - return @params; + int unknown = srcSizeHint == unchecked(0UL - 1) ? 1 : 0; + nuint addedSize = (nuint)(unknown != 0 && dictSize > 0 ? 500 : 0); + return unknown != 0 && dictSize == 0 + ? unchecked(0UL - 1) + : srcSizeHint + dictSize + addedSize; } + } - /*! ZSTD_getParams() : - * same idea as ZSTD_getCParams() - * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). - * Fields of `ZSTD_frameParameters` are set to default values */ - public static ZSTD_parameters ZSTD_getParams( - int compressionLevel, - ulong srcSizeHint, - nuint dictSize - ) + /*! 
ZSTD_getCParams_internal() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. + * Use dictSize == 0 for unknown or unused. + * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */ + private static ZSTD_compressionParameters ZSTD_getCParams_internal( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) + { + ulong rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); + uint tableID = (uint)( + (rSize <= 256 * (1 << 10) ? 1 : 0) + + (rSize <= 128 * (1 << 10) ? 1 : 0) + + (rSize <= 16 * (1 << 10) ? 1 : 0) + ); + int row; + if (compressionLevel == 0) + row = 3; + else if (compressionLevel < 0) + row = 0; + else if (compressionLevel > 22) + row = 22; + else + row = compressionLevel; { - if (srcSizeHint == 0) - srcSizeHint = unchecked(0UL - 1); - return ZSTD_getParams_internal( - compressionLevel, + ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; + if (compressionLevel < 0) + { + int clampedCompressionLevel = + ZSTD_minCLevel() > compressionLevel ? ZSTD_minCLevel() : compressionLevel; + cp.targetLength = (uint)-clampedCompressionLevel; + } + + return ZSTD_adjustCParams_internal( + cp, srcSizeHint, dictSize, - ZSTD_CParamMode_e.ZSTD_cpm_unknown + mode, + ZSTD_paramSwitch_e.ZSTD_ps_auto ); } + } - /*! ZSTD_registerSequenceProducer() : - * Instruct zstd to use a block-level external sequence producer function. - * - * The sequenceProducerState must be initialized by the caller, and the caller is - * responsible for managing its lifetime. This parameter is sticky across - * compressions. It will remain set until the user explicitly resets compression - * parameters. - * - * Sequence producer registration is considered to be an "advanced parameter", - * part of the "advanced API". 
This means it will only have an effect on compression - * APIs which respect advanced parameters, such as compress2() and compressStream2(). - * Older compression APIs such as compressCCtx(), which predate the introduction of - * "advanced parameters", will ignore any external sequence producer setting. - * - * The sequence producer can be "cleared" by registering a NULL function pointer. This - * removes all limitations described above in the "LIMITATIONS" section of the API docs. - * - * The user is strongly encouraged to read the full API documentation (above) before - * calling this function. */ - public static void ZSTD_registerSequenceProducer( - ZSTD_CCtx_s* zc, - void* extSeqProdState, - void* extSeqProdFunc - ) + /*! ZSTD_getCParams() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Size values are optional, provide 0 if not known or unused */ + public static ZSTD_compressionParameters ZSTD_getCParams( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize + ) + { + if (srcSizeHint == 0) + srcSizeHint = unchecked(0UL - 1); + return ZSTD_getCParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown + ); + } + + /*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ + private static ZSTD_parameters ZSTD_getParams_internal( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize, + ZSTD_CParamMode_e mode + ) + { + ZSTD_parameters @params; + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + mode + ); + @params = new ZSTD_parameters { cParams = cParams }; + @params.fParams.contentSizeFlag = 1; + return @params; + } + + /*! 
ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ + public static ZSTD_parameters ZSTD_getParams( + int compressionLevel, + ulong srcSizeHint, + nuint dictSize + ) + { + if (srcSizeHint == 0) + srcSizeHint = unchecked(0UL - 1); + return ZSTD_getParams_internal( + compressionLevel, + srcSizeHint, + dictSize, + ZSTD_CParamMode_e.ZSTD_cpm_unknown + ); + } + + /*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions. It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. */ + public static void ZSTD_registerSequenceProducer( + ZSTD_CCtx_s* zc, + void* extSeqProdState, + void* extSeqProdFunc + ) + { + assert(zc != null); + ZSTD_CCtxParams_registerSequenceProducer( + &zc->requestedParams, + extSeqProdState, + extSeqProdFunc + ); + } + + /*! 
ZSTD_CCtxParams_registerSequenceProducer() : + * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. + * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), + * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx(). + * + * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() + * is required, then this function is for you. Otherwise, you probably don't need it. + * + * See tests/zstreamtest.c for example usage. */ + public static void ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params_s* @params, + void* extSeqProdState, + void* extSeqProdFunc + ) + { + assert(@params != null); + if (extSeqProdFunc != null) { - assert(zc != null); - ZSTD_CCtxParams_registerSequenceProducer( - &zc->requestedParams, - extSeqProdState, - extSeqProdFunc - ); + @params->extSeqProdFunc = extSeqProdFunc; + @params->extSeqProdState = extSeqProdState; } - - /*! ZSTD_CCtxParams_registerSequenceProducer() : - * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params. - * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(), - * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx(). - * - * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx() - * is required, then this function is for you. Otherwise, you probably don't need it. - * - * See tests/zstreamtest.c for example usage. 
*/ - public static void ZSTD_CCtxParams_registerSequenceProducer( - ZSTD_CCtx_params_s* @params, - void* extSeqProdState, - void* extSeqProdFunc - ) + else { - assert(@params != null); - if (extSeqProdFunc != null) - { - @params->extSeqProdFunc = extSeqProdFunc; - @params->extSeqProdState = extSeqProdState; - } - else - { - @params->extSeqProdFunc = null; - @params->extSeqProdState = null; - } + @params->extSeqProdFunc = null; + @params->extSeqProdState = null; } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs index c3026112a..d41e50a70 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs @@ -1,48 +1,48 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /** + * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences + * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTD_SequenceLength ZSTD_getSequenceLength( + SeqStore_t* seqStore, + SeqDef_s* seq + ) { - /** - * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences - * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTD_SequenceLength ZSTD_getSequenceLength( - SeqStore_t* seqStore, - SeqDef_s* seq - ) + ZSTD_SequenceLength seqLen; + seqLen.litLength = seq->litLength; + seqLen.matchLength = (uint)(seq->mlBase + 3); + if (seqStore->longLengthPos == (uint)(seq - seqStore->sequencesStart)) { - ZSTD_SequenceLength seqLen; - seqLen.litLength = seq->litLength; - seqLen.matchLength = (uint)(seq->mlBase + 3); - if (seqStore->longLengthPos == (uint)(seq - seqStore->sequencesStart)) + if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) { - if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_literalLength) - { - seqLen.litLength += 0x10000; - } - - if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) - { - seqLen.matchLength += 0x10000; - } + seqLen.litLength += 0x10000; } - return seqLen; + if (seqStore->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_matchLength) + { + seqLen.matchLength += 0x10000; + } } - private static readonly RawSeqStore_t kNullRawSeqStore = new RawSeqStore_t( - seq: null, - pos: 0, - posInSequence: 0, - size: 0, - capacity: 0 - ); + return seqLen; + } + + private static readonly RawSeqStore_t kNullRawSeqStore = new RawSeqStore_t( + seq: null, + pos: 0, + posInSequence: 0, + size: 0, + capacity: 0 + ); #if NET7_0_OR_GREATER private static ReadOnlySpan Span_LL_Code => new byte[64] @@ -119,83 +119,83 @@ ref MemoryMarshal.GetReference(Span_LL_Code) ); #else - private static readonly byte* LL_Code = GetArrayPointer( - new byte[64] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 16, - 17, - 17, - 18, - 18, - 19, - 19, - 20, - 20, - 20, - 20, - 21, - 21, - 21, - 21, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - } - ); -#endif - - 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_LLcode(uint litLength) + private static readonly byte* LL_Code = GetArrayPointer( + new byte[64] { - const uint LL_deltaCode = 19; - return litLength > 63 ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, } + ); +#endif + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_LLcode(uint litLength) + { + const uint LL_deltaCode = 19; + return litLength > 63 ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + } #if NET7_0_OR_GREATER private static ReadOnlySpan Span_ML_Code => @@ -337,972 +337,972 @@ ref MemoryMarshal.GetReference(Span_ML_Code) ); #else - private static readonly byte* ML_Code = GetArrayPointer( - new byte[128] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 32, - 33, - 33, - 34, - 34, - 35, - 35, - 36, - 36, - 36, - 36, - 37, - 37, - 37, - 37, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - } - ); -#endif - /* ZSTD_MLcode() : - * note : mlBase = matchLength - MINMATCH; - * because 
it's the format it's stored in seqStore->sequences */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_MLcode(uint mlBase) + private static readonly byte* ML_Code = GetArrayPointer( + new byte[128] { - const uint ML_deltaCode = 36; - return mlBase > 127 ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 32, + 33, + 33, + 34, + 34, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, } + ); +#endif + /* ZSTD_MLcode() : + * note : mlBase = matchLength - MINMATCH; + * because it's the format it's stored in seqStore->sequences */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_MLcode(uint mlBase) + { + const uint ML_deltaCode = 36; + return mlBase > 127 ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; + } + + /* ZSTD_cParam_withinBounds: + * @return 1 if value is within cParam bounds, + * 0 otherwise */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) + { + ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); + if (ERR_isError(bounds.error)) + return 0; + if (value < bounds.lowerBound) + return 0; + if (value > bounds.upperBound) + return 0; + return 1; + } + + /* ZSTD_selectAddr: + * @return index >= lowLimit ? 
candidate : backup, + * tries to force branchless codegen. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_selectAddr( + uint index, + uint lowLimit, + byte* candidate, + byte* backup + ) + { + return index >= lowLimit ? candidate : backup; + } - /* ZSTD_cParam_withinBounds: - * @return 1 if value is within cParam bounds, - * 0 otherwise */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) + /* ZSTD_noCompressBlock() : + * Writes uncompressed block to dst buffer from given src. + * Returns the size of the block */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_noCompressBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) + { + uint cBlockHeader24 = + lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) { - ZSTD_bounds bounds = ZSTD_cParam_getBounds(cParam); - if (ERR_isError(bounds.error)) - return 0; - if (value < bounds.lowerBound) - return 0; - if (value > bounds.upperBound) - return 0; - return 1; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - /* ZSTD_selectAddr: - * @return index >= lowLimit ? candidate : backup, - * tries to force branchless codegen. 
*/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_selectAddr( - uint index, - uint lowLimit, - byte* candidate, - byte* backup - ) + MEM_writeLE24(dst, cBlockHeader24); + memcpy((byte*)dst + ZSTD_blockHeaderSize, src, (uint)srcSize); + return ZSTD_blockHeaderSize + srcSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_rleCompressBlock( + void* dst, + nuint dstCapacity, + byte src, + nuint srcSize, + uint lastBlock + ) + { + byte* op = (byte*)dst; + uint cBlockHeader = lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(srcSize << 3); + if (dstCapacity < 4) { - return index >= lowLimit ? candidate : backup; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - /* ZSTD_noCompressBlock() : - * Writes uncompressed block to dst buffer from given src. - * Returns the size of the block */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_noCompressBlock( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastBlock - ) - { - uint cBlockHeader24 = - lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); - if (srcSize + ZSTD_blockHeaderSize > dstCapacity) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + MEM_writeLE24(op, cBlockHeader); + op[3] = src; + return 4; + } - MEM_writeLE24(dst, cBlockHeader24); - memcpy((byte*)dst + ZSTD_blockHeaderSize, src, (uint)srcSize); - return ZSTD_blockHeaderSize + srcSize; - } + /* ZSTD_minGain() : + * minimum compression required + * to generate a compress block or a compressed literals section. + * note : use same formula for both situations */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_minGain(nuint srcSize, ZSTD_strategy strat) + { + uint minlog = strat >= ZSTD_strategy.ZSTD_btultra ? 
(uint)strat - 1 : 6; + assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); + return (srcSize >> (int)minlog) + 2; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_rleCompressBlock( - void* dst, - nuint dstCapacity, - byte src, - nuint srcSize, - uint lastBlock - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_literalsCompressionIsDisabled(ZSTD_CCtx_params_s* cctxParams) + { + switch (cctxParams->literalCompressionMode) { - byte* op = (byte*)dst; - uint cBlockHeader = lastBlock + ((uint)blockType_e.bt_rle << 1) + (uint)(srcSize << 3); - if (dstCapacity < 4) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - MEM_writeLE24(op, cBlockHeader); - op[3] = src; - return 4; + case ZSTD_paramSwitch_e.ZSTD_ps_enable: + return 0; + case ZSTD_paramSwitch_e.ZSTD_ps_disable: + return 1; + default: + assert(0 != 0); + goto case ZSTD_paramSwitch_e.ZSTD_ps_auto; + case ZSTD_paramSwitch_e.ZSTD_ps_auto: + return + cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast + && cctxParams->cParams.targetLength > 0 + ? 1 + : 0; } + } - /* ZSTD_minGain() : - * minimum compression required - * to generate a compress block or a compressed literals section. - * note : use same formula for both situations */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_minGain(nuint srcSize, ZSTD_strategy strat) + /*! ZSTD_safecopyLiterals() : + * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. + * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single + * large copies. + */ + private static void ZSTD_safecopyLiterals(byte* op, byte* ip, byte* iend, byte* ilimit_w) + { + assert(iend > ilimit_w); + if (ip <= ilimit_w) { - uint minlog = strat >= ZSTD_strategy.ZSTD_btultra ? 
(uint)strat - 1 : 6; - assert(ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)strat) != 0); - return (srcSize >> (int)minlog) + 2; + ZSTD_wildcopy(op, ip, (nint)(ilimit_w - ip), ZSTD_overlap_e.ZSTD_no_overlap); + op += ilimit_w - ip; + ip = ilimit_w; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_literalsCompressionIsDisabled(ZSTD_CCtx_params_s* cctxParams) - { - switch (cctxParams->literalCompressionMode) - { - case ZSTD_paramSwitch_e.ZSTD_ps_enable: - return 0; - case ZSTD_paramSwitch_e.ZSTD_ps_disable: - return 1; - default: - assert(0 != 0); - goto case ZSTD_paramSwitch_e.ZSTD_ps_auto; - case ZSTD_paramSwitch_e.ZSTD_ps_auto: - return - cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast - && cctxParams->cParams.targetLength > 0 - ? 1 - : 0; - } - } + while (ip < iend) + *op++ = *ip++; + } - /*! ZSTD_safecopyLiterals() : - * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. - * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single - * large copies. - */ - private static void ZSTD_safecopyLiterals(byte* op, byte* ip, byte* iend, byte* ilimit_w) + /*! ZSTD_storeSeqOnly() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * Literals themselves are not copied, but @litPtr is updated. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). 
+ * @matchLength : must be >= MINMATCH + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_storeSeqOnly( + SeqStore_t* seqStorePtr, + nuint litLength, + uint offBase, + nuint matchLength + ) + { + assert( + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) + < seqStorePtr->maxNbSeq + ); + assert(litLength <= 1 << 17); + if (litLength > 0xFFFF) { - assert(iend > ilimit_w); - if (ip <= ilimit_w) - { - ZSTD_wildcopy(op, ip, (nint)(ilimit_w - ip), ZSTD_overlap_e.ZSTD_no_overlap); - op += ilimit_w - ip; - ip = ilimit_w; - } - - while (ip < iend) - *op++ = *ip++; + assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); + seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; + seqStorePtr->longLengthPos = (uint)( + seqStorePtr->sequences - seqStorePtr->sequencesStart + ); } - /*! ZSTD_storeSeqOnly() : - * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. - * Literals themselves are not copied, but @litPtr is updated. - * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). 
- * @matchLength : must be >= MINMATCH - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_storeSeqOnly( - SeqStore_t* seqStorePtr, - nuint litLength, - uint offBase, - nuint matchLength - ) + seqStorePtr->sequences[0].litLength = (ushort)litLength; + seqStorePtr->sequences[0].offBase = offBase; + assert(matchLength <= 1 << 17); + assert(matchLength >= 3); { - assert( - (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) - < seqStorePtr->maxNbSeq - ); - assert(litLength <= 1 << 17); - if (litLength > 0xFFFF) + nuint mlBase = matchLength - 3; + if (mlBase > 0xFFFF) { assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); - seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; + seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; seqStorePtr->longLengthPos = (uint)( seqStorePtr->sequences - seqStorePtr->sequencesStart ); } - seqStorePtr->sequences[0].litLength = (ushort)litLength; - seqStorePtr->sequences[0].offBase = offBase; - assert(matchLength <= 1 << 17); - assert(matchLength >= 3); - { - nuint mlBase = matchLength - 3; - if (mlBase > 0xFFFF) - { - assert(seqStorePtr->longLengthType == ZSTD_longLengthType_e.ZSTD_llt_none); - seqStorePtr->longLengthType = ZSTD_longLengthType_e.ZSTD_llt_matchLength; - seqStorePtr->longLengthPos = (uint)( - seqStorePtr->sequences - seqStorePtr->sequencesStart - ); - } - - seqStorePtr->sequences[0].mlBase = (ushort)mlBase; - } - - seqStorePtr->sequences++; + seqStorePtr->sequences[0].mlBase = (ushort)mlBase; } - /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. - * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). - * @matchLength : must be >= MINMATCH - * Allowed to over-read literals up to litLimit. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_storeSeq( - SeqStore_t* seqStorePtr, - nuint litLength, - byte* literals, - byte* litLimit, - uint offBase, - nuint matchLength - ) + seqStorePtr->sequences++; + } + + /*! ZSTD_storeSeq() : + * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). + * @matchLength : must be >= MINMATCH + * Allowed to over-read literals up to litLimit. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_storeSeq( + SeqStore_t* seqStorePtr, + nuint litLength, + byte* literals, + byte* litLimit, + uint offBase, + nuint matchLength + ) + { + byte* litLimit_w = litLimit - 32; + byte* litEnd = literals + litLength; + assert( + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) + < seqStorePtr->maxNbSeq + ); + assert(seqStorePtr->maxNbLit <= 128 * (1 << 10)); + assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); + assert(literals + litLength <= litLimit); + if (litEnd <= litLimit_w) { - byte* litLimit_w = litLimit - 32; - byte* litEnd = literals + litLength; - assert( - (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) - < seqStorePtr->maxNbSeq - ); - assert(seqStorePtr->maxNbLit <= 128 * (1 << 10)); - assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); - assert(literals + litLength <= litLimit); - if (litEnd <= litLimit_w) - { - ZSTD_copy16(seqStorePtr->lit, literals); - if (litLength > 16) - { - ZSTD_wildcopy( - seqStorePtr->lit + 16, - literals + 16, - (nint)litLength - 16, - ZSTD_overlap_e.ZSTD_no_overlap - ); - } - } - else + ZSTD_copy16(seqStorePtr->lit, literals); + if (litLength > 16) { - ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); + ZSTD_wildcopy( + seqStorePtr->lit + 16, + literals + 16, + (nint)litLength - 16, + 
ZSTD_overlap_e.ZSTD_no_overlap + ); } - - seqStorePtr->lit += litLength; - ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); + } + else + { + ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); } - /* ZSTD_updateRep() : - * updates in-place @rep (array of repeat offsets) - * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateRep(uint* rep, uint offBase, uint ll0) + seqStorePtr->lit += litLength; + ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); + } + + /* ZSTD_updateRep() : + * updates in-place @rep (array of repeat offsets) + * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateRep(uint* rep, uint offBase, uint ll0) + { + if (offBase > 3) { - if (offBase > 3) + rep[2] = rep[1]; + rep[1] = rep[0]; + assert(offBase > 3); + rep[0] = offBase - 3; + } + else + { + assert(1 <= offBase && offBase <= 3); + uint repCode = offBase - 1 + ll0; + if (repCode > 0) { - rep[2] = rep[1]; + uint currentOffset = repCode == 3 ? rep[0] - 1 : rep[repCode]; + rep[2] = repCode >= 2 ? rep[1] : rep[2]; rep[1] = rep[0]; - assert(offBase > 3); - rep[0] = offBase - 3; - } - else - { - assert(1 <= offBase && offBase <= 3); - uint repCode = offBase - 1 + ll0; - if (repCode > 0) - { - uint currentOffset = repCode == 3 ? rep[0] - 1 : rep[repCode]; - rep[2] = repCode >= 2 ? 
rep[1] : rep[2]; - rep[1] = rep[0]; - rep[0] = currentOffset; - } + rep[0] = currentOffset; } } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static repcodes_s ZSTD_newRep(uint* rep, uint offBase, uint ll0) - { - repcodes_s newReps; - memcpy(&newReps, rep, (uint)sizeof(repcodes_s)); - ZSTD_updateRep(newReps.rep, offBase, ll0); - return newReps; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static repcodes_s ZSTD_newRep(uint* rep, uint offBase, uint ll0) + { + repcodes_s newReps; + memcpy(&newReps, rep, (uint)sizeof(repcodes_s)); + ZSTD_updateRep(newReps.rep, offBase, ll0); + return newReps; + } - /*-************************************* - * Match length counter - ***************************************/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_count(byte* pIn, byte* pMatch, byte* pInLimit) + /*-************************************* + * Match length counter + ***************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_count(byte* pIn, byte* pMatch, byte* pInLimit) + { + byte* pStart = pIn; + byte* pInLoopLimit = pInLimit - (sizeof(nuint) - 1); + if (pIn < pInLoopLimit) { - byte* pStart = pIn; - byte* pInLoopLimit = pInLimit - (sizeof(nuint) - 1); - if (pIn < pInLoopLimit) { - { - nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); - if (diff != 0) - return ZSTD_NbCommonBytes(diff); - } - - pIn += sizeof(nuint); - pMatch += sizeof(nuint); - while (pIn < pInLoopLimit) - { - nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); - if (diff == 0) - { - pIn += sizeof(nuint); - pMatch += sizeof(nuint); - continue; - } - - pIn += ZSTD_NbCommonBytes(diff); - return (nuint)(pIn - pStart); - } + nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff != 0) + return ZSTD_NbCommonBytes(diff); } - if (MEM_64bits && pIn < pInLimit - 3 && MEM_read32(pMatch) == MEM_read32(pIn)) + pIn += sizeof(nuint); + pMatch += sizeof(nuint); + while 
(pIn < pInLoopLimit) { - pIn += 4; - pMatch += 4; - } + nuint diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff == 0) + { + pIn += sizeof(nuint); + pMatch += sizeof(nuint); + continue; + } - if (pIn < pInLimit - 1 && MEM_read16(pMatch) == MEM_read16(pIn)) - { - pIn += 2; - pMatch += 2; + pIn += ZSTD_NbCommonBytes(diff); + return (nuint)(pIn - pStart); } - - if (pIn < pInLimit && *pMatch == *pIn) - pIn++; - return (nuint)(pIn - pStart); } - /** ZSTD_count_2segments() : - * can count match length with `ip` & `match` in 2 different segments. - * convention : on reaching mEnd, match count continue starting from iStart - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_count_2segments( - byte* ip, - byte* match, - byte* iEnd, - byte* mEnd, - byte* iStart - ) + if (MEM_64bits && pIn < pInLimit - 3 && MEM_read32(pMatch) == MEM_read32(pIn)) { - byte* vEnd = ip + (mEnd - match) < iEnd ? ip + (mEnd - match) : iEnd; - nuint matchLength = ZSTD_count(ip, match, vEnd); - if (match + matchLength != mEnd) - return matchLength; - return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); + pIn += 4; + pMatch += 4; } - private const uint prime3bytes = 506832829U; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_hash3(uint u, uint h, uint s) + if (pIn < pInLimit - 1 && MEM_read16(pMatch) == MEM_read16(pIn)) { - assert(h <= 32); - return ((u << 32 - 24) * prime3bytes ^ s) >> (int)(32 - h); + pIn += 2; + pMatch += 2; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash3Ptr(void* ptr, uint h) - { - return ZSTD_hash3(MEM_readLE32(ptr), h, 0); - } + if (pIn < pInLimit && *pMatch == *pIn) + pIn++; + return (nuint)(pIn - pStart); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash3PtrS(void* ptr, uint h, uint s) - { - return ZSTD_hash3(MEM_readLE32(ptr), h, s); - } + /** ZSTD_count_2segments() : + * can count match length with `ip` & 
`match` in 2 different segments. + * convention : on reaching mEnd, match count continue starting from iStart + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_count_2segments( + byte* ip, + byte* match, + byte* iEnd, + byte* mEnd, + byte* iStart + ) + { + byte* vEnd = ip + (mEnd - match) < iEnd ? ip + (mEnd - match) : iEnd; + nuint matchLength = ZSTD_count(ip, match, vEnd); + if (match + matchLength != mEnd) + return matchLength; + return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd); + } - private const uint prime4bytes = 2654435761U; + private const uint prime3bytes = 506832829U; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_hash4(uint u, uint h, uint s) - { - assert(h <= 32); - return (u * prime4bytes ^ s) >> (int)(32 - h); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_hash3(uint u, uint h, uint s) + { + assert(h <= 32); + return ((u << 32 - 24) * prime3bytes ^ s) >> (int)(32 - h); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash4Ptr(void* ptr, uint h) - { - return ZSTD_hash4(MEM_readLE32(ptr), h, 0); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash3Ptr(void* ptr, uint h) + { + return ZSTD_hash3(MEM_readLE32(ptr), h, 0); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash4PtrS(void* ptr, uint h, uint s) - { - return ZSTD_hash4(MEM_readLE32(ptr), h, s); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash3PtrS(void* ptr, uint h, uint s) + { + return ZSTD_hash3(MEM_readLE32(ptr), h, s); + } - private const ulong prime5bytes = 889523592379UL; + private const uint prime4bytes = 2654435761U; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash5(ulong u, uint h, ulong s) - { - assert(h <= 64); - return (nuint)(((u << 64 - 40) * prime5bytes ^ s) >> (int)(64 - h)); 
- } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_hash4(uint u, uint h, uint s) + { + assert(h <= 32); + return (u * prime4bytes ^ s) >> (int)(32 - h); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash5Ptr(void* p, uint h) - { - return ZSTD_hash5(MEM_readLE64(p), h, 0); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash4Ptr(void* ptr, uint h) + { + return ZSTD_hash4(MEM_readLE32(ptr), h, 0); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash5PtrS(void* p, uint h, ulong s) - { - return ZSTD_hash5(MEM_readLE64(p), h, s); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash4PtrS(void* ptr, uint h, uint s) + { + return ZSTD_hash4(MEM_readLE32(ptr), h, s); + } - private const ulong prime6bytes = 227718039650203UL; + private const ulong prime5bytes = 889523592379UL; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash6(ulong u, uint h, ulong s) - { - assert(h <= 64); - return (nuint)(((u << 64 - 48) * prime6bytes ^ s) >> (int)(64 - h)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 40) * prime5bytes ^ s) >> (int)(64 - h)); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash6Ptr(void* p, uint h) - { - return ZSTD_hash6(MEM_readLE64(p), h, 0); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5Ptr(void* p, uint h) + { + return ZSTD_hash5(MEM_readLE64(p), h, 0); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash6PtrS(void* p, uint h, ulong s) - { - return ZSTD_hash6(MEM_readLE64(p), h, s); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash5PtrS(void* p, uint h, ulong s) + { + return 
ZSTD_hash5(MEM_readLE64(p), h, s); + } - private const ulong prime7bytes = 58295818150454627UL; + private const ulong prime6bytes = 227718039650203UL; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash7(ulong u, uint h, ulong s) - { - assert(h <= 64); - return (nuint)(((u << 64 - 56) * prime7bytes ^ s) >> (int)(64 - h)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 48) * prime6bytes ^ s) >> (int)(64 - h)); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash7Ptr(void* p, uint h) - { - return ZSTD_hash7(MEM_readLE64(p), h, 0); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6Ptr(void* p, uint h) + { + return ZSTD_hash6(MEM_readLE64(p), h, 0); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash7PtrS(void* p, uint h, ulong s) - { - return ZSTD_hash7(MEM_readLE64(p), h, s); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash6PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash6(MEM_readLE64(p), h, s); + } - private const ulong prime8bytes = 0xCF1BBCDCB7A56463UL; + private const ulong prime7bytes = 58295818150454627UL; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash8(ulong u, uint h, ulong s) - { - assert(h <= 64); - return (nuint)((u * prime8bytes ^ s) >> (int)(64 - h)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash7(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)(((u << 64 - 56) * prime7bytes ^ s) >> (int)(64 - h)); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash8Ptr(void* p, uint h) - { - return ZSTD_hash8(MEM_readLE64(p), h, 0); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint 
ZSTD_hash7Ptr(void* p, uint h) + { + return ZSTD_hash7(MEM_readLE64(p), h, 0); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hash8PtrS(void* p, uint h, ulong s) - { - return ZSTD_hash8(MEM_readLE64(p), h, s); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash7PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash7(MEM_readLE64(p), h, s); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hashPtr(void* p, uint hBits, uint mls) - { - assert(hBits <= 32); - if (mls == 5) - return ZSTD_hash5Ptr(p, hBits); - if (mls == 6) - return ZSTD_hash6Ptr(p, hBits); - if (mls == 7) - return ZSTD_hash7Ptr(p, hBits); - if (mls == 8) - return ZSTD_hash8Ptr(p, hBits); - return ZSTD_hash4Ptr(p, hBits); - } + private const ulong prime8bytes = 0xCF1BBCDCB7A56463UL; - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_hashPtrSalted(void* p, uint hBits, uint mls, ulong hashSalt) - { - assert(hBits <= 32); - if (mls == 5) - return ZSTD_hash5PtrS(p, hBits, hashSalt); - if (mls == 6) - return ZSTD_hash6PtrS(p, hBits, hashSalt); - if (mls == 7) - return ZSTD_hash7PtrS(p, hBits, hashSalt); - if (mls == 8) - return ZSTD_hash8PtrS(p, hBits, hashSalt); - return ZSTD_hash4PtrS(p, hBits, (uint)hashSalt); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8(ulong u, uint h, ulong s) + { + assert(h <= 64); + return (nuint)((u * prime8bytes ^ s) >> (int)(64 - h)); + } - /** ZSTD_ipow() : - * Return base^exponent. 
- */ - private static ulong ZSTD_ipow(ulong @base, ulong exponent) - { - ulong power = 1; - while (exponent != 0) - { - if ((exponent & 1) != 0) - power *= @base; - exponent >>= 1; - @base *= @base; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8Ptr(void* p, uint h) + { + return ZSTD_hash8(MEM_readLE64(p), h, 0); + } - return power; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hash8PtrS(void* p, uint h, ulong s) + { + return ZSTD_hash8(MEM_readLE64(p), h, s); + } - /** ZSTD_rollingHash_append() : - * Add the buffer to the hash value. - */ - private static ulong ZSTD_rollingHash_append(ulong hash, void* buf, nuint size) - { - byte* istart = (byte*)buf; - nuint pos; - for (pos = 0; pos < size; ++pos) - { - hash *= prime8bytes; - hash += (ulong)(istart[pos] + 10); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hashPtr(void* p, uint hBits, uint mls) + { + assert(hBits <= 32); + if (mls == 5) + return ZSTD_hash5Ptr(p, hBits); + if (mls == 6) + return ZSTD_hash6Ptr(p, hBits); + if (mls == 7) + return ZSTD_hash7Ptr(p, hBits); + if (mls == 8) + return ZSTD_hash8Ptr(p, hBits); + return ZSTD_hash4Ptr(p, hBits); + } - return hash; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_hashPtrSalted(void* p, uint hBits, uint mls, ulong hashSalt) + { + assert(hBits <= 32); + if (mls == 5) + return ZSTD_hash5PtrS(p, hBits, hashSalt); + if (mls == 6) + return ZSTD_hash6PtrS(p, hBits, hashSalt); + if (mls == 7) + return ZSTD_hash7PtrS(p, hBits, hashSalt); + if (mls == 8) + return ZSTD_hash8PtrS(p, hBits, hashSalt); + return ZSTD_hash4PtrS(p, hBits, (uint)hashSalt); + } - /** ZSTD_rollingHash_compute() : - * Compute the rolling hash value of the buffer. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_rollingHash_compute(void* buf, nuint size) + /** ZSTD_ipow() : + * Return base^exponent. 
+ */ + private static ulong ZSTD_ipow(ulong @base, ulong exponent) + { + ulong power = 1; + while (exponent != 0) { - return ZSTD_rollingHash_append(0, buf, size); + if ((exponent & 1) != 0) + power *= @base; + exponent >>= 1; + @base *= @base; } - /** ZSTD_rollingHash_primePower() : - * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash - * over a window of length bytes. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_rollingHash_primePower(uint length) - { - return ZSTD_ipow(prime8bytes, length - 1); - } + return power; + } - /** ZSTD_rollingHash_rotate() : - * Rotate the rolling hash by one byte. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_rollingHash_rotate( - ulong hash, - byte toRemove, - byte toAdd, - ulong primePower - ) + /** ZSTD_rollingHash_append() : + * Add the buffer to the hash value. + */ + private static ulong ZSTD_rollingHash_append(ulong hash, void* buf, nuint size) + { + byte* istart = (byte*)buf; + nuint pos; + for (pos = 0; pos < size; ++pos) { - hash -= (ulong)(toRemove + 10) * primePower; hash *= prime8bytes; - hash += (ulong)(toAdd + 10); - return hash; + hash += (ulong)(istart[pos] + 10); } - /** - * ZSTD_window_clear(): - * Clears the window containing the history by simply setting it to empty. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_window_clear(ZSTD_window_t* window) - { - nuint endT = (nuint)(window->nextSrc - window->@base); - uint end = (uint)endT; - window->lowLimit = end; - window->dictLimit = end; - } + return hash; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_isEmpty(ZSTD_window_t window) - { - return - window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 + /** ZSTD_rollingHash_compute() : + * Compute the rolling hash value of the buffer. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_compute(void* buf, nuint size) + { + return ZSTD_rollingHash_append(0, buf, size); + } + + /** ZSTD_rollingHash_primePower() : + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash + * over a window of length bytes. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_primePower(uint length) + { + return ZSTD_ipow(prime8bytes, length - 1); + } + + /** ZSTD_rollingHash_rotate() : + * Rotate the rolling hash by one byte. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_rollingHash_rotate( + ulong hash, + byte toRemove, + byte toAdd, + ulong primePower + ) + { + hash -= (ulong)(toRemove + 10) * primePower; + hash *= prime8bytes; + hash += (ulong)(toAdd + 10); + return hash; + } + + /** + * ZSTD_window_clear(): + * Clears the window containing the history by simply setting it to empty. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_window_clear(ZSTD_window_t* window) + { + nuint endT = (nuint)(window->nextSrc - window->@base); + uint end = (uint)endT; + window->lowLimit = end; + window->dictLimit = end; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_isEmpty(ZSTD_window_t window) + { + return + window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 ? 1U : 0U; - } + } + + /** + * ZSTD_window_hasExtDict(): + * Returns non-zero if the window has a non-empty extDict. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_hasExtDict(ZSTD_window_t window) + { + return window.lowLimit < window.dictLimit ? 1U : 0U; + } + + /** + * ZSTD_matchState_dictMode(): + * Inspects the provided matchState and figures out what dictMode should be + * passed to the compressor. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTD_dictMode_e ZSTD_matchState_dictMode(ZSTD_MatchState_t* ms) + { + return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict + : ms->dictMatchState != null + ? ms->dictMatchState->dedicatedDictSearch != 0 + ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + : ZSTD_dictMode_e.ZSTD_dictMatchState + : ZSTD_dictMode_e.ZSTD_noDict; + } - /** - * ZSTD_window_hasExtDict(): - * Returns non-zero if the window has a non-empty extDict. + /** + * ZSTD_window_canOverflowCorrect(): + * Returns non-zero if the indices are large enough for overflow correction + * to work correctly without impacting compression ratio. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_canOverflowCorrect( + ZSTD_window_t window, + uint cycleLog, + uint maxDist, + uint loadedDictEnd, + void* src + ) + { + uint cycleSize = 1U << (int)cycleLog; + uint curr = (uint)((byte*)src - window.@base); + uint minIndexToOverflowCorrect = + cycleSize + (maxDist > cycleSize ? maxDist : cycleSize) + 2; + /* Adjust the min index to backoff the overflow correction frequency, + * so we don't waste too much CPU in overflow correction. If this + * computation overflows we don't really care, we just need to make + * sure it is at least minIndexToOverflowCorrect. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_hasExtDict(ZSTD_window_t window) - { - return window.lowLimit < window.dictLimit ? 1U : 0U; - } + uint adjustment = window.nbOverflowCorrections + 1; + uint adjustedIndex = + minIndexToOverflowCorrect * adjustment > minIndexToOverflowCorrect + ? minIndexToOverflowCorrect * adjustment + : minIndexToOverflowCorrect; + uint indexLargeEnough = curr > adjustedIndex ? 1U : 0U; + /* Only overflow correct early if the dictionary is invalidated already, + * so we don't hurt compression ratio. 
+ */ + uint dictionaryInvalidated = curr > maxDist + loadedDictEnd ? 1U : 0U; + return indexLargeEnough != 0 && dictionaryInvalidated != 0 ? 1U : 0U; + } + + /** + * ZSTD_window_needOverflowCorrection(): + * Returns non-zero if the indices are getting too large and need overflow + * protection. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_needOverflowCorrection( + ZSTD_window_t window, + uint cycleLog, + uint maxDist, + uint loadedDictEnd, + void* src, + void* srcEnd + ) + { + uint curr = (uint)((byte*)srcEnd - window.@base); + return curr > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) ? 1U : 0U; + } - /** - * ZSTD_matchState_dictMode(): - * Inspects the provided matchState and figures out what dictMode should be - * passed to the compressor. + /** + * ZSTD_window_correctOverflow(): + * Reduces the indices to protect from index overflow. + * Returns the correction made to the indices, which must be applied to every + * stored index. + * + * The least significant cycleLog bits of the indices must remain the same, + * which may be 0. Every index up to maxDist in the past must be valid. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_correctOverflow( + ZSTD_window_t* window, + uint cycleLog, + uint maxDist, + void* src + ) + { + /* preemptive overflow correction: + * 1. correction is large enough: + * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) + * > 1<<29 + * + * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: + * After correction, current is less than (1<base < 1<<32. + * 3. (cctx->lowLimit + 1< 3<<29 + 1<@base); + uint currentCycle = curr & cycleMask; + /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ + uint currentCycleCorrection = + currentCycle < 2 + ? cycleSize > 2 + ? cycleSize + : 2 + : 0; + uint newCurrent = + currentCycle + currentCycleCorrection + (maxDist > cycleSize ? 
maxDist : cycleSize); + uint correction = curr - newCurrent; + assert((maxDist & maxDist - 1) == 0); + assert((curr & cycleMask) == (newCurrent & cycleMask)); + assert(curr > newCurrent); { - return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict - : ms->dictMatchState != null - ? ms->dictMatchState->dedicatedDictSearch != 0 - ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - : ZSTD_dictMode_e.ZSTD_dictMatchState - : ZSTD_dictMode_e.ZSTD_noDict; + assert(correction > 1 << 28); } - /** - * ZSTD_window_canOverflowCorrect(): - * Returns non-zero if the indices are large enough for overflow correction - * to work correctly without impacting compression ratio. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_canOverflowCorrect( - ZSTD_window_t window, - uint cycleLog, - uint maxDist, - uint loadedDictEnd, - void* src - ) + window->@base += correction; + window->dictBase += correction; + if (window->lowLimit < correction + 2) { - uint cycleSize = 1U << (int)cycleLog; - uint curr = (uint)((byte*)src - window.@base); - uint minIndexToOverflowCorrect = - cycleSize + (maxDist > cycleSize ? maxDist : cycleSize) + 2; - /* Adjust the min index to backoff the overflow correction frequency, - * so we don't waste too much CPU in overflow correction. If this - * computation overflows we don't really care, we just need to make - * sure it is at least minIndexToOverflowCorrect. - */ - uint adjustment = window.nbOverflowCorrections + 1; - uint adjustedIndex = - minIndexToOverflowCorrect * adjustment > minIndexToOverflowCorrect - ? minIndexToOverflowCorrect * adjustment - : minIndexToOverflowCorrect; - uint indexLargeEnough = curr > adjustedIndex ? 1U : 0U; - /* Only overflow correct early if the dictionary is invalidated already, - * so we don't hurt compression ratio. - */ - uint dictionaryInvalidated = curr > maxDist + loadedDictEnd ? 1U : 0U; - return indexLargeEnough != 0 && dictionaryInvalidated != 0 ? 
1U : 0U; + window->lowLimit = 2; } - - /** - * ZSTD_window_needOverflowCorrection(): - * Returns non-zero if the indices are getting too large and need overflow - * protection. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_needOverflowCorrection( - ZSTD_window_t window, - uint cycleLog, - uint maxDist, - uint loadedDictEnd, - void* src, - void* srcEnd - ) + else { - uint curr = (uint)((byte*)srcEnd - window.@base); - return curr > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) ? 1U : 0U; + window->lowLimit -= correction; } - /** - * ZSTD_window_correctOverflow(): - * Reduces the indices to protect from index overflow. - * Returns the correction made to the indices, which must be applied to every - * stored index. - * - * The least significant cycleLog bits of the indices must remain the same, - * which may be 0. Every index up to maxDist in the past must be valid. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_correctOverflow( - ZSTD_window_t* window, - uint cycleLog, - uint maxDist, - void* src - ) + if (window->dictLimit < correction + 2) { - /* preemptive overflow correction: - * 1. correction is large enough: - * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) - * > 1<<29 - * - * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: - * After correction, current is less than (1<base < 1<<32. - * 3. (cctx->lowLimit + 1< 3<<29 + 1<@base); - uint currentCycle = curr & cycleMask; - /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ - uint currentCycleCorrection = - currentCycle < 2 - ? cycleSize > 2 - ? cycleSize - : 2 - : 0; - uint newCurrent = - currentCycle + currentCycleCorrection + (maxDist > cycleSize ? 
maxDist : cycleSize); - uint correction = curr - newCurrent; - assert((maxDist & maxDist - 1) == 0); - assert((curr & cycleMask) == (newCurrent & cycleMask)); - assert(curr > newCurrent); - { - assert(correction > 1 << 28); - } + window->dictLimit = 2; + } + else + { + window->dictLimit -= correction; + } - window->@base += correction; - window->dictBase += correction; - if (window->lowLimit < correction + 2) - { - window->lowLimit = 2; - } - else - { - window->lowLimit -= correction; - } + assert(newCurrent >= maxDist); + assert(newCurrent - maxDist >= 2); + assert(window->lowLimit <= newCurrent); + assert(window->dictLimit <= newCurrent); + ++window->nbOverflowCorrections; + return correction; + } - if (window->dictLimit < correction + 2) - { - window->dictLimit = 2; - } - else + /** + * ZSTD_window_enforceMaxDist(): + * Updates lowLimit so that: + * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd + * + * It ensures index is valid as long as index >= lowLimit. + * This must be called before a block compression call. + * + * loadedDictEnd is only defined if a dictionary is in use for current compression. + * As the name implies, loadedDictEnd represents the index at end of dictionary. + * The value lies within context's referential, it can be directly compared to blockEndIdx. + * + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. + * This is because dictionaries are allowed to be referenced fully + * as long as the last byte of the dictionary is in the window. + * Once input has progressed beyond window size, dictionary cannot be referenced anymore. + * + * In normal dict mode, the dictionary lies between lowLimit and dictLimit. + * In dictMatchState mode, lowLimit and dictLimit are the same, + * and the dictionary is below them. + * forceWindow and dictMatchState are therefore incompatible. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_window_enforceMaxDist( + ZSTD_window_t* window, + void* blockEnd, + uint maxDist, + uint* loadedDictEndPtr, + ZSTD_MatchState_t** dictMatchStatePtr + ) + { + uint blockEndIdx = (uint)((byte*)blockEnd - window->@base); + uint loadedDictEnd = loadedDictEndPtr != null ? *loadedDictEndPtr : 0; + if (blockEndIdx > maxDist + loadedDictEnd) + { + uint newLowLimit = blockEndIdx - maxDist; + if (window->lowLimit < newLowLimit) + window->lowLimit = newLowLimit; + if (window->dictLimit < window->lowLimit) { - window->dictLimit -= correction; + window->dictLimit = window->lowLimit; } - assert(newCurrent >= maxDist); - assert(newCurrent - maxDist >= 2); - assert(window->lowLimit <= newCurrent); - assert(window->dictLimit <= newCurrent); - ++window->nbOverflowCorrections; - return correction; + if (loadedDictEndPtr != null) + *loadedDictEndPtr = 0; + if (dictMatchStatePtr != null) + *dictMatchStatePtr = null; } + } - /** - * ZSTD_window_enforceMaxDist(): - * Updates lowLimit so that: - * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd - * - * It ensures index is valid as long as index >= lowLimit. - * This must be called before a block compression call. - * - * loadedDictEnd is only defined if a dictionary is in use for current compression. - * As the name implies, loadedDictEnd represents the index at end of dictionary. - * The value lies within context's referential, it can be directly compared to blockEndIdx. - * - * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. - * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. - * This is because dictionaries are allowed to be referenced fully - * as long as the last byte of the dictionary is in the window. - * Once input has progressed beyond window size, dictionary cannot be referenced anymore. - * - * In normal dict mode, the dictionary lies between lowLimit and dictLimit. 
- * In dictMatchState mode, lowLimit and dictLimit are the same, - * and the dictionary is below them. - * forceWindow and dictMatchState are therefore incompatible. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_window_enforceMaxDist( - ZSTD_window_t* window, - void* blockEnd, - uint maxDist, - uint* loadedDictEndPtr, - ZSTD_MatchState_t** dictMatchStatePtr - ) + /* Similar to ZSTD_window_enforceMaxDist(), + * but only invalidates dictionary + * when input progresses beyond window size. + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) + * loadedDictEnd uses same referential as window->base + * maxDist is the window size */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_checkDictValidity( + ZSTD_window_t* window, + void* blockEnd, + uint maxDist, + uint* loadedDictEndPtr, + ZSTD_MatchState_t** dictMatchStatePtr + ) + { + assert(loadedDictEndPtr != null); + assert(dictMatchStatePtr != null); { uint blockEndIdx = (uint)((byte*)blockEnd - window->@base); - uint loadedDictEnd = loadedDictEndPtr != null ? *loadedDictEndPtr : 0; - if (blockEndIdx > maxDist + loadedDictEnd) - { - uint newLowLimit = blockEndIdx - maxDist; - if (window->lowLimit < newLowLimit) - window->lowLimit = newLowLimit; - if (window->dictLimit < window->lowLimit) - { - window->dictLimit = window->lowLimit; - } - - if (loadedDictEndPtr != null) - *loadedDictEndPtr = 0; - if (dictMatchStatePtr != null) - *dictMatchStatePtr = null; - } - } - - /* Similar to ZSTD_window_enforceMaxDist(), - * but only invalidates dictionary - * when input progresses beyond window size. 
- * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) - * loadedDictEnd uses same referential as window->base - * maxDist is the window size */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_checkDictValidity( - ZSTD_window_t* window, - void* blockEnd, - uint maxDist, - uint* loadedDictEndPtr, - ZSTD_MatchState_t** dictMatchStatePtr - ) - { - assert(loadedDictEndPtr != null); - assert(dictMatchStatePtr != null); + uint loadedDictEnd = *loadedDictEndPtr; + assert(blockEndIdx >= loadedDictEnd); + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { - uint blockEndIdx = (uint)((byte*)blockEnd - window->@base); - uint loadedDictEnd = *loadedDictEndPtr; - assert(blockEndIdx >= loadedDictEnd); - if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) - { - *loadedDictEndPtr = 0; - *dictMatchStatePtr = null; - } + *loadedDictEndPtr = 0; + *dictMatchStatePtr = null; } } + } #if NET7_0_OR_GREATER private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] { 32, 0 }; @@ -1313,153 +1313,152 @@ ref MemoryMarshal.GetReference(Span_stringToByte_20_00) ); #else - private static readonly byte* stringToByte_20_00 = GetArrayPointer(new byte[] { 32, 0 }); + private static readonly byte* stringToByte_20_00 = GetArrayPointer(new byte[] { 32, 0 }); #endif - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_window_init(ZSTD_window_t* window) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_window_init(ZSTD_window_t* window) + { + *window = new ZSTD_window_t { - *window = new ZSTD_window_t - { - @base = stringToByte_20_00, - dictBase = stringToByte_20_00, - dictLimit = 2, - lowLimit = 2, - nextSrc = stringToByte_20_00 + 2, - nbOverflowCorrections = 0, - }; - } + @base = stringToByte_20_00, + dictBase = stringToByte_20_00, + dictLimit = 2, + lowLimit = 2, + nextSrc = stringToByte_20_00 + 2, + nbOverflowCorrections = 
0, + }; + } - /** - * ZSTD_window_update(): - * Updates the window by appending [src, src + srcSize) to the window. - * If it is not contiguous, the current prefix becomes the extDict, and we - * forget about the extDict. Handles overlap of the prefix and extDict. - * Returns non-zero if the segment is contiguous. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_window_update( - ZSTD_window_t* window, - void* src, - nuint srcSize, - int forceNonContiguous - ) + /** + * ZSTD_window_update(): + * Updates the window by appending [src, src + srcSize) to the window. + * If it is not contiguous, the current prefix becomes the extDict, and we + * forget about the extDict. Handles overlap of the prefix and extDict. + * Returns non-zero if the segment is contiguous. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_window_update( + ZSTD_window_t* window, + void* src, + nuint srcSize, + int forceNonContiguous + ) + { + byte* ip = (byte*)src; + uint contiguous = 1; + if (srcSize == 0) + return contiguous; + assert(window->@base != null); + assert(window->dictBase != null); + if (src != window->nextSrc || forceNonContiguous != 0) { - byte* ip = (byte*)src; - uint contiguous = 1; - if (srcSize == 0) - return contiguous; - assert(window->@base != null); - assert(window->dictBase != null); - if (src != window->nextSrc || forceNonContiguous != 0) - { - /* not contiguous */ - nuint distanceFromBase = (nuint)(window->nextSrc - window->@base); + /* not contiguous */ + nuint distanceFromBase = (nuint)(window->nextSrc - window->@base); + window->lowLimit = window->dictLimit; + assert(distanceFromBase == (uint)distanceFromBase); + window->dictLimit = (uint)distanceFromBase; + window->dictBase = window->@base; + window->@base = ip - distanceFromBase; + if (window->dictLimit - window->lowLimit < 8) window->lowLimit = window->dictLimit; - assert(distanceFromBase == (uint)distanceFromBase); - window->dictLimit = 
(uint)distanceFromBase; - window->dictBase = window->@base; - window->@base = ip - distanceFromBase; - if (window->dictLimit - window->lowLimit < 8) - window->lowLimit = window->dictLimit; - contiguous = 0; - } - - window->nextSrc = ip + srcSize; - if ( - ip + srcSize > window->dictBase + window->lowLimit - && ip < window->dictBase + window->dictLimit - ) - { - nuint highInputIdx = (nuint)(ip + srcSize - window->dictBase); - uint lowLimitMax = - highInputIdx > window->dictLimit ? window->dictLimit : (uint)highInputIdx; - assert(highInputIdx < 0xffffffff); - window->lowLimit = lowLimitMax; - } - - return contiguous; + contiguous = 0; } - /** - * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestMatchIndex( - ZSTD_MatchState_t* ms, - uint curr, - uint windowLog + window->nextSrc = ip + srcSize; + if ( + ip + srcSize > window->dictBase + window->lowLimit + && ip < window->dictBase + window->dictLimit ) { - uint maxDistance = 1U << (int)windowLog; - uint lowestValid = ms->window.lowLimit; - uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; - uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; - /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary - * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't - * valid for the entire block. So this check is sufficient to find the lowest valid match index. - */ - uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow; - return matchLowest; + nuint highInputIdx = (nuint)(ip + srcSize - window->dictBase); + uint lowLimitMax = + highInputIdx > window->dictLimit ? window->dictLimit : (uint)highInputIdx; + assert(highInputIdx < 0xffffffff); + window->lowLimit = lowLimitMax; } - /** - * Returns the lowest allowed match index in the prefix. 
+ return contiguous; + } + + /** + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_getLowestMatchIndex( + ZSTD_MatchState_t* ms, + uint curr, + uint windowLog + ) + { + uint maxDistance = 1U << (int)windowLog; + uint lowestValid = ms->window.lowLimit; + uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; + /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary + * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't + * valid for the entire block. So this check is sufficient to find the lowest valid match index. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestPrefixIndex( - ZSTD_MatchState_t* ms, - uint curr, - uint windowLog - ) - { - uint maxDistance = 1U << (int)windowLog; - uint lowestValid = ms->window.dictLimit; - uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; - uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; - /* When computing the lowest prefix index we need to take the dictionary into account to handle - * the edge case where the dictionary and the source are contiguous in memory. - */ - uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow; - return matchLowest; - } + uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow; + return matchLowest; + } - /* index_safety_check: - * intentional underflow : ensure repIndex isn't overlapping dict + prefix - * @return 1 if values are not overlapping, - * 0 otherwise */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_index_overlap_check(uint prefixLowestIndex, uint repIndex) - { - return prefixLowestIndex - 1 - repIndex >= 3 ? 
1 : 0; - } + /** + * Returns the lowest allowed match index in the prefix. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_getLowestPrefixIndex( + ZSTD_MatchState_t* ms, + uint curr, + uint windowLog + ) + { + uint maxDistance = 1U << (int)windowLog; + uint lowestValid = ms->window.dictLimit; + uint withinWindow = curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; + /* When computing the lowest prefix index we need to take the dictionary into account to handle + * the edge case where the dictionary and the source are contiguous in memory. + */ + uint matchLowest = isDictionary != 0 ? lowestValid : withinWindow; + return matchLowest; + } - /* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. - * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_writeTaggedIndex(uint* hashTable, nuint hashAndTag, uint index) - { - nuint hash = hashAndTag >> 8; - uint tag = (uint)(hashAndTag & (1U << 8) - 1); - assert(index >> 32 - 8 == 0); - hashTable[hash] = index << 8 | tag; - } + /* index_safety_check: + * intentional underflow : ensure repIndex isn't overlapping dict + prefix + * @return 1 if values are not overlapping, + * 0 otherwise */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_index_overlap_check(uint prefixLowestIndex, uint repIndex) + { + return prefixLowestIndex - 1 - repIndex >= 3 ? 1 : 0; + } - /* Helper function for short cache matchfinders. - * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. 
*/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_comparePackedTags(nuint packedTag1, nuint packedTag2) - { - uint tag1 = (uint)(packedTag1 & (1U << 8) - 1); - uint tag2 = (uint)(packedTag2 & (1U << 8) - 1); - return tag1 == tag2 ? 1 : 0; - } + /* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. + * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_writeTaggedIndex(uint* hashTable, nuint hashAndTag, uint index) + { + nuint hash = hashAndTag >> 8; + uint tag = (uint)(hashAndTag & (1U << 8) - 1); + assert(index >> 32 - 8 == 0); + hashTable[hash] = index << 8 | tag; + } - /* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_hasExtSeqProd(ZSTD_CCtx_params_s* @params) - { - return @params->extSeqProdFunc != null ? 1 : 0; - } + /* Helper function for short cache matchfinders. + * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_comparePackedTags(nuint packedTag1, nuint packedTag2) + { + uint tag1 = (uint)(packedTag1 & (1U << 8) - 1); + uint tag2 = (uint)(packedTag2 & (1U << 8) - 1); + return tag1 == tag2 ? 1 : 0; + } + + /* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_hasExtSeqProd(ZSTD_CCtx_params_s* @params) + { + return @params->extSeqProdFunc != null ? 
1 : 0; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs index 803f67493..06fe5a4f7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs @@ -1,219 +1,192 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* ************************************************************** + * Literals compression - special cases + ****************************************************************/ + private static nuint ZSTD_noCompressLiterals( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) { - /* ************************************************************** - * Literals compression - special cases - ****************************************************************/ - private static nuint ZSTD_noCompressLiterals( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + byte* ostart = (byte*)dst; + uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 1 : 0)); + if (srcSize + flSize > dstCapacity) { - byte* ostart = (byte*)dst; - uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 
1 : 0)); - if (srcSize + flSize > dstCapacity) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - switch (flSize) - { - case 1: - ostart[0] = (byte)((uint)SymbolEncodingType_e.set_basic + (srcSize << 3)); - break; - case 2: - MEM_writeLE16( - ostart, - (ushort)((uint)SymbolEncodingType_e.set_basic + (1 << 2) + (srcSize << 4)) - ); - break; - case 3: - MEM_writeLE32( - ostart, - (uint)((uint)SymbolEncodingType_e.set_basic + (3 << 2) + (srcSize << 4)) - ); - break; - default: - assert(0 != 0); - break; - } - - memcpy(ostart + flSize, src, (uint)srcSize); - return srcSize + flSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - private static int allBytesIdentical(void* src, nuint srcSize) + switch (flSize) { - assert(srcSize >= 1); - assert(src != null); - { - byte b = ((byte*)src)[0]; - nuint p; - for (p = 1; p < srcSize; p++) - { - if (((byte*)src)[p] != b) - return 0; - } - - return 1; - } + case 1: + ostart[0] = (byte)((uint)SymbolEncodingType_e.set_basic + (srcSize << 3)); + break; + case 2: + MEM_writeLE16( + ostart, + (ushort)((uint)SymbolEncodingType_e.set_basic + (1 << 2) + (srcSize << 4)) + ); + break; + case 3: + MEM_writeLE32( + ostart, + (uint)((uint)SymbolEncodingType_e.set_basic + (3 << 2) + (srcSize << 4)) + ); + break; + default: + assert(0 != 0); + break; } - /* ZSTD_compressRleLiteralsBlock() : - * Conditions : - * - All bytes in @src are identical - * - dstCapacity >= 4 */ - private static nuint ZSTD_compressRleLiteralsBlock( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + memcpy(ostart + flSize, src, (uint)srcSize); + return srcSize + flSize; + } + + private static int allBytesIdentical(void* src, nuint srcSize) + { + assert(srcSize >= 1); + assert(src != null); { - byte* ostart = (byte*)dst; - uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 
1 : 0)); - assert(dstCapacity >= 4); - assert(allBytesIdentical(src, srcSize) != 0); - switch (flSize) + byte b = ((byte*)src)[0]; + nuint p; + for (p = 1; p < srcSize; p++) { - case 1: - ostart[0] = (byte)((uint)SymbolEncodingType_e.set_rle + (srcSize << 3)); - break; - case 2: - MEM_writeLE16( - ostart, - (ushort)((uint)SymbolEncodingType_e.set_rle + (1 << 2) + (srcSize << 4)) - ); - break; - case 3: - MEM_writeLE32( - ostart, - (uint)((uint)SymbolEncodingType_e.set_rle + (3 << 2) + (srcSize << 4)) - ); - break; - default: - assert(0 != 0); - break; + if (((byte*)src)[p] != b) + return 0; } - ostart[flSize] = *(byte*)src; - return flSize + 1; + return 1; } + } - /* ZSTD_minLiteralsToCompress() : - * returns minimal amount of literals - * for literal compression to even be attempted. - * Minimum is made tighter as compression strategy increases. - */ - private static nuint ZSTD_minLiteralsToCompress( - ZSTD_strategy strategy, - HUF_repeat huf_repeat - ) + /* ZSTD_compressRleLiteralsBlock() : + * Conditions : + * - All bytes in @src are identical + * - dstCapacity >= 4 */ + private static nuint ZSTD_compressRleLiteralsBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + byte* ostart = (byte*)dst; + uint flSize = (uint)(1 + (srcSize > 31 ? 1 : 0) + (srcSize > 4095 ? 1 : 0)); + assert(dstCapacity >= 4); + assert(allBytesIdentical(src, srcSize) != 0); + switch (flSize) { - assert((int)strategy >= 0); - assert((int)strategy <= 9); - { - int shift = 9 - (int)strategy < 3 ? 9 - (int)strategy : 3; - nuint mintc = huf_repeat == HUF_repeat.HUF_repeat_valid ? 
6 : (nuint)8 << shift; - return mintc; - } + case 1: + ostart[0] = (byte)((uint)SymbolEncodingType_e.set_rle + (srcSize << 3)); + break; + case 2: + MEM_writeLE16( + ostart, + (ushort)((uint)SymbolEncodingType_e.set_rle + (1 << 2) + (srcSize << 4)) + ); + break; + case 3: + MEM_writeLE32( + ostart, + (uint)((uint)SymbolEncodingType_e.set_rle + (3 << 2) + (srcSize << 4)) + ); + break; + default: + assert(0 != 0); + break; } - /* ZSTD_compressLiterals(): - * @entropyWorkspace: must be aligned on 4-bytes boundaries - * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE - * @suspectUncompressible: sampling checks, to potentially skip huffman coding - */ - private static nuint ZSTD_compressLiterals( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* entropyWorkspace, - nuint entropyWorkspaceSize, - ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, - int disableLiteralCompression, - int suspectUncompressible, - int bmi2 - ) + ostart[flSize] = *(byte*)src; + return flSize + 1; + } + + /* ZSTD_minLiteralsToCompress() : + * returns minimal amount of literals + * for literal compression to even be attempted. + * Minimum is made tighter as compression strategy increases. + */ + private static nuint ZSTD_minLiteralsToCompress( + ZSTD_strategy strategy, + HUF_repeat huf_repeat + ) + { + assert((int)strategy >= 0); + assert((int)strategy <= 9); { - nuint lhSize = (nuint)( - 3 + (srcSize >= 1 * (1 << 10) ? 1 : 0) + (srcSize >= 16 * (1 << 10) ? 1 : 0) - ); - byte* ostart = (byte*)dst; - uint singleStream = srcSize < 256 ? 
1U : 0U; - SymbolEncodingType_e hType = SymbolEncodingType_e.set_compressed; - nuint cLitSize; - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - if (disableLiteralCompression != 0) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - if (dstCapacity < lhSize + 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + int shift = 9 - (int)strategy < 3 ? 9 - (int)strategy : 3; + nuint mintc = huf_repeat == HUF_repeat.HUF_repeat_valid ? 6 : (nuint)8 << shift; + return mintc; + } + } - { - HUF_repeat repeat = prevHuf->repeatMode; - int flags = - 0 - | (bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) - | ( - strategy < ZSTD_strategy.ZSTD_lazy && srcSize <= 1024 - ? (int)HUF_flags_e.HUF_flags_preferRepeat - : 0 - ) - | ( - strategy >= ZSTD_strategy.ZSTD_btultra - ? (int)HUF_flags_e.HUF_flags_optimalDepth - : 0 - ) - | ( - suspectUncompressible != 0 - ? (int)HUF_flags_e.HUF_flags_suspectUncompressible - : 0 - ); - void* huf_compress; - if (repeat == HUF_repeat.HUF_repeat_valid && lhSize == 3) - singleStream = 1; - huf_compress = - singleStream != 0 - ? 
(delegate* managed< - void*, - nuint, - void*, - nuint, - uint, - uint, - void*, - nuint, - nuint*, - HUF_repeat*, - int, - nuint>)(&HUF_compress1X_repeat) - : (delegate* managed< - void*, - nuint, - void*, - nuint, - uint, - uint, - void*, - nuint, - nuint*, - HUF_repeat*, - int, - nuint>)(&HUF_compress4X_repeat); - cLitSize = ( - (delegate* managed< + /* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ + private static nuint ZSTD_compressLiterals( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* entropyWorkspace, + nuint entropyWorkspaceSize, + ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2 + ) + { + nuint lhSize = (nuint)( + 3 + (srcSize >= 1 * (1 << 10) ? 1 : 0) + (srcSize >= 16 * (1 << 10) ? 1 : 0) + ); + byte* ostart = (byte*)dst; + uint singleStream = srcSize < 256 ? 1U : 0U; + SymbolEncodingType_e hType = SymbolEncodingType_e.set_compressed; + nuint cLitSize; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + if (disableLiteralCompression != 0) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + if (dstCapacity < lhSize + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + { + HUF_repeat repeat = prevHuf->repeatMode; + int flags = + 0 + | (bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) + | ( + strategy < ZSTD_strategy.ZSTD_lazy && srcSize <= 1024 + ? (int)HUF_flags_e.HUF_flags_preferRepeat + : 0 + ) + | ( + strategy >= ZSTD_strategy.ZSTD_btultra + ? (int)HUF_flags_e.HUF_flags_optimalDepth + : 0 + ) + | ( + suspectUncompressible != 0 + ? 
(int)HUF_flags_e.HUF_flags_suspectUncompressible + : 0 + ); + void* huf_compress; + if (repeat == HUF_repeat.HUF_repeat_valid && lhSize == 3) + singleStream = 1; + huf_compress = + singleStream != 0 + ? (delegate* managed< void*, nuint, void*, @@ -225,98 +198,124 @@ int bmi2 nuint*, HUF_repeat*, int, - nuint>)huf_compress - )( - ostart + lhSize, - dstCapacity - lhSize, - src, - srcSize, - 255, - 11, - entropyWorkspace, - entropyWorkspaceSize, - &nextHuf->CTable.e0, - &repeat, - flags - ); - if (repeat != HUF_repeat.HUF_repeat_none) - { - hType = SymbolEncodingType_e.set_repeat; - } - } - + nuint>)(&HUF_compress1X_repeat) + : (delegate* managed< + void*, + nuint, + void*, + nuint, + uint, + uint, + void*, + nuint, + nuint*, + HUF_repeat*, + int, + nuint>)(&HUF_compress4X_repeat); + cLitSize = ( + (delegate* managed< + void*, + nuint, + void*, + nuint, + uint, + uint, + void*, + nuint, + nuint*, + HUF_repeat*, + int, + nuint>)huf_compress + )( + ostart + lhSize, + dstCapacity - lhSize, + src, + srcSize, + 255, + 11, + entropyWorkspace, + entropyWorkspaceSize, + &nextHuf->CTable.e0, + &repeat, + flags + ); + if (repeat != HUF_repeat.HUF_repeat_none) { - nuint minGain = ZSTD_minGain(srcSize, strategy); - if (cLitSize == 0 || cLitSize >= srcSize - minGain || ERR_isError(cLitSize)) - { - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + hType = SymbolEncodingType_e.set_repeat; } + } - if (cLitSize == 1) + { + nuint minGain = ZSTD_minGain(srcSize, strategy); + if (cLitSize == 0 || cLitSize >= srcSize - minGain || ERR_isError(cLitSize)) { - if (srcSize >= 8 || allBytesIdentical(src, srcSize) != 0) - { - memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } + } - if (hType == 
SymbolEncodingType_e.set_compressed) + if (cLitSize == 1) + { + if (srcSize >= 8 || allBytesIdentical(src, srcSize) != 0) { - nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; + memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); } + } - switch (lhSize) - { - case 3: + if (hType == SymbolEncodingType_e.set_compressed) + { + nextHuf->repeatMode = HUF_repeat.HUF_repeat_check; + } + + switch (lhSize) + { + case 3: #if DEBUG - if (singleStream == 0) - assert(srcSize >= 6); + if (singleStream == 0) + assert(srcSize >= 6); #endif - { - uint lhc = - (uint)hType - + ((singleStream == 0 ? 1U : 0U) << 2) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 14); - MEM_writeLE24(ostart, lhc); - break; - } - - case 4: - assert(srcSize >= 6); + { + uint lhc = + (uint)hType + + ((singleStream == 0 ? 1U : 0U) << 2) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 14); + MEM_writeLE24(ostart, lhc); + break; + } - { - uint lhc = - (uint)(hType + (2 << 2)) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 18); - MEM_writeLE32(ostart, lhc); - break; - } + case 4: + assert(srcSize >= 6); - case 5: - assert(srcSize >= 6); + { + uint lhc = + (uint)(hType + (2 << 2)) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 18); + MEM_writeLE32(ostart, lhc); + break; + } - { - uint lhc = - (uint)(hType + (3 << 2)) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (byte)(cLitSize >> 10); - break; - } + case 5: + assert(srcSize >= 6); - default: - assert(0 != 0); - break; + { + uint lhc = + (uint)(hType + (3 << 2)) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (byte)(cLitSize >> 10); + break; } - return lhSize + cLitSize; + default: + assert(0 != 0); + break; } + + return lhSize + cLitSize; } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs index c336ff447..26650be8f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs @@ -1,12 +1,11 @@ using System; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { #if NET7_0_OR_GREATER private static ReadOnlySpan Span_kInverseProbabilityLog256 => new uint[256] @@ -275,575 +274,608 @@ ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256) ); #else - private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer( - new uint[256] + private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer( + new uint[256] + { + 0, + 2048, + 1792, + 1642, + 1536, + 1453, + 1386, + 1329, + 1280, + 1236, + 1197, + 1162, + 1130, + 1100, + 1073, + 1047, + 1024, + 1001, + 980, + 960, + 941, + 923, + 906, + 889, + 874, + 859, + 844, + 830, + 817, + 804, + 791, + 779, + 768, + 756, + 745, + 734, + 724, + 714, + 704, + 694, + 685, + 676, + 667, + 658, + 650, + 642, + 633, + 626, + 618, + 610, + 603, + 595, + 588, + 581, + 574, + 567, + 561, + 554, + 548, + 542, + 535, + 529, + 523, + 517, + 512, + 506, + 500, + 495, + 489, + 484, + 478, + 473, + 468, + 463, + 458, + 453, + 448, + 443, + 438, + 434, + 429, + 424, + 420, + 415, + 411, + 407, + 402, + 398, + 394, + 390, + 386, + 382, + 377, + 373, + 370, + 366, + 362, + 358, + 354, + 350, + 347, + 343, + 339, + 336, + 332, + 329, + 325, + 322, + 318, + 315, + 311, + 308, + 305, + 302, + 298, + 295, + 292, + 289, + 286, + 282, + 279, + 276, + 273, + 270, + 267, + 264, + 261, + 258, + 256, + 253, + 250, + 247, + 244, + 241, + 239, 
+ 236, + 233, + 230, + 228, + 225, + 222, + 220, + 217, + 215, + 212, + 209, + 207, + 204, + 202, + 199, + 197, + 194, + 192, + 190, + 187, + 185, + 182, + 180, + 178, + 175, + 173, + 171, + 168, + 166, + 164, + 162, + 159, + 157, + 155, + 153, + 151, + 149, + 146, + 144, + 142, + 140, + 138, + 136, + 134, + 132, + 130, + 128, + 126, + 123, + 121, + 119, + 117, + 115, + 114, + 112, + 110, + 108, + 106, + 104, + 102, + 100, + 98, + 96, + 94, + 93, + 91, + 89, + 87, + 85, + 83, + 82, + 80, + 78, + 76, + 74, + 73, + 71, + 69, + 67, + 66, + 64, + 62, + 61, + 59, + 57, + 55, + 54, + 52, + 50, + 49, + 47, + 46, + 44, + 42, + 41, + 39, + 37, + 36, + 34, + 33, + 31, + 30, + 28, + 26, + 25, + 23, + 22, + 20, + 19, + 17, + 16, + 14, + 13, + 11, + 10, + 8, + 7, + 5, + 4, + 2, + 1, + } + ); +#endif + + private static uint ZSTD_getFSEMaxSymbolValue(uint* ctable) + { + void* ptr = ctable; + ushort* u16ptr = (ushort*)ptr; + uint maxSymbolValue = MEM_read16(u16ptr + 1); + return maxSymbolValue; + } + + /** + * Returns true if we should use ncount=-1 else we should + * use ncount=1 for low probability symbols instead. + */ + private static uint ZSTD_useLowProbCount(nuint nbSeq) + { + return nbSeq >= 2048 ? 1U : 0U; + } + + /** + * Returns the cost in bytes of encoding the normalized count header. + * Returns an error if any of the helper functions return an error. 
+ */ + private static nuint ZSTD_NCountCost(uint* count, uint max, nuint nbSeq, uint FSELog) + { + byte* wksp = stackalloc byte[512]; + short* norm = stackalloc short[53]; + uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + { + nuint err_code = FSE_normalizeCount( + norm, + tableLog, + count, + nbSeq, + max, + ZSTD_useLowProbCount(nbSeq) + ); + if (ERR_isError(err_code)) { - 0, - 2048, - 1792, - 1642, - 1536, - 1453, - 1386, - 1329, - 1280, - 1236, - 1197, - 1162, - 1130, - 1100, - 1073, - 1047, - 1024, - 1001, - 980, - 960, - 941, - 923, - 906, - 889, - 874, - 859, - 844, - 830, - 817, - 804, - 791, - 779, - 768, - 756, - 745, - 734, - 724, - 714, - 704, - 694, - 685, - 676, - 667, - 658, - 650, - 642, - 633, - 626, - 618, - 610, - 603, - 595, - 588, - 581, - 574, - 567, - 561, - 554, - 548, - 542, - 535, - 529, - 523, - 517, - 512, - 506, - 500, - 495, - 489, - 484, - 478, - 473, - 468, - 463, - 458, - 453, - 448, - 443, - 438, - 434, - 429, - 424, - 420, - 415, - 411, - 407, - 402, - 398, - 394, - 390, - 386, - 382, - 377, - 373, - 370, - 366, - 362, - 358, - 354, - 350, - 347, - 343, - 339, - 336, - 332, - 329, - 325, - 322, - 318, - 315, - 311, - 308, - 305, - 302, - 298, - 295, - 292, - 289, - 286, - 282, - 279, - 276, - 273, - 270, - 267, - 264, - 261, - 258, - 256, - 253, - 250, - 247, - 244, - 241, - 239, - 236, - 233, - 230, - 228, - 225, - 222, - 220, - 217, - 215, - 212, - 209, - 207, - 204, - 202, - 199, - 197, - 194, - 192, - 190, - 187, - 185, - 182, - 180, - 178, - 175, - 173, - 171, - 168, - 166, - 164, - 162, - 159, - 157, - 155, - 153, - 151, - 149, - 146, - 144, - 142, - 140, - 138, - 136, - 134, - 132, - 130, - 128, - 126, - 123, - 121, - 119, - 117, - 115, - 114, - 112, - 110, - 108, - 106, - 104, - 102, - 100, - 98, - 96, - 94, - 93, - 91, - 89, - 87, - 85, - 83, - 82, - 80, - 78, - 76, - 74, - 73, - 71, - 69, - 67, - 66, - 64, - 62, - 61, - 59, - 57, - 55, - 54, - 52, - 50, - 49, - 47, - 46, - 44, - 42, - 41, - 39, - 37, - 36, - 34, 
- 33, - 31, - 30, - 28, - 26, - 25, - 23, - 22, - 20, - 19, - 17, - 16, - 14, - 13, - 11, - 10, - 8, - 7, - 5, - 4, - 2, - 1, + return err_code; } - ); -#endif + } - private static uint ZSTD_getFSEMaxSymbolValue(uint* ctable) + return FSE_writeNCount(wksp, sizeof(byte) * 512, norm, max, tableLog); + } + + /** + * Returns the cost in bits of encoding the distribution described by count + * using the entropy bound. + */ + private static nuint ZSTD_entropyCost(uint* count, uint max, nuint total) + { + uint cost = 0; + uint s; + assert(total > 0); + for (s = 0; s <= max; ++s) { - void* ptr = ctable; - ushort* u16ptr = (ushort*)ptr; - uint maxSymbolValue = MEM_read16(u16ptr + 1); - return maxSymbolValue; + uint norm = (uint)(256 * count[s] / total); + if (count[s] != 0 && norm == 0) + norm = 1; + assert(count[s] < total); + cost += count[s] * kInverseProbabilityLog256[norm]; } - /** - * Returns true if we should use ncount=-1 else we should - * use ncount=1 for low probability symbols instead. - */ - private static uint ZSTD_useLowProbCount(nuint nbSeq) + return cost >> 8; + } + + /** + * Returns the cost in bits of encoding the distribution in count using ctable. + * Returns an error if ctable cannot represent all the symbols in count. + */ + private static nuint ZSTD_fseBitCost(uint* ctable, uint* count, uint max) + { + const uint kAccuracyLog = 8; + nuint cost = 0; + uint s; + FSE_CState_t cstate; + FSE_initCState(&cstate, ctable); + if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { - return nbSeq >= 2048 ? 1U : 0U; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - /** - * Returns the cost in bytes of encoding the normalized count header. - * Returns an error if any of the helper functions return an error. 
- */ - private static nuint ZSTD_NCountCost(uint* count, uint max, nuint nbSeq, uint FSELog) + for (s = 0; s <= max; ++s) { - byte* wksp = stackalloc byte[512]; - short* norm = stackalloc short[53]; - uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + uint tableLog = cstate.stateLog; + uint badCost = tableLog + 1 << (int)kAccuracyLog; + uint bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); + if (count[s] == 0) + continue; + if (bitCost >= badCost) { - nuint err_code = FSE_normalizeCount( - norm, - tableLog, - count, - nbSeq, - max, - ZSTD_useLowProbCount(nbSeq) - ); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - return FSE_writeNCount(wksp, sizeof(byte) * 512, norm, max, tableLog); + cost += (nuint)count[s] * bitCost; } - /** - * Returns the cost in bits of encoding the distribution described by count - * using the entropy bound. - */ - private static nuint ZSTD_entropyCost(uint* count, uint max, nuint total) - { - uint cost = 0; - uint s; - assert(total > 0); - for (s = 0; s <= max; ++s) - { - uint norm = (uint)(256 * count[s] / total); - if (count[s] != 0 && norm == 0) - norm = 1; - assert(count[s] < total); - cost += count[s] * kInverseProbabilityLog256[norm]; - } + return cost >> (int)kAccuracyLog; + } - return cost >> 8; + /** + * Returns the cost in bits of encoding the distribution in count using the + * table described by norm. The max symbol support by norm is assumed >= max. + * norm must be valid for every symbol with non-zero probability in count. + */ + private static nuint ZSTD_crossEntropyCost( + short* norm, + uint accuracyLog, + uint* count, + uint max + ) + { + uint shift = 8 - accuracyLog; + nuint cost = 0; + uint s; + assert(accuracyLog <= 8); + for (s = 0; s <= max; ++s) + { + uint normAcc = norm[s] != -1 ? 
(uint)norm[s] : 1; + uint norm256 = normAcc << (int)shift; + assert(norm256 > 0); + assert(norm256 < 256); + cost += count[s] * kInverseProbabilityLog256[norm256]; } - /** - * Returns the cost in bits of encoding the distribution in count using ctable. - * Returns an error if ctable cannot represent all the symbols in count. - */ - private static nuint ZSTD_fseBitCost(uint* ctable, uint* count, uint max) + return cost >> 8; + } + + private static SymbolEncodingType_e ZSTD_selectEncodingType( + FSE_repeat* repeatMode, + uint* count, + uint max, + nuint mostFrequent, + nuint nbSeq, + uint FSELog, + uint* prevCTable, + short* defaultNorm, + uint defaultNormLog, + ZSTD_DefaultPolicy_e isDefaultAllowed, + ZSTD_strategy strategy + ) + { + if (mostFrequent == nbSeq) { - const uint kAccuracyLog = 8; - nuint cost = 0; - uint s; - FSE_CState_t cstate; - FSE_initCState(&cstate, ctable); - if (ZSTD_getFSEMaxSymbolValue(ctable) < max) + *repeatMode = FSE_repeat.FSE_repeat_none; + if (isDefaultAllowed != default && nbSeq <= 2) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + return SymbolEncodingType_e.set_basic; } - for (s = 0; s <= max; ++s) + return SymbolEncodingType_e.set_rle; + } + + if (strategy < ZSTD_strategy.ZSTD_lazy) + { + if (isDefaultAllowed != default) { - uint tableLog = cstate.stateLog; - uint badCost = tableLog + 1 << (int)kAccuracyLog; - uint bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); - if (count[s] == 0) - continue; - if (bitCost >= badCost) + const nuint staticFse_nbSeq_max = 1000; + nuint mult = (nuint)(10 - strategy); + const nuint baseLog = 3; + /* 28-36 for offset, 56-72 for lengths */ + nuint dynamicFse_nbSeq_min = + ((nuint)1 << (int)defaultNormLog) * mult >> (int)baseLog; + assert(defaultNormLog >= 5 && defaultNormLog <= 6); + assert(mult <= 9 && mult >= 7); + if (*repeatMode == FSE_repeat.FSE_repeat_valid && nbSeq < staticFse_nbSeq_max) { - return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + return SymbolEncodingType_e.set_repeat; } - cost += (nuint)count[s] * bitCost; + if ( + nbSeq < dynamicFse_nbSeq_min + || mostFrequent < nbSeq >> (int)(defaultNormLog - 1) + ) + { + *repeatMode = FSE_repeat.FSE_repeat_none; + return SymbolEncodingType_e.set_basic; + } } - - return cost >> (int)kAccuracyLog; } - - /** - * Returns the cost in bits of encoding the distribution in count using the - * table described by norm. The max symbol support by norm is assumed >= max. - * norm must be valid for every symbol with non-zero probability in count. - */ - private static nuint ZSTD_crossEntropyCost( - short* norm, - uint accuracyLog, - uint* count, - uint max - ) + else { - uint shift = 8 - accuracyLog; - nuint cost = 0; - uint s; - assert(accuracyLog <= 8); - for (s = 0; s <= max; ++s) + nuint basicCost = + isDefaultAllowed != default + ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + nuint repeatCost = + *repeatMode != FSE_repeat.FSE_repeat_none + ? ZSTD_fseBitCost(prevCTable, count, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + nuint NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); + nuint compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); +#if DEBUG + if (isDefaultAllowed != default) { - uint normAcc = norm[s] != -1 ? 
(uint)norm[s] : 1; - uint norm256 = normAcc << (int)shift; - assert(norm256 > 0); - assert(norm256 < 256); - cost += count[s] * kInverseProbabilityLog256[norm256]; + assert(!ERR_isError(basicCost)); + assert( + !(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost)) + ); } +#endif - return cost >> 8; - } - - private static SymbolEncodingType_e ZSTD_selectEncodingType( - FSE_repeat* repeatMode, - uint* count, - uint max, - nuint mostFrequent, - nuint nbSeq, - uint FSELog, - uint* prevCTable, - short* defaultNorm, - uint defaultNormLog, - ZSTD_DefaultPolicy_e isDefaultAllowed, - ZSTD_strategy strategy - ) - { - if (mostFrequent == nbSeq) + assert(!ERR_isError(NCountCost)); + assert( + compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)) + ); + if (basicCost <= repeatCost && basicCost <= compressedCost) { + assert(isDefaultAllowed != default); *repeatMode = FSE_repeat.FSE_repeat_none; - if (isDefaultAllowed != default && nbSeq <= 2) - { - return SymbolEncodingType_e.set_basic; - } - - return SymbolEncodingType_e.set_rle; + return SymbolEncodingType_e.set_basic; } - if (strategy < ZSTD_strategy.ZSTD_lazy) + if (repeatCost <= compressedCost) { - if (isDefaultAllowed != default) - { - const nuint staticFse_nbSeq_max = 1000; - nuint mult = (nuint)(10 - strategy); - const nuint baseLog = 3; - /* 28-36 for offset, 56-72 for lengths */ - nuint dynamicFse_nbSeq_min = - ((nuint)1 << (int)defaultNormLog) * mult >> (int)baseLog; - assert(defaultNormLog >= 5 && defaultNormLog <= 6); - assert(mult <= 9 && mult >= 7); - if (*repeatMode == FSE_repeat.FSE_repeat_valid && nbSeq < staticFse_nbSeq_max) - { - return SymbolEncodingType_e.set_repeat; - } - - if ( - nbSeq < dynamicFse_nbSeq_min - || mostFrequent < nbSeq >> (int)(defaultNormLog - 1) - ) - { - *repeatMode = FSE_repeat.FSE_repeat_none; - return SymbolEncodingType_e.set_basic; - } - } + assert(!ERR_isError(repeatCost)); + return SymbolEncodingType_e.set_repeat; } - else + + 
assert(compressedCost < basicCost && compressedCost < repeatCost); + } + + *repeatMode = FSE_repeat.FSE_repeat_check; + return SymbolEncodingType_e.set_compressed; + } + + private static nuint ZSTD_buildCTable( + void* dst, + nuint dstCapacity, + uint* nextCTable, + uint FSELog, + SymbolEncodingType_e type, + uint* count, + uint max, + byte* codeTable, + nuint nbSeq, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + uint* prevCTable, + nuint prevCTableSize, + void* entropyWorkspace, + nuint entropyWorkspaceSize + ) + { + byte* op = (byte*)dst; + byte* oend = op + dstCapacity; + switch (type) + { + case SymbolEncodingType_e.set_rle: { - nuint basicCost = - isDefaultAllowed != default - ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) - : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - nuint repeatCost = - *repeatMode != FSE_repeat.FSE_repeat_none - ? ZSTD_fseBitCost(prevCTable, count, max) - : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - nuint NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); - nuint compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); -#if DEBUG - if (isDefaultAllowed != default) + nuint err_code = FSE_buildCTable_rle(nextCTable, (byte)max); + if (ERR_isError(err_code)) { - assert(!ERR_isError(basicCost)); - assert( - !(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost)) - ); + return err_code; } -#endif + } - assert(!ERR_isError(NCountCost)); - assert( - compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)) - ); - if (basicCost <= repeatCost && basicCost <= compressedCost) + if (dstCapacity == 0) { - assert(isDefaultAllowed != default); - *repeatMode = FSE_repeat.FSE_repeat_none; - return SymbolEncodingType_e.set_basic; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - if (repeatCost <= compressedCost) + *op = codeTable[0]; + return 1; + case SymbolEncodingType_e.set_repeat: + 
memcpy(nextCTable, prevCTable, (uint)prevCTableSize); + return 0; + case SymbolEncodingType_e.set_basic: + { + /* note : could be pre-calculated */ + nuint err_code = FSE_buildCTable_wksp( + nextCTable, + defaultNorm, + defaultMax, + defaultNormLog, + entropyWorkspace, + entropyWorkspaceSize + ); + if (ERR_isError(err_code)) { - assert(!ERR_isError(repeatCost)); - return SymbolEncodingType_e.set_repeat; + return err_code; } - - assert(compressedCost < basicCost && compressedCost < repeatCost); } - *repeatMode = FSE_repeat.FSE_repeat_check; - return SymbolEncodingType_e.set_compressed; - } - - private static nuint ZSTD_buildCTable( - void* dst, - nuint dstCapacity, - uint* nextCTable, - uint FSELog, - SymbolEncodingType_e type, - uint* count, - uint max, - byte* codeTable, - nuint nbSeq, - short* defaultNorm, - uint defaultNormLog, - uint defaultMax, - uint* prevCTable, - nuint prevCTableSize, - void* entropyWorkspace, - nuint entropyWorkspaceSize - ) - { - byte* op = (byte*)dst; - byte* oend = op + dstCapacity; - switch (type) + return 0; + case SymbolEncodingType_e.set_compressed: { - case SymbolEncodingType_e.set_rle: - { - nuint err_code = FSE_buildCTable_rle(nextCTable, (byte)max); - if (ERR_isError(err_code)) - { - return err_code; - } - } + ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; + nuint nbSeq_1 = nbSeq; + uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + if (count[codeTable[nbSeq - 1]] > 1) + { + count[codeTable[nbSeq - 1]]--; + nbSeq_1--; + } - if (dstCapacity == 0) + assert(nbSeq_1 > 1); + assert(entropyWorkspaceSize >= (nuint)sizeof(ZSTD_BuildCTableWksp)); + { + nuint err_code = FSE_normalizeCount( + wksp->norm, + tableLog, + count, + nbSeq_1, + max, + ZSTD_useLowProbCount(nbSeq_1) + ); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return err_code; } + } - *op = codeTable[0]; - return 1; - case SymbolEncodingType_e.set_repeat: - memcpy(nextCTable, 
prevCTable, (uint)prevCTableSize); - return 0; - case SymbolEncodingType_e.set_basic: + assert(oend >= op); + { + /* overflow protected */ + nuint NCountSize = FSE_writeNCount( + op, + (nuint)(oend - op), + wksp->norm, + max, + tableLog + ); { - /* note : could be pre-calculated */ - nuint err_code = FSE_buildCTable_wksp( - nextCTable, - defaultNorm, - defaultMax, - defaultNormLog, - entropyWorkspace, - entropyWorkspaceSize - ); + nuint err_code = NCountSize; if (ERR_isError(err_code)) { return err_code; } } - return 0; - case SymbolEncodingType_e.set_compressed: - { - ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace; - nuint nbSeq_1 = nbSeq; - uint tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); - if (count[codeTable[nbSeq - 1]] > 1) - { - count[codeTable[nbSeq - 1]]--; - nbSeq_1--; - } - - assert(nbSeq_1 > 1); - assert(entropyWorkspaceSize >= (nuint)sizeof(ZSTD_BuildCTableWksp)); { - nuint err_code = FSE_normalizeCount( + nuint err_code = FSE_buildCTable_wksp( + nextCTable, wksp->norm, - tableLog, - count, - nbSeq_1, max, - ZSTD_useLowProbCount(nbSeq_1) + tableLog, + wksp->wksp, + sizeof(uint) * 285 ); if (ERR_isError(err_code)) { @@ -851,372 +883,338 @@ nuint entropyWorkspaceSize } } - assert(oend >= op); - { - /* overflow protected */ - nuint NCountSize = FSE_writeNCount( - op, - (nuint)(oend - op), - wksp->norm, - max, - tableLog - ); - { - nuint err_code = NCountSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = FSE_buildCTable_wksp( - nextCTable, - wksp->norm, - max, - tableLog, - wksp->wksp, - sizeof(uint) * 285 - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return NCountSize; - } + return NCountSize; } - - default: - assert(0 != 0); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } + + default: + assert(0 != 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } + } - private static nuint ZSTD_encodeSequences_body( - void* dst, - 
nuint dstCapacity, - uint* CTable_MatchLength, - byte* mlCodeTable, - uint* CTable_OffsetBits, - byte* ofCodeTable, - uint* CTable_LitLength, - byte* llCodeTable, - SeqDef_s* sequences, - nuint nbSeq, - int longOffsets - ) + private static nuint ZSTD_encodeSequences_body( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* ofCodeTable, + uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets + ) + { + BIT_CStream_t blockStream; + System.Runtime.CompilerServices.Unsafe.SkipInit(out blockStream); + FSE_CState_t stateMatchLength; + System.Runtime.CompilerServices.Unsafe.SkipInit(out stateMatchLength); + FSE_CState_t stateOffsetBits; + System.Runtime.CompilerServices.Unsafe.SkipInit(out stateOffsetBits); + FSE_CState_t stateLitLength; + System.Runtime.CompilerServices.Unsafe.SkipInit(out stateLitLength); + if (ERR_isError(BIT_initCStream(ref blockStream, dst, dstCapacity))) { - BIT_CStream_t blockStream; - System.Runtime.CompilerServices.Unsafe.SkipInit(out blockStream); - FSE_CState_t stateMatchLength; - System.Runtime.CompilerServices.Unsafe.SkipInit(out stateMatchLength); - FSE_CState_t stateOffsetBits; - System.Runtime.CompilerServices.Unsafe.SkipInit(out stateOffsetBits); - FSE_CState_t stateLitLength; - System.Runtime.CompilerServices.Unsafe.SkipInit(out stateLitLength); - if (ERR_isError(BIT_initCStream(ref blockStream, dst, dstCapacity))) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - nuint blockStream_bitContainer = blockStream.bitContainer; - uint blockStream_bitPos = blockStream.bitPos; - sbyte* blockStream_ptr = blockStream.ptr; - sbyte* blockStream_endPtr = blockStream.endPtr; - FSE_initCState2(ref stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); - FSE_initCState2(ref stateOffsetBits, CTable_OffsetBits, 
ofCodeTable[nbSeq - 1]); - FSE_initCState2(ref stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); - BIT_addBits( + nuint blockStream_bitContainer = blockStream.bitContainer; + uint blockStream_bitPos = blockStream.bitPos; + sbyte* blockStream_ptr = blockStream.ptr; + sbyte* blockStream_endPtr = blockStream.endPtr; + FSE_initCState2(ref stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]); + FSE_initCState2(ref stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]); + FSE_initCState2(ref stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]); + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].litLength, + LL_bits[llCodeTable[nbSeq - 1]] + ); + if (MEM_32bits) + BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[nbSeq - 1].litLength, - LL_bits[llCodeTable[nbSeq - 1]] + ref blockStream_ptr, + blockStream_endPtr ); - if (MEM_32bits) + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].mlBase, + ML_bits[mlCodeTable[nbSeq - 1]] + ); + if (MEM_32bits) + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + if (longOffsets != 0) + { + uint ofBits = ofCodeTable[nbSeq - 1]; + uint extraBits = + ofBits + - ( + ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 + ? ofBits + : (uint)(MEM_32bits ? 
25 : 57) - 1 + ); + if (extraBits != 0) + { + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].offBase, + extraBits + ); BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr ); + } + BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[nbSeq - 1].mlBase, - ML_bits[mlCodeTable[nbSeq - 1]] + sequences[nbSeq - 1].offBase >> (int)extraBits, + ofBits - extraBits ); - if (MEM_32bits) - BIT_flushBits( + } + else + { + BIT_addBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + sequences[nbSeq - 1].offBase, + ofCodeTable[nbSeq - 1] + ); + } + + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); + { + nuint n; + for (n = nbSeq - 2; n < nbSeq; n--) + { + byte llCode = llCodeTable[n]; + byte ofCode = ofCodeTable[n]; + byte mlCode = mlCodeTable[n]; + uint llBits = LL_bits[llCode]; + uint ofBits = ofCode; + uint mlBits = ML_bits[mlCode]; + FSE_encodeSymbol( ref blockStream_bitContainer, ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr + ref stateOffsetBits, + ofCode ); - if (longOffsets != 0) - { - uint ofBits = ofCodeTable[nbSeq - 1]; - uint extraBits = - ofBits - - ( - ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 - ? ofBits - : (uint)(MEM_32bits ? 
25 : 57) - 1 - ); - if (extraBits != 0) - { - BIT_addBits( + FSE_encodeSymbol( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref stateMatchLength, + mlCode + ); + if (MEM_32bits) + BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[nbSeq - 1].offBase, - extraBits + ref blockStream_ptr, + blockStream_endPtr ); + FSE_encodeSymbol( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref stateLitLength, + llCode + ); + if (MEM_32bits || ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8)) BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr ); - } - BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[nbSeq - 1].offBase >> (int)extraBits, - ofBits - extraBits + sequences[n].litLength, + llBits ); - } - else - { + if (MEM_32bits && llBits + mlBits > 24) + BIT_flushBits( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr + ); BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[nbSeq - 1].offBase, - ofCodeTable[nbSeq - 1] + sequences[n].mlBase, + mlBits ); - } - - BIT_flushBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr - ); - { - nuint n; - for (n = nbSeq - 2; n < nbSeq; n--) - { - byte llCode = llCodeTable[n]; - byte ofCode = ofCodeTable[n]; - byte mlCode = mlCodeTable[n]; - uint llBits = LL_bits[llCode]; - uint ofBits = ofCode; - uint mlBits = ML_bits[mlCode]; - FSE_encodeSymbol( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref stateOffsetBits, - ofCode - ); - FSE_encodeSymbol( + if (MEM_32bits || ofBits + mlBits + llBits > 56) + BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, - ref stateMatchLength, - mlCode + ref blockStream_ptr, + blockStream_endPtr ); - if (MEM_32bits) - BIT_flushBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, 
- blockStream_endPtr + if (longOffsets != 0) + { + uint extraBits = + ofBits + - ( + ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 + ? ofBits + : (uint)(MEM_32bits ? 25 : 57) - 1 ); - FSE_encodeSymbol( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref stateLitLength, - llCode - ); - if (MEM_32bits || ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8)) - BIT_flushBits( + if (extraBits != 0) + { + BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr + sequences[n].offBase, + extraBits ); - BIT_addBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - sequences[n].litLength, - llBits - ); - if (MEM_32bits && llBits + mlBits > 24) BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, ref blockStream_ptr, blockStream_endPtr ); + } + BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - sequences[n].mlBase, - mlBits + sequences[n].offBase >> (int)extraBits, + ofBits - extraBits ); - if (MEM_32bits || ofBits + mlBits + llBits > 56) - BIT_flushBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr - ); - if (longOffsets != 0) - { - uint extraBits = - ofBits - - ( - ofBits < (uint)(MEM_32bits ? 25 : 57) - 1 - ? ofBits - : (uint)(MEM_32bits ? 
25 : 57) - 1 - ); - if (extraBits != 0) - { - BIT_addBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - sequences[n].offBase, - extraBits - ); - BIT_flushBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr - ); - } - - BIT_addBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - sequences[n].offBase >> (int)extraBits, - ofBits - extraBits - ); - } - else - { - BIT_addBits( - ref blockStream_bitContainer, - ref blockStream_bitPos, - sequences[n].offBase, - ofBits - ); - } - - BIT_flushBits( + } + else + { + BIT_addBits( ref blockStream_bitContainer, ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr + sequences[n].offBase, + ofBits ); } - } - FSE_flushCState( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr, - ref stateMatchLength - ); - FSE_flushCState( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr, - ref stateOffsetBits - ); - FSE_flushCState( - ref blockStream_bitContainer, - ref blockStream_bitPos, - ref blockStream_ptr, - blockStream_endPtr, - ref stateLitLength - ); - { - nuint streamSize = BIT_closeCStream( + BIT_flushBits( ref blockStream_bitContainer, ref blockStream_bitPos, - blockStream_ptr, - blockStream_endPtr, - blockStream.startPtr + ref blockStream_ptr, + blockStream_endPtr ); - if (streamSize == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - return streamSize; } } - private static nuint ZSTD_encodeSequences_default( - void* dst, - nuint dstCapacity, - uint* CTable_MatchLength, - byte* mlCodeTable, - uint* CTable_OffsetBits, - byte* ofCodeTable, - uint* CTable_LitLength, - byte* llCodeTable, - SeqDef_s* sequences, - nuint nbSeq, - int longOffsets - ) + FSE_flushCState( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateMatchLength + ); + 
FSE_flushCState( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateOffsetBits + ); + FSE_flushCState( + ref blockStream_bitContainer, + ref blockStream_bitPos, + ref blockStream_ptr, + blockStream_endPtr, + ref stateLitLength + ); { - return ZSTD_encodeSequences_body( - dst, - dstCapacity, - CTable_MatchLength, - mlCodeTable, - CTable_OffsetBits, - ofCodeTable, - CTable_LitLength, - llCodeTable, - sequences, - nbSeq, - longOffsets + nuint streamSize = BIT_closeCStream( + ref blockStream_bitContainer, + ref blockStream_bitPos, + blockStream_ptr, + blockStream_endPtr, + blockStream.startPtr ); - } + if (streamSize == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - private static nuint ZSTD_encodeSequences( - void* dst, - nuint dstCapacity, - uint* CTable_MatchLength, - byte* mlCodeTable, - uint* CTable_OffsetBits, - byte* ofCodeTable, - uint* CTable_LitLength, - byte* llCodeTable, - SeqDef_s* sequences, - nuint nbSeq, - int longOffsets, - int bmi2 - ) - { - return ZSTD_encodeSequences_default( - dst, - dstCapacity, - CTable_MatchLength, - mlCodeTable, - CTable_OffsetBits, - ofCodeTable, - CTable_LitLength, - llCodeTable, - sequences, - nbSeq, - longOffsets - ); + return streamSize; } } + + private static nuint ZSTD_encodeSequences_default( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* ofCodeTable, + uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets + ) + { + return ZSTD_encodeSequences_body( + dst, + dstCapacity, + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets + ); + } + + private static nuint ZSTD_encodeSequences( + void* dst, + nuint dstCapacity, + uint* CTable_MatchLength, + byte* mlCodeTable, + uint* CTable_OffsetBits, + byte* ofCodeTable, + 
uint* CTable_LitLength, + byte* llCodeTable, + SeqDef_s* sequences, + nuint nbSeq, + int longOffsets, + int bmi2 + ) + { + return ZSTD_encodeSequences_default( + dst, + dstCapacity, + CTable_MatchLength, + mlCodeTable, + CTable_OffsetBits, + ofCodeTable, + CTable_LitLength, + llCodeTable, + sequences, + nbSeq, + longOffsets + ); + } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs index 1f12375c5..bc5dcbdac 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs @@ -1,981 +1,980 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /** ZSTD_compressSubBlock_literal() : + * Compresses literals section for a sub-block. + * When we have to write the Huffman table we will sometimes choose a header + * size larger than necessary. This is because we have to pick the header size + * before we know the table size + compressed size, so we have a bound on the + * table size. If we guessed incorrectly, we fall back to uncompressed literals. + * + * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded + * in writing the header, otherwise it is set to 0. + * + * hufMetadata->hType has literals block type info. + * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. + * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. 
+ * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block + * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block + * and the following sub-blocks' literals sections will be Treeless_Literals_Block. + * @return : compressed size of literals section of a sub-block + * Or 0 if unable to compress. + * Or error code */ + private static nuint ZSTD_compressSubBlock_literal( + nuint* hufTable, + ZSTD_hufCTablesMetadata_t* hufMetadata, + byte* literals, + nuint litSize, + void* dst, + nuint dstSize, + int bmi2, + int writeEntropy, + int* entropyWritten + ) { - /** ZSTD_compressSubBlock_literal() : - * Compresses literals section for a sub-block. - * When we have to write the Huffman table we will sometimes choose a header - * size larger than necessary. This is because we have to pick the header size - * before we know the table size + compressed size, so we have a bound on the - * table size. If we guessed incorrectly, we fall back to uncompressed literals. - * - * We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded - * in writing the header, otherwise it is set to 0. - * - * hufMetadata->hType has literals block type info. - * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. - * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. - * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block - * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block - * and the following sub-blocks' literals sections will be Treeless_Literals_Block. - * @return : compressed size of literals section of a sub-block - * Or 0 if unable to compress. 
- * Or error code */ - private static nuint ZSTD_compressSubBlock_literal( - nuint* hufTable, - ZSTD_hufCTablesMetadata_t* hufMetadata, - byte* literals, - nuint litSize, - void* dst, - nuint dstSize, - int bmi2, - int writeEntropy, - int* entropyWritten - ) + nuint header = (nuint)(writeEntropy != 0 ? 200 : 0); + nuint lhSize = (nuint)( + 3 + + (litSize >= 1 * (1 << 10) - header ? 1 : 0) + + (litSize >= 16 * (1 << 10) - header ? 1 : 0) + ); + byte* ostart = (byte*)dst; + byte* oend = ostart + dstSize; + byte* op = ostart + lhSize; + uint singleStream = lhSize == 3 ? 1U : 0U; + SymbolEncodingType_e hType = + writeEntropy != 0 ? hufMetadata->hType : SymbolEncodingType_e.set_repeat; + nuint cLitSize = 0; + *entropyWritten = 0; + if (litSize == 0 || hufMetadata->hType == SymbolEncodingType_e.set_basic) { - nuint header = (nuint)(writeEntropy != 0 ? 200 : 0); - nuint lhSize = (nuint)( - 3 - + (litSize >= 1 * (1 << 10) - header ? 1 : 0) - + (litSize >= 16 * (1 << 10) - header ? 1 : 0) - ); - byte* ostart = (byte*)dst; - byte* oend = ostart + dstSize; - byte* op = ostart + lhSize; - uint singleStream = lhSize == 3 ? 1U : 0U; - SymbolEncodingType_e hType = - writeEntropy != 0 ? 
hufMetadata->hType : SymbolEncodingType_e.set_repeat; - nuint cLitSize = 0; - *entropyWritten = 0; - if (litSize == 0 || hufMetadata->hType == SymbolEncodingType_e.set_basic) - { - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } - else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) - { - return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize); - } - - assert(litSize > 0); - assert( - hufMetadata->hType == SymbolEncodingType_e.set_compressed - || hufMetadata->hType == SymbolEncodingType_e.set_repeat - ); - if (writeEntropy != 0 && hufMetadata->hType == SymbolEncodingType_e.set_compressed) - { - memcpy(op, hufMetadata->hufDesBuffer, (uint)hufMetadata->hufDesSize); - op += hufMetadata->hufDesSize; - cLitSize += hufMetadata->hufDesSize; - } - - { - int flags = bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0; - nuint cSize = - singleStream != 0 - ? HUF_compress1X_usingCTable( - op, - (nuint)(oend - op), - literals, - litSize, - hufTable, - flags - ) - : HUF_compress4X_usingCTable( - op, - (nuint)(oend - op), - literals, - litSize, - hufTable, - flags - ); - op += cSize; - cLitSize += cSize; - if (cSize == 0 || ERR_isError(cSize)) - { - return 0; - } + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); + } + else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) + { + return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize); + } - if (writeEntropy == 0 && cLitSize >= litSize) - { - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } + assert(litSize > 0); + assert( + hufMetadata->hType == SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat + ); + if (writeEntropy != 0 && hufMetadata->hType == SymbolEncodingType_e.set_compressed) + { + memcpy(op, hufMetadata->hufDesBuffer, (uint)hufMetadata->hufDesSize); + op += hufMetadata->hufDesSize; + cLitSize += hufMetadata->hufDesSize; + } - if ( - lhSize - < (nuint)( - 3 - + (cLitSize >= 1 * (1 << 
10) ? 1 : 0) - + (cLitSize >= 16 * (1 << 10) ? 1 : 0) + { + int flags = bmi2 != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0; + nuint cSize = + singleStream != 0 + ? HUF_compress1X_usingCTable( + op, + (nuint)(oend - op), + literals, + litSize, + hufTable, + flags ) - ) - { - assert(cLitSize > litSize); - return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); - } + : HUF_compress4X_usingCTable( + op, + (nuint)(oend - op), + literals, + litSize, + hufTable, + flags + ); + op += cSize; + cLitSize += cSize; + if (cSize == 0 || ERR_isError(cSize)) + { + return 0; } - switch (lhSize) + if (writeEntropy == 0 && cLitSize >= litSize) { - case 3: - { - uint lhc = - (uint)hType - + ((singleStream == 0 ? 1U : 0U) << 2) - + ((uint)litSize << 4) - + ((uint)cLitSize << 14); - MEM_writeLE24(ostart, lhc); - break; - } - - case 4: - { - uint lhc = - (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); - MEM_writeLE32(ostart, lhc); - break; - } - - case 5: - { - uint lhc = - (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (byte)(cLitSize >> 10); - break; - } - - default: - assert(0 != 0); - break; + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } - *entropyWritten = 1; - return (nuint)(op - ostart); - } - - private static nuint ZSTD_seqDecompressedSize( - SeqStore_t* seqStore, - SeqDef_s* sequences, - nuint nbSeqs, - nuint litSize, - int lastSubBlock - ) - { - nuint matchLengthSum = 0; - nuint litLengthSum = 0; - nuint n; - for (n = 0; n < nbSeqs; n++) + if ( + lhSize + < (nuint)( + 3 + + (cLitSize >= 1 * (1 << 10) ? 1 : 0) + + (cLitSize >= 16 * (1 << 10) ? 
1 : 0) + ) + ) { - ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences + n); - litLengthSum += seqLen.litLength; - matchLengthSum += seqLen.matchLength; + assert(cLitSize > litSize); + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); } - - if (lastSubBlock == 0) - assert(litLengthSum == litSize); - else - assert(litLengthSum <= litSize); - return matchLengthSum + litSize; } - /** ZSTD_compressSubBlock_sequences() : - * Compresses sequences section for a sub-block. - * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have - * symbol compression modes for the super-block. - * The first successfully compressed block will have these in its header. - * We set entropyWritten=1 when we succeed in compressing the sequences. - * The following sub-blocks will always have repeat mode. - * @return : compressed size of sequences section of a sub-block - * Or 0 if it is unable to compress - * Or error code. */ - private static nuint ZSTD_compressSubBlock_sequences( - ZSTD_fseCTables_t* fseTables, - ZSTD_fseCTablesMetadata_t* fseMetadata, - SeqDef_s* sequences, - nuint nbSeq, - byte* llCode, - byte* mlCode, - byte* ofCode, - ZSTD_CCtx_params_s* cctxParams, - void* dst, - nuint dstCapacity, - int bmi2, - int writeEntropy, - int* entropyWritten - ) + switch (lhSize) { - int longOffsets = cctxParams->cParams.windowLog > (uint)(MEM_32bits ? 25 : 57) ? 1 : 0; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstCapacity; - byte* op = ostart; - byte* seqHead; - *entropyWritten = 0; - if (oend - op < 3 + 1) + case 3: { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + uint lhc = + (uint)hType + + ((singleStream == 0 ? 
1U : 0U) << 2) + + ((uint)litSize << 4) + + ((uint)cLitSize << 14); + MEM_writeLE24(ostart, lhc); + break; } - if (nbSeq < 128) - *op++ = (byte)nbSeq; - else if (nbSeq < 0x7F00) + case 4: { - op[0] = (byte)((nbSeq >> 8) + 0x80); - op[1] = (byte)nbSeq; - op += 2; - } - else - { - op[0] = 0xFF; - MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00)); - op += 3; + uint lhc = + (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); + MEM_writeLE32(ostart, lhc); + break; } - if (nbSeq == 0) + case 5: { - return (nuint)(op - ostart); + uint lhc = + (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (byte)(cLitSize >> 10); + break; } - seqHead = op++; - if (writeEntropy != 0) - { - uint LLtype = (uint)fseMetadata->llType; - uint Offtype = (uint)fseMetadata->ofType; - uint MLtype = (uint)fseMetadata->mlType; - *seqHead = (byte)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); - memcpy(op, fseMetadata->fseTablesBuffer, (uint)fseMetadata->fseTablesSize); - op += fseMetadata->fseTablesSize; - } - else - { - uint repeat = (uint)SymbolEncodingType_e.set_repeat; - *seqHead = (byte)((repeat << 6) + (repeat << 4) + (repeat << 2)); - } + default: + assert(0 != 0); + break; + } - { - nuint bitstreamSize = ZSTD_encodeSequences( - op, - (nuint)(oend - op), - fseTables->matchlengthCTable, - mlCode, - fseTables->offcodeCTable, - ofCode, - fseTables->litlengthCTable, - llCode, - sequences, - nbSeq, - longOffsets, - bmi2 - ); - { - nuint err_code = bitstreamSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + *entropyWritten = 1; + return (nuint)(op - ostart); + } - op += bitstreamSize; - if ( - writeEntropy != 0 - && fseMetadata->lastCountSize != 0 - && fseMetadata->lastCountSize + bitstreamSize < 4 - ) - { - assert(fseMetadata->lastCountSize + bitstreamSize == 3); - return 0; - } - } + private static nuint ZSTD_seqDecompressedSize( + SeqStore_t* seqStore, + SeqDef_s* sequences, + nuint nbSeqs, 
+ nuint litSize, + int lastSubBlock + ) + { + nuint matchLengthSum = 0; + nuint litLengthSum = 0; + nuint n; + for (n = 0; n < nbSeqs; n++) + { + ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences + n); + litLengthSum += seqLen.litLength; + matchLengthSum += seqLen.matchLength; + } - if (op - seqHead < 4) - { - return 0; - } + if (lastSubBlock == 0) + assert(litLengthSum == litSize); + else + assert(litLengthSum <= litSize); + return matchLengthSum + litSize; + } - *entropyWritten = 1; - return (nuint)(op - ostart); + /** ZSTD_compressSubBlock_sequences() : + * Compresses sequences section for a sub-block. + * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have + * symbol compression modes for the super-block. + * The first successfully compressed block will have these in its header. + * We set entropyWritten=1 when we succeed in compressing the sequences. + * The following sub-blocks will always have repeat mode. + * @return : compressed size of sequences section of a sub-block + * Or 0 if it is unable to compress + * Or error code. */ + private static nuint ZSTD_compressSubBlock_sequences( + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + SeqDef_s* sequences, + nuint nbSeq, + byte* llCode, + byte* mlCode, + byte* ofCode, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + int bmi2, + int writeEntropy, + int* entropyWritten + ) + { + int longOffsets = cctxParams->cParams.windowLog > (uint)(MEM_32bits ? 25 : 57) ? 1 : 0; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstCapacity; + byte* op = ostart; + byte* seqHead; + *entropyWritten = 0; + if (oend - op < 3 + 1) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - /** ZSTD_compressSubBlock() : - * Compresses a single sub-block. - * @return : compressed size of the sub-block - * Or 0 if it failed to compress. 
*/ - private static nuint ZSTD_compressSubBlock( - ZSTD_entropyCTables_t* entropy, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - SeqDef_s* sequences, - nuint nbSeq, - byte* literals, - nuint litSize, - byte* llCode, - byte* mlCode, - byte* ofCode, - ZSTD_CCtx_params_s* cctxParams, - void* dst, - nuint dstCapacity, - int bmi2, - int writeLitEntropy, - int writeSeqEntropy, - int* litEntropyWritten, - int* seqEntropyWritten, - uint lastBlock - ) + if (nbSeq < 128) + *op++ = (byte)nbSeq; + else if (nbSeq < 0x7F00) { - byte* ostart = (byte*)dst; - byte* oend = ostart + dstCapacity; - byte* op = ostart + ZSTD_blockHeaderSize; - { - nuint cLitSize = ZSTD_compressSubBlock_literal( - &entropy->huf.CTable.e0, - &entropyMetadata->hufMetadata, - literals, - litSize, - op, - (nuint)(oend - op), - bmi2, - writeLitEntropy, - litEntropyWritten - ); - { - nuint err_code = cLitSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } + op[0] = (byte)((nbSeq >> 8) + 0x80); + op[1] = (byte)nbSeq; + op += 2; + } + else + { + op[0] = 0xFF; + MEM_writeLE16(op + 1, (ushort)(nbSeq - 0x7F00)); + op += 3; + } - if (cLitSize == 0) - return 0; - op += cLitSize; - } + if (nbSeq == 0) + { + return (nuint)(op - ostart); + } + seqHead = op++; + if (writeEntropy != 0) + { + uint LLtype = (uint)fseMetadata->llType; + uint Offtype = (uint)fseMetadata->ofType; + uint MLtype = (uint)fseMetadata->mlType; + *seqHead = (byte)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); + memcpy(op, fseMetadata->fseTablesBuffer, (uint)fseMetadata->fseTablesSize); + op += fseMetadata->fseTablesSize; + } + else + { + uint repeat = (uint)SymbolEncodingType_e.set_repeat; + *seqHead = (byte)((repeat << 6) + (repeat << 4) + (repeat << 2)); + } + + { + nuint bitstreamSize = ZSTD_encodeSequences( + op, + (nuint)(oend - op), + fseTables->matchlengthCTable, + mlCode, + fseTables->offcodeCTable, + ofCode, + fseTables->litlengthCTable, + llCode, + sequences, + nbSeq, + longOffsets, + bmi2 + ); { - nuint cSeqSize = 
ZSTD_compressSubBlock_sequences( - &entropy->fse, - &entropyMetadata->fseMetadata, - sequences, - nbSeq, - llCode, - mlCode, - ofCode, - cctxParams, - op, - (nuint)(oend - op), - bmi2, - writeSeqEntropy, - seqEntropyWritten - ); + nuint err_code = bitstreamSize; + if (ERR_isError(err_code)) { - nuint err_code = cSeqSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - if (cSeqSize == 0) - return 0; - op += cSeqSize; } + op += bitstreamSize; + if ( + writeEntropy != 0 + && fseMetadata->lastCountSize != 0 + && fseMetadata->lastCountSize + bitstreamSize < 4 + ) { - nuint cSize = (nuint)(op - ostart) - ZSTD_blockHeaderSize; - uint cBlockHeader24 = - lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); - MEM_writeLE24(ostart, cBlockHeader24); + assert(fseMetadata->lastCountSize + bitstreamSize == 3); + return 0; } + } - return (nuint)(op - ostart); + if (op - seqHead < 4) + { + return 0; } - private static nuint ZSTD_estimateSubBlockSize_literal( - byte* literals, - nuint litSize, - ZSTD_hufCTables_t* huf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, - nuint wkspSize, - int writeEntropy - ) + *entropyWritten = 1; + return (nuint)(op - ostart); + } + + /** ZSTD_compressSubBlock() : + * Compresses a single sub-block. + * @return : compressed size of the sub-block + * Or 0 if it failed to compress. 
*/ + private static nuint ZSTD_compressSubBlock( + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + SeqDef_s* sequences, + nuint nbSeq, + byte* literals, + nuint litSize, + byte* llCode, + byte* mlCode, + byte* ofCode, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + int bmi2, + int writeLitEntropy, + int writeSeqEntropy, + int* litEntropyWritten, + int* seqEntropyWritten, + uint lastBlock + ) + { + byte* ostart = (byte*)dst; + byte* oend = ostart + dstCapacity; + byte* op = ostart + ZSTD_blockHeaderSize; { - uint* countWksp = (uint*)workspace; - uint maxSymbolValue = 255; - /* Use hard coded size of 3 bytes */ - nuint literalSectionHeaderSize = 3; - if (hufMetadata->hType == SymbolEncodingType_e.set_basic) - return litSize; - else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) - return 1; - else if ( - hufMetadata->hType == SymbolEncodingType_e.set_compressed - || hufMetadata->hType == SymbolEncodingType_e.set_repeat - ) + nuint cLitSize = ZSTD_compressSubBlock_literal( + &entropy->huf.CTable.e0, + &entropyMetadata->hufMetadata, + literals, + litSize, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + litEntropyWritten + ); { - nuint largest = HIST_count_wksp( - countWksp, - &maxSymbolValue, - literals, - litSize, - workspace, - wkspSize - ); - if (ERR_isError(largest)) - return litSize; + nuint err_code = cLitSize; + if (ERR_isError(err_code)) { - nuint cLitSizeEstimate = HUF_estimateCompressedSize( - &huf->CTable.e0, - countWksp, - maxSymbolValue - ); - if (writeEntropy != 0) - cLitSizeEstimate += hufMetadata->hufDesSize; - return cLitSizeEstimate + literalSectionHeaderSize; + return err_code; } } - assert(0 != 0); - return 0; + if (cLitSize == 0) + return 0; + op += cLitSize; } - private static nuint ZSTD_estimateSubBlockSize_symbolType( - SymbolEncodingType_e type, - byte* codeTable, - uint maxCode, - nuint nbSeq, - uint* fseCTable, - byte* additionalBits, - short* defaultNorm, - uint 
defaultNormLog, - uint defaultMax, - void* workspace, - nuint wkspSize - ) { - uint* countWksp = (uint*)workspace; - byte* ctp = codeTable; - byte* ctStart = ctp; - byte* ctEnd = ctStart + nbSeq; - nuint cSymbolTypeSizeEstimateInBits = 0; - uint max = maxCode; - HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); - if (type == SymbolEncodingType_e.set_basic) - { - assert(max <= defaultMax); - cSymbolTypeSizeEstimateInBits = - max <= defaultMax - ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) - : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } - else if (type == SymbolEncodingType_e.set_rle) - { - cSymbolTypeSizeEstimateInBits = 0; - } - else if ( - type == SymbolEncodingType_e.set_compressed - || type == SymbolEncodingType_e.set_repeat - ) - { - cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); - } - - if (ERR_isError(cSymbolTypeSizeEstimateInBits)) - return nbSeq * 10; - while (ctp < ctEnd) + nuint cSeqSize = ZSTD_compressSubBlock_sequences( + &entropy->fse, + &entropyMetadata->fseMetadata, + sequences, + nbSeq, + llCode, + mlCode, + ofCode, + cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeSeqEntropy, + seqEntropyWritten + ); { - if (additionalBits != null) - cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; - else - cSymbolTypeSizeEstimateInBits += *ctp; - ctp++; + nuint err_code = cSeqSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - return cSymbolTypeSizeEstimateInBits / 8; + if (cSeqSize == 0) + return 0; + op += cSeqSize; } - private static nuint ZSTD_estimateSubBlockSize_sequences( - byte* ofCodeTable, - byte* llCodeTable, - byte* mlCodeTable, - nuint nbSeq, - ZSTD_fseCTables_t* fseTables, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, - nuint wkspSize, - int writeEntropy - ) { - /* Use hard coded size of 3 bytes */ - const nuint sequencesSectionHeaderSize = 3; - nuint cSeqSizeEstimate = 0; - if (nbSeq == 0) - return 
sequencesSectionHeaderSize; - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( - fseMetadata->ofType, - ofCodeTable, - 31, - nbSeq, - fseTables->offcodeCTable, - null, - OF_defaultNorm, - OF_defaultNormLog, - 28, - workspace, - wkspSize - ); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( - fseMetadata->llType, - llCodeTable, - 35, - nbSeq, - fseTables->litlengthCTable, - LL_bits, - LL_defaultNorm, - LL_defaultNormLog, - 35, - workspace, - wkspSize - ); - cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( - fseMetadata->mlType, - mlCodeTable, - 52, - nbSeq, - fseTables->matchlengthCTable, - ML_bits, - ML_defaultNorm, - ML_defaultNormLog, - 52, - workspace, - wkspSize - ); - if (writeEntropy != 0) - cSeqSizeEstimate += fseMetadata->fseTablesSize; - return cSeqSizeEstimate + sequencesSectionHeaderSize; + nuint cSize = (nuint)(op - ostart) - ZSTD_blockHeaderSize; + uint cBlockHeader24 = + lastBlock + ((uint)blockType_e.bt_compressed << 1) + (uint)(cSize << 3); + MEM_writeLE24(ostart, cBlockHeader24); } - private static EstimatedBlockSize ZSTD_estimateSubBlockSize( - byte* literals, - nuint litSize, - byte* ofCodeTable, - byte* llCodeTable, - byte* mlCodeTable, - nuint nbSeq, - ZSTD_entropyCTables_t* entropy, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, - nuint wkspSize, - int writeLitEntropy, - int writeSeqEntropy + return (nuint)(op - ostart); + } + + private static nuint ZSTD_estimateSubBlockSize_literal( + byte* literals, + nuint litSize, + ZSTD_hufCTables_t* huf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) + { + uint* countWksp = (uint*)workspace; + uint maxSymbolValue = 255; + /* Use hard coded size of 3 bytes */ + nuint literalSectionHeaderSize = 3; + if (hufMetadata->hType == SymbolEncodingType_e.set_basic) + return litSize; + else if (hufMetadata->hType == SymbolEncodingType_e.set_rle) + return 1; + else if ( + hufMetadata->hType == 
SymbolEncodingType_e.set_compressed + || hufMetadata->hType == SymbolEncodingType_e.set_repeat ) { - EstimatedBlockSize ebs; - ebs.estLitSize = ZSTD_estimateSubBlockSize_literal( + nuint largest = HIST_count_wksp( + countWksp, + &maxSymbolValue, literals, litSize, - &entropy->huf, - &entropyMetadata->hufMetadata, workspace, - wkspSize, - writeLitEntropy - ); - ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences( - ofCodeTable, - llCodeTable, - mlCodeTable, - nbSeq, - &entropy->fse, - &entropyMetadata->fseMetadata, - workspace, - wkspSize, - writeSeqEntropy + wkspSize ); - ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize; - return ebs; + if (ERR_isError(largest)) + return litSize; + { + nuint cLitSizeEstimate = HUF_estimateCompressedSize( + &huf->CTable.e0, + countWksp, + maxSymbolValue + ); + if (writeEntropy != 0) + cLitSizeEstimate += hufMetadata->hufDesSize; + return cLitSizeEstimate + literalSectionHeaderSize; + } } - private static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t* fseMetadata) + assert(0 != 0); + return 0; + } + + private static nuint ZSTD_estimateSubBlockSize_symbolType( + SymbolEncodingType_e type, + byte* codeTable, + uint maxCode, + nuint nbSeq, + uint* fseCTable, + byte* additionalBits, + short* defaultNorm, + uint defaultNormLog, + uint defaultMax, + void* workspace, + nuint wkspSize + ) + { + uint* countWksp = (uint*)workspace; + byte* ctp = codeTable; + byte* ctStart = ctp; + byte* ctEnd = ctStart + nbSeq; + nuint cSymbolTypeSizeEstimateInBits = 0; + uint max = maxCode; + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); + if (type == SymbolEncodingType_e.set_basic) { - if ( - fseMetadata->llType == SymbolEncodingType_e.set_compressed - || fseMetadata->llType == SymbolEncodingType_e.set_rle - ) - return 1; - if ( - fseMetadata->mlType == SymbolEncodingType_e.set_compressed - || fseMetadata->mlType == SymbolEncodingType_e.set_rle - ) - return 1; - if ( - fseMetadata->ofType == 
SymbolEncodingType_e.set_compressed - || fseMetadata->ofType == SymbolEncodingType_e.set_rle - ) - return 1; - return 0; + assert(max <= defaultMax); + cSymbolTypeSizeEstimateInBits = + max <= defaultMax + ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max) + : unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - - private static nuint countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seqCount) + else if (type == SymbolEncodingType_e.set_rle) { - nuint n, - total = 0; - assert(sp != null); - for (n = 0; n < seqCount; n++) - { - total += ZSTD_getSequenceLength(seqStore, sp + n).litLength; - } + cSymbolTypeSizeEstimateInBits = 0; + } + else if ( + type == SymbolEncodingType_e.set_compressed + || type == SymbolEncodingType_e.set_repeat + ) + { + cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); + } - return total; + if (ERR_isError(cSymbolTypeSizeEstimateInBits)) + return nbSeq * 10; + while (ctp < ctEnd) + { + if (additionalBits != null) + cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; + else + cSymbolTypeSizeEstimateInBits += *ctp; + ctp++; } - private static nuint sizeBlockSequences( - SeqDef_s* sp, - nuint nbSeqs, - nuint targetBudget, - nuint avgLitCost, - nuint avgSeqCost, - int firstSubBlock + return cSymbolTypeSizeEstimateInBits / 8; + } + + private static nuint ZSTD_estimateSubBlockSize_sequences( + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_fseCTables_t* fseTables, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, + nuint wkspSize, + int writeEntropy + ) + { + /* Use hard coded size of 3 bytes */ + const nuint sequencesSectionHeaderSize = 3; + nuint cSeqSizeEstimate = 0; + if (nbSeq == 0) + return sequencesSectionHeaderSize; + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->ofType, + ofCodeTable, + 31, + nbSeq, + fseTables->offcodeCTable, + null, + OF_defaultNorm, + OF_defaultNormLog, + 28, + workspace, + wkspSize + 
); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->llType, + llCodeTable, + 35, + nbSeq, + fseTables->litlengthCTable, + LL_bits, + LL_defaultNorm, + LL_defaultNormLog, + 35, + workspace, + wkspSize + ); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType( + fseMetadata->mlType, + mlCodeTable, + 52, + nbSeq, + fseTables->matchlengthCTable, + ML_bits, + ML_defaultNorm, + ML_defaultNormLog, + 52, + workspace, + wkspSize + ); + if (writeEntropy != 0) + cSeqSizeEstimate += fseMetadata->fseTablesSize; + return cSeqSizeEstimate + sequencesSectionHeaderSize; + } + + private static EstimatedBlockSize ZSTD_estimateSubBlockSize( + byte* literals, + nuint litSize, + byte* ofCodeTable, + byte* llCodeTable, + byte* mlCodeTable, + nuint nbSeq, + ZSTD_entropyCTables_t* entropy, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, + nuint wkspSize, + int writeLitEntropy, + int writeSeqEntropy + ) + { + EstimatedBlockSize ebs; + ebs.estLitSize = ZSTD_estimateSubBlockSize_literal( + literals, + litSize, + &entropy->huf, + &entropyMetadata->hufMetadata, + workspace, + wkspSize, + writeLitEntropy + ); + ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences( + ofCodeTable, + llCodeTable, + mlCodeTable, + nbSeq, + &entropy->fse, + &entropyMetadata->fseMetadata, + workspace, + wkspSize, + writeSeqEntropy + ); + ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize; + return ebs; + } + + private static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t* fseMetadata) + { + if ( + fseMetadata->llType == SymbolEncodingType_e.set_compressed + || fseMetadata->llType == SymbolEncodingType_e.set_rle ) + return 1; + if ( + fseMetadata->mlType == SymbolEncodingType_e.set_compressed + || fseMetadata->mlType == SymbolEncodingType_e.set_rle + ) + return 1; + if ( + fseMetadata->ofType == SymbolEncodingType_e.set_compressed + || fseMetadata->ofType == SymbolEncodingType_e.set_rle + ) + return 1; + return 0; + } + + private static nuint 
countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seqCount) + { + nuint n, + total = 0; + assert(sp != null); + for (n = 0; n < seqCount; n++) { - nuint n, - budget = 0, - inSize = 0; - /* generous estimate */ - nuint headerSize = (nuint)firstSubBlock * 120 * 256; - assert(firstSubBlock == 0 || firstSubBlock == 1); - budget += headerSize; - budget += sp[0].litLength * avgLitCost + avgSeqCost; - if (budget > targetBudget) - return 1; - inSize = (nuint)(sp[0].litLength + (sp[0].mlBase + 3)); - for (n = 1; n < nbSeqs; n++) - { - nuint currentCost = sp[n].litLength * avgLitCost + avgSeqCost; - budget += currentCost; - inSize += (nuint)(sp[n].litLength + (sp[n].mlBase + 3)); - if (budget > targetBudget && budget < inSize * 256) - break; - } + total += ZSTD_getSequenceLength(seqStore, sp + n).litLength; + } - return n; + return total; + } + + private static nuint sizeBlockSequences( + SeqDef_s* sp, + nuint nbSeqs, + nuint targetBudget, + nuint avgLitCost, + nuint avgSeqCost, + int firstSubBlock + ) + { + nuint n, + budget = 0, + inSize = 0; + /* generous estimate */ + nuint headerSize = (nuint)firstSubBlock * 120 * 256; + assert(firstSubBlock == 0 || firstSubBlock == 1); + budget += headerSize; + budget += sp[0].litLength * avgLitCost + avgSeqCost; + if (budget > targetBudget) + return 1; + inSize = (nuint)(sp[0].litLength + (sp[0].mlBase + 3)); + for (n = 1; n < nbSeqs; n++) + { + nuint currentCost = sp[n].litLength * avgLitCost + avgSeqCost; + budget += currentCost; + inSize += (nuint)(sp[n].litLength + (sp[n].mlBase + 3)); + if (budget > targetBudget && budget < inSize * 256) + break; } - /** ZSTD_compressSubBlock_multi() : - * Breaks super-block into multiple sub-blocks and compresses them. - * Entropy will be written into the first block. - * The following blocks use repeat_mode to compress. - * Sub-blocks are all compressed, except the last one when beneficial. 
- * @return : compressed size of the super block (which features multiple ZSTD blocks) - * or 0 if it failed to compress. */ - private static nuint ZSTD_compressSubBlock_multi( - SeqStore_t* seqStorePtr, - ZSTD_compressedBlockState_t* prevCBlock, - ZSTD_compressedBlockState_t* nextCBlock, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - ZSTD_CCtx_params_s* cctxParams, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - int bmi2, - uint lastBlock, - void* workspace, - nuint wkspSize - ) + return n; + } + + /** ZSTD_compressSubBlock_multi() : + * Breaks super-block into multiple sub-blocks and compresses them. + * Entropy will be written into the first block. + * The following blocks use repeat_mode to compress. + * Sub-blocks are all compressed, except the last one when beneficial. + * @return : compressed size of the super block (which features multiple ZSTD blocks) + * or 0 if it failed to compress. */ + private static nuint ZSTD_compressSubBlock_multi( + SeqStore_t* seqStorePtr, + ZSTD_compressedBlockState_t* prevCBlock, + ZSTD_compressedBlockState_t* nextCBlock, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + ZSTD_CCtx_params_s* cctxParams, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + int bmi2, + uint lastBlock, + void* workspace, + nuint wkspSize + ) + { + SeqDef_s* sstart = seqStorePtr->sequencesStart; + SeqDef_s* send = seqStorePtr->sequences; + /* tracks progresses within seqStorePtr->sequences */ + SeqDef_s* sp = sstart; + nuint nbSeqs = (nuint)(send - sstart); + byte* lstart = seqStorePtr->litStart; + byte* lend = seqStorePtr->lit; + byte* lp = lstart; + nuint nbLiterals = (nuint)(lend - lstart); + byte* ip = (byte*)src; + byte* iend = ip + srcSize; + byte* ostart = (byte*)dst; + byte* oend = ostart + dstCapacity; + byte* op = ostart; + byte* llCodePtr = seqStorePtr->llCode; + byte* mlCodePtr = seqStorePtr->mlCode; + byte* ofCodePtr = seqStorePtr->ofCode; + /* enforce minimum size, to reduce undesirable side 
effects */ + const nuint minTarget = 1340; + nuint targetCBlockSize = + minTarget > cctxParams->targetCBlockSize ? minTarget : cctxParams->targetCBlockSize; + int writeLitEntropy = + entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0; + int writeSeqEntropy = 1; + if (nbSeqs > 0) { - SeqDef_s* sstart = seqStorePtr->sequencesStart; - SeqDef_s* send = seqStorePtr->sequences; - /* tracks progresses within seqStorePtr->sequences */ - SeqDef_s* sp = sstart; - nuint nbSeqs = (nuint)(send - sstart); - byte* lstart = seqStorePtr->litStart; - byte* lend = seqStorePtr->lit; - byte* lp = lstart; - nuint nbLiterals = (nuint)(lend - lstart); - byte* ip = (byte*)src; - byte* iend = ip + srcSize; - byte* ostart = (byte*)dst; - byte* oend = ostart + dstCapacity; - byte* op = ostart; - byte* llCodePtr = seqStorePtr->llCode; - byte* mlCodePtr = seqStorePtr->mlCode; - byte* ofCodePtr = seqStorePtr->ofCode; - /* enforce minimum size, to reduce undesirable side effects */ - const nuint minTarget = 1340; - nuint targetCBlockSize = - minTarget > cctxParams->targetCBlockSize ? minTarget : cctxParams->targetCBlockSize; - int writeLitEntropy = - entropyMetadata->hufMetadata.hType == SymbolEncodingType_e.set_compressed ? 1 : 0; - int writeSeqEntropy = 1; - if (nbSeqs > 0) + EstimatedBlockSize ebs = ZSTD_estimateSubBlockSize( + lp, + nbLiterals, + ofCodePtr, + llCodePtr, + mlCodePtr, + nbSeqs, + &nextCBlock->entropy, + entropyMetadata, + workspace, + wkspSize, + writeLitEntropy, + writeSeqEntropy + ); + /* quick estimation */ + nuint avgLitCost = nbLiterals != 0 ? ebs.estLitSize * 256 / nbLiterals : 256; + nuint avgSeqCost = (ebs.estBlockSize - ebs.estLitSize) * 256 / nbSeqs; + nuint nbSubBlocks = + (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize > 1 + ? 
(ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize + : 1; + nuint n, + avgBlockBudget, + blockBudgetSupp = 0; + avgBlockBudget = ebs.estBlockSize * 256 / nbSubBlocks; + if (ebs.estBlockSize > srcSize) + return 0; + assert(nbSubBlocks > 0); + for (n = 0; n < nbSubBlocks - 1; n++) { - EstimatedBlockSize ebs = ZSTD_estimateSubBlockSize( - lp, - nbLiterals, - ofCodePtr, - llCodePtr, - mlCodePtr, - nbSeqs, - &nextCBlock->entropy, - entropyMetadata, - workspace, - wkspSize, - writeLitEntropy, - writeSeqEntropy + /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ + nuint seqCount = sizeBlockSequences( + sp, + (nuint)(send - sp), + avgBlockBudget + blockBudgetSupp, + avgLitCost, + avgSeqCost, + n == 0 ? 1 : 0 ); - /* quick estimation */ - nuint avgLitCost = nbLiterals != 0 ? ebs.estLitSize * 256 / nbLiterals : 256; - nuint avgSeqCost = (ebs.estBlockSize - ebs.estLitSize) * 256 / nbSeqs; - nuint nbSubBlocks = - (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize > 1 - ? (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize - : 1; - nuint n, - avgBlockBudget, - blockBudgetSupp = 0; - avgBlockBudget = ebs.estBlockSize * 256 / nbSubBlocks; - if (ebs.estBlockSize > srcSize) - return 0; - assert(nbSubBlocks > 0); - for (n = 0; n < nbSubBlocks - 1; n++) + assert(seqCount <= (nuint)(send - sp)); + if (sp + seqCount == send) + break; + assert(seqCount > 0); { - /* determine nb of sequences for current sub-block + nbLiterals from next sequence */ - nuint seqCount = sizeBlockSequences( + int litEntropyWritten = 0; + int seqEntropyWritten = 0; + nuint litSize = countLiterals(seqStorePtr, sp, seqCount); + nuint decompressedSize = ZSTD_seqDecompressedSize( + seqStorePtr, sp, - (nuint)(send - sp), - avgBlockBudget + blockBudgetSupp, - avgLitCost, - avgSeqCost, - n == 0 ? 
1 : 0 + seqCount, + litSize, + 0 + ); + nuint cSize = ZSTD_compressSubBlock( + &nextCBlock->entropy, + entropyMetadata, + sp, + seqCount, + lp, + litSize, + llCodePtr, + mlCodePtr, + ofCodePtr, + cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + writeSeqEntropy, + &litEntropyWritten, + &seqEntropyWritten, + 0 ); - assert(seqCount <= (nuint)(send - sp)); - if (sp + seqCount == send) - break; - assert(seqCount > 0); { - int litEntropyWritten = 0; - int seqEntropyWritten = 0; - nuint litSize = countLiterals(seqStorePtr, sp, seqCount); - nuint decompressedSize = ZSTD_seqDecompressedSize( - seqStorePtr, - sp, - seqCount, - litSize, - 0 - ); - nuint cSize = ZSTD_compressSubBlock( - &nextCBlock->entropy, - entropyMetadata, - sp, - seqCount, - lp, - litSize, - llCodePtr, - mlCodePtr, - ofCodePtr, - cctxParams, - op, - (nuint)(oend - op), - bmi2, - writeLitEntropy, - writeSeqEntropy, - &litEntropyWritten, - &seqEntropyWritten, - 0 - ); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (cSize > 0 && cSize < decompressedSize) + nuint err_code = cSize; + if (ERR_isError(err_code)) { - assert(ip + decompressedSize <= iend); - ip += decompressedSize; - lp += litSize; - op += cSize; - llCodePtr += seqCount; - mlCodePtr += seqCount; - ofCodePtr += seqCount; - if (litEntropyWritten != 0) - { - writeLitEntropy = 0; - } - - if (seqEntropyWritten != 0) - { - writeSeqEntropy = 0; - } - - sp += seqCount; - blockBudgetSupp = 0; + return err_code; } } - } - } - { - int litEntropyWritten = 0; - int seqEntropyWritten = 0; - nuint litSize = (nuint)(lend - lp); - nuint seqCount = (nuint)(send - sp); - nuint decompressedSize = ZSTD_seqDecompressedSize( - seqStorePtr, - sp, - seqCount, - litSize, - 1 - ); - nuint cSize = ZSTD_compressSubBlock( - &nextCBlock->entropy, - entropyMetadata, - sp, - seqCount, - lp, - litSize, - llCodePtr, - mlCodePtr, - ofCodePtr, - cctxParams, - op, - (nuint)(oend - op), - bmi2, - writeLitEntropy, - 
writeSeqEntropy, - &litEntropyWritten, - &seqEntropyWritten, - lastBlock - ); - { - nuint err_code = cSize; - if (ERR_isError(err_code)) + if (cSize > 0 && cSize < decompressedSize) { - return err_code; - } - } + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + if (litEntropyWritten != 0) + { + writeLitEntropy = 0; + } - if (cSize > 0 && cSize < decompressedSize) - { - assert(ip + decompressedSize <= iend); - ip += decompressedSize; - lp += litSize; - op += cSize; - llCodePtr += seqCount; - mlCodePtr += seqCount; - ofCodePtr += seqCount; - if (litEntropyWritten != 0) - { - writeLitEntropy = 0; - } + if (seqEntropyWritten != 0) + { + writeSeqEntropy = 0; + } - if (seqEntropyWritten != 0) - { - writeSeqEntropy = 0; + sp += seqCount; + blockBudgetSupp = 0; } - - sp += seqCount; } } + } - if (writeLitEntropy != 0) - { - memcpy( - &nextCBlock->entropy.huf, - &prevCBlock->entropy.huf, - (uint)sizeof(ZSTD_hufCTables_t) - ); - } - - if ( - writeSeqEntropy != 0 - && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata) != 0 - ) + { + int litEntropyWritten = 0; + int seqEntropyWritten = 0; + nuint litSize = (nuint)(lend - lp); + nuint seqCount = (nuint)(send - sp); + nuint decompressedSize = ZSTD_seqDecompressedSize( + seqStorePtr, + sp, + seqCount, + litSize, + 1 + ); + nuint cSize = ZSTD_compressSubBlock( + &nextCBlock->entropy, + entropyMetadata, + sp, + seqCount, + lp, + litSize, + llCodePtr, + mlCodePtr, + ofCodePtr, + cctxParams, + op, + (nuint)(oend - op), + bmi2, + writeLitEntropy, + writeSeqEntropy, + &litEntropyWritten, + &seqEntropyWritten, + lastBlock + ); { - return 0; + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - if (ip < iend) + if (cSize > 0 && cSize < decompressedSize) { - /* some data left : last part of the block sent uncompressed */ - nuint rSize = (nuint)(iend - ip); - nuint cSize = 
ZSTD_noCompressBlock(op, (nuint)(oend - op), ip, rSize, lastBlock); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + if (litEntropyWritten != 0) { - nuint err_code = cSize; - if (ERR_isError(err_code)) - { - return err_code; - } + writeLitEntropy = 0; } - assert(cSize != 0); - op += cSize; - if (sp < send) + if (seqEntropyWritten != 0) { - SeqDef_s* seq; - repcodes_s rep; - memcpy(&rep, prevCBlock->rep, (uint)sizeof(repcodes_s)); - for (seq = sstart; seq < sp; ++seq) - { - ZSTD_updateRep( - rep.rep, - seq->offBase, - ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0 ? 1U : 0U - ); - } - - memcpy(nextCBlock->rep, &rep, (uint)sizeof(repcodes_s)); + writeSeqEntropy = 0; } + + sp += seqCount; } + } - return (nuint)(op - ostart); + if (writeLitEntropy != 0) + { + memcpy( + &nextCBlock->entropy.huf, + &prevCBlock->entropy.huf, + (uint)sizeof(ZSTD_hufCTables_t) + ); } - /* ZSTD_compressSuperBlock() : - * Used to compress a super block when targetCBlockSize is being used. - * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. 
*/ - private static nuint ZSTD_compressSuperBlock( - ZSTD_CCtx_s* zc, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - uint lastBlock + if ( + writeSeqEntropy != 0 + && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata) != 0 ) { - ZSTD_entropyCTablesMetadata_t entropyMetadata; + return 0; + } + + if (ip < iend) + { + /* some data left : last part of the block sent uncompressed */ + nuint rSize = (nuint)(iend - ip); + nuint cSize = ZSTD_noCompressBlock(op, (nuint)(oend - op), ip, rSize, lastBlock); { - nuint err_code = ZSTD_buildBlockEntropyStats( - &zc->seqStore, - &zc->blockState.prevCBlock->entropy, - &zc->blockState.nextCBlock->entropy, - &zc->appliedParams, - &entropyMetadata, - zc->tmpWorkspace, - zc->tmpWkspSize - ); + nuint err_code = cSize; if (ERR_isError(err_code)) { return err_code; } } - return ZSTD_compressSubBlock_multi( + assert(cSize != 0); + op += cSize; + if (sp < send) + { + SeqDef_s* seq; + repcodes_s rep; + memcpy(&rep, prevCBlock->rep, (uint)sizeof(repcodes_s)); + for (seq = sstart; seq < sp; ++seq) + { + ZSTD_updateRep( + rep.rep, + seq->offBase, + ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0 ? 1U : 0U + ); + } + + memcpy(nextCBlock->rep, &rep, (uint)sizeof(repcodes_s)); + } + } + + return (nuint)(op - ostart); + } + + /* ZSTD_compressSuperBlock() : + * Used to compress a super block when targetCBlockSize is being used. + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. 
*/ + private static nuint ZSTD_compressSuperBlock( + ZSTD_CCtx_s* zc, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + uint lastBlock + ) + { + ZSTD_entropyCTablesMetadata_t entropyMetadata; + { + nuint err_code = ZSTD_buildBlockEntropyStats( &zc->seqStore, - zc->blockState.prevCBlock, - zc->blockState.nextCBlock, - &entropyMetadata, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, &zc->appliedParams, - dst, - dstCapacity, - src, - srcSize, - zc->bmi2, - lastBlock, + &entropyMetadata, zc->tmpWorkspace, zc->tmpWkspSize ); + if (ERR_isError(err_code)) + { + return err_code; + } } + + return ZSTD_compressSubBlock_multi( + &zc->seqStore, + zc->blockState.prevCBlock, + zc->blockState.nextCBlock, + &entropyMetadata, + &zc->appliedParams, + dst, + dstCapacity, + src, + srcSize, + zc->bmi2, + lastBlock, + zc->tmpWorkspace, + zc->tmpWkspSize + ); } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs index 414e75db0..4197aa631 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs @@ -1,587 +1,586 @@ using System.Diagnostics; using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + [MethodImpl(MethodImplOptions.AggressiveInlining)] + [Conditional("DEBUG")] + private static void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { - [MethodImpl(MethodImplOptions.AggressiveInlining)] - [Conditional("DEBUG")] - private static void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) - { - assert(ws->workspace <= ws->objectEnd); - assert(ws->objectEnd <= ws->tableEnd); - 
assert(ws->objectEnd <= ws->tableValidEnd); - assert(ws->tableEnd <= ws->allocStart); - assert(ws->tableValidEnd <= ws->allocStart); - assert(ws->allocStart <= ws->workspaceEnd); - assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); - assert(ws->workspace <= ws->initOnceStart); - } + assert(ws->workspace <= ws->objectEnd); + assert(ws->objectEnd <= ws->tableEnd); + assert(ws->objectEnd <= ws->tableValidEnd); + assert(ws->tableEnd <= ws->allocStart); + assert(ws->tableValidEnd <= ws->allocStart); + assert(ws->allocStart <= ws->workspaceEnd); + assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); + assert(ws->workspace <= ws->initOnceStart); + } - /** - * Align must be a power of 2. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_align(nuint size, nuint align) - { - nuint mask = align - 1; - assert(ZSTD_isPower2(align) != 0); - return size + mask & ~mask; - } + /** + * Align must be a power of 2. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_align(nuint size, nuint align) + { + nuint mask = align - 1; + assert(ZSTD_isPower2(align) != 0); + return size + mask & ~mask; + } - /** - * Use this to determine how much space in the workspace we will consume to - * allocate this object. (Normally it should be exactly the size of the object, - * but under special conditions, like ASAN, where we pad each object, it might - * be larger.) - * - * Since tables aren't currently redzoned, you don't need to call through this - * to figure out how much space you need for the matchState tables. Everything - * else is though. - * - * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_alloc_size(nuint size) - { - if (size == 0) - return 0; - return size; - } + /** + * Use this to determine how much space in the workspace we will consume to + * allocate this object. (Normally it should be exactly the size of the object, + * but under special conditions, like ASAN, where we pad each object, it might + * be larger.) + * + * Since tables aren't currently redzoned, you don't need to call through this + * to figure out how much space you need for the matchState tables. Everything + * else is though. + * + * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_alloc_size(nuint size) + { + if (size == 0) + return 0; + return size; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_aligned_alloc_size(nuint size, nuint alignment) - { - return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_aligned_alloc_size(nuint size, nuint alignment) + { + return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); + } - /** - * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. - * Used to determine the number of bytes required for a given "aligned". - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_aligned64_alloc_size(nuint size) - { - return ZSTD_cwksp_aligned_alloc_size(size, 64); - } + /** + * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. + * Used to determine the number of bytes required for a given "aligned". 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_aligned64_alloc_size(nuint size) + { + return ZSTD_cwksp_aligned_alloc_size(size, 64); + } - /** - * Returns the amount of additional space the cwksp must allocate - * for internal purposes (currently only alignment). + /** + * Returns the amount of additional space the cwksp must allocate + * for internal purposes (currently only alignment). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_slack_space_required() + { + /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES + * bytes to align the beginning of tables section and end of buffers; */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_slack_space_required() - { - /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES - * bytes to align the beginning of tables section and end of buffers; - */ - const nuint slackSpace = 64 * 2; - return slackSpace; - } + const nuint slackSpace = 64 * 2; + return slackSpace; + } - /** - * Return the number of additional bytes required to align a pointer to the given number of bytes. - * alignBytes must be a power of two. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_bytes_to_align_ptr(void* ptr, nuint alignBytes) - { - nuint alignBytesMask = alignBytes - 1; - nuint bytes = alignBytes - ((nuint)ptr & alignBytesMask) & alignBytesMask; - assert(ZSTD_isPower2(alignBytes) != 0); - assert(bytes < alignBytes); - return bytes; - } + /** + * Return the number of additional bytes required to align a pointer to the given number of bytes. + * alignBytes must be a power of two. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_bytes_to_align_ptr(void* ptr, nuint alignBytes) + { + nuint alignBytesMask = alignBytes - 1; + nuint bytes = alignBytes - ((nuint)ptr & alignBytesMask) & alignBytesMask; + assert(ZSTD_isPower2(alignBytes) != 0); + assert(bytes < alignBytes); + return bytes; + } - /** - * Returns the initial value for allocStart which is used to determine the position from - * which we can allocate from the end of the workspace. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) + /** + * Returns the initial value for allocStart which is used to determine the position from + * which we can allocate from the end of the workspace. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) + { + sbyte* endPtr = (sbyte*)ws->workspaceEnd; + assert(ZSTD_isPower2(64) != 0); + endPtr = endPtr - (nuint)endPtr % 64; + return endPtr; + } + + /** + * Internal function. Do not use directly. + * Reserves the given number of bytes within the aligned/buffer segment of the wksp, + * which counts from the end of the wksp (as opposed to the object/table segment). + * + * Returns a pointer to the beginning of that space. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, nuint bytes) + { + void* alloc = (byte*)ws->allocStart - bytes; + void* bottom = ws->tableEnd; + ZSTD_cwksp_assert_internal_consistency(ws); + assert(alloc >= bottom); + if (alloc < bottom) { - sbyte* endPtr = (sbyte*)ws->workspaceEnd; - assert(ZSTD_isPower2(64) != 0); - endPtr = endPtr - (nuint)endPtr % 64; - return endPtr; + ws->allocFailed = 1; + return null; } - /** - * Internal function. Do not use directly. 
- * Reserves the given number of bytes within the aligned/buffer segment of the wksp, - * which counts from the end of the wksp (as opposed to the object/table segment). - * - * Returns a pointer to the beginning of that space. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, nuint bytes) + if (alloc < ws->tableValidEnd) { - void* alloc = (byte*)ws->allocStart - bytes; - void* bottom = ws->tableEnd; - ZSTD_cwksp_assert_internal_consistency(ws); - assert(alloc >= bottom); - if (alloc < bottom) - { - ws->allocFailed = 1; - return null; - } - - if (alloc < ws->tableValidEnd) - { - ws->tableValidEnd = alloc; - } - - ws->allocStart = alloc; - return alloc; + ws->tableValidEnd = alloc; } - /** - * Moves the cwksp to the next phase, and does any necessary allocations. - * cwksp initialization must necessarily go through each phase in order. - * Returns a 0 on success, or zstd error - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_internal_advance_phase( - ZSTD_cwksp* ws, - ZSTD_cwksp_alloc_phase_e phase - ) + ws->allocStart = alloc; + return alloc; + } + + /** + * Moves the cwksp to the next phase, and does any necessary allocations. + * cwksp initialization must necessarily go through each phase in order. 
+ * Returns a 0 on success, or zstd error + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_internal_advance_phase( + ZSTD_cwksp* ws, + ZSTD_cwksp_alloc_phase_e phase + ) + { + assert(phase >= ws->phase); + if (phase > ws->phase) { - assert(phase >= ws->phase); - if (phase > ws->phase) + if ( + ws->phase < ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + && phase >= ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + ) { - if ( - ws->phase < ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once - && phase >= ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once - ) + ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); { - ws->tableValidEnd = ws->objectEnd; - ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); + void* alloc = ws->objectEnd; + nuint bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, 64); + void* objectEnd = (byte*)alloc + bytesToAlign; + if (objectEnd > ws->workspaceEnd) { - void* alloc = ws->objectEnd; - nuint bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, 64); - void* objectEnd = (byte*)alloc + bytesToAlign; - if (objectEnd > ws->workspaceEnd) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - - ws->objectEnd = objectEnd; - ws->tableEnd = objectEnd; - if (ws->tableValidEnd < ws->tableEnd) - { - ws->tableValidEnd = ws->tableEnd; - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } - } - ws->phase = phase; - ZSTD_cwksp_assert_internal_consistency(ws); + ws->objectEnd = objectEnd; + ws->tableEnd = objectEnd; + if (ws->tableValidEnd < ws->tableEnd) + { + ws->tableValidEnd = ws->tableEnd; + } + } } - return 0; - } - - /** - * Returns whether this object/buffer/etc was allocated in this workspace. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) - { - return ptr != null && ws->workspace <= ptr && ptr < ws->workspaceEnd ? 1 : 0; + ws->phase = phase; + ZSTD_cwksp_assert_internal_consistency(ws); } - /** - * Internal function. Do not use directly. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_internal( - ZSTD_cwksp* ws, - nuint bytes, - ZSTD_cwksp_alloc_phase_e phase - ) - { - void* alloc; - if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) - { - return null; - } + return 0; + } - alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes); - return alloc; - } + /** + * Returns whether this object/buffer/etc was allocated in this workspace. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_owns_buffer(ZSTD_cwksp* ws, void* ptr) + { + return ptr != null && ws->workspace <= ptr && ptr < ws->workspaceEnd ? 1 : 0; + } - /** - * Reserves and returns unaligned memory. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, nuint bytes) + /** + * Internal function. Do not use directly. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_internal( + ZSTD_cwksp* ws, + nuint bytes, + ZSTD_cwksp_alloc_phase_e phase + ) + { + void* alloc; + if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) { - return (byte*)ZSTD_cwksp_reserve_internal( - ws, - bytes, - ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_buffers - ); + return null; } - /** - * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). - * This memory has been initialized at least once in the past. - * This doesn't mean it has been initialized this time, and it might contain data from previous - * operations. 
- * The main usage is for algorithms that might need read access into uninitialized memory. - * The algorithm must maintain safety under these conditions and must make sure it doesn't - * leak any of the past data (directly or in side channels). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, nuint bytes) - { - nuint alignedBytes = ZSTD_cwksp_align(bytes, 64); - void* ptr = ZSTD_cwksp_reserve_internal( - ws, - alignedBytes, - ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once - ); - assert(((nuint)ptr & 64 - 1) == 0); - if (ptr != null && ptr < ws->initOnceStart) - { - memset( - ptr, - 0, - (uint)( - (nuint)((byte*)ws->initOnceStart - (byte*)ptr) < alignedBytes - ? (nuint)((byte*)ws->initOnceStart - (byte*)ptr) - : alignedBytes - ) - ); - ws->initOnceStart = ptr; - } + alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes); + return alloc; + } - return ptr; - } + /** + * Reserves and returns unaligned memory. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static byte* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, nuint bytes) + { + return (byte*)ZSTD_cwksp_reserve_internal( + ws, + bytes, + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_buffers + ); + } - /** - * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, nuint bytes) - { - void* ptr = ZSTD_cwksp_reserve_internal( - ws, - ZSTD_cwksp_align(bytes, 64), - ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned + /** + * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + * This memory has been initialized at least once in the past. + * This doesn't mean it has been initialized this time, and it might contain data from previous + * operations. 
+ * The main usage is for algorithms that might need read access into uninitialized memory. + * The algorithm must maintain safety under these conditions and must make sure it doesn't + * leak any of the past data (directly or in side channels). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, nuint bytes) + { + nuint alignedBytes = ZSTD_cwksp_align(bytes, 64); + void* ptr = ZSTD_cwksp_reserve_internal( + ws, + alignedBytes, + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once + ); + assert(((nuint)ptr & 64 - 1) == 0); + if (ptr != null && ptr < ws->initOnceStart) + { + memset( + ptr, + 0, + (uint)( + (nuint)((byte*)ws->initOnceStart - (byte*)ptr) < alignedBytes + ? (nuint)((byte*)ws->initOnceStart - (byte*)ptr) + : alignedBytes + ) ); - assert(((nuint)ptr & 64 - 1) == 0); - return ptr; + ws->initOnceStart = ptr; } - /** - * Aligned on 64 bytes. These buffers have the special property that - * their values remain constrained, allowing us to reuse them without - * memset()-ing them. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, nuint bytes) - { - ZSTD_cwksp_alloc_phase_e phase = - ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; - void* alloc; - void* end; - void* top; - if (ws->phase < phase) - { - if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) - { - return null; - } - } + return ptr; + } - alloc = ws->tableEnd; - end = (byte*)alloc + bytes; - top = ws->allocStart; - assert((bytes & sizeof(uint) - 1) == 0); - ZSTD_cwksp_assert_internal_consistency(ws); - assert(end <= top); - if (end > top) + /** + * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, nuint bytes) + { + void* ptr = ZSTD_cwksp_reserve_internal( + ws, + ZSTD_cwksp_align(bytes, 64), + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned + ); + assert(((nuint)ptr & 64 - 1) == 0); + return ptr; + } + + /** + * Aligned on 64 bytes. These buffers have the special property that + * their values remain constrained, allowing us to reuse them without + * memset()-ing them. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, nuint bytes) + { + ZSTD_cwksp_alloc_phase_e phase = + ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; + void* alloc; + void* end; + void* top; + if (ws->phase < phase) + { + if (ERR_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { - ws->allocFailed = 1; return null; } - - ws->tableEnd = end; - assert((bytes & 64 - 1) == 0); - assert(((nuint)alloc & 64 - 1) == 0); - return alloc; } - /** - * Aligned on sizeof(void*). 
- * Note : should happen only once, at workspace first initialization - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, nuint bytes) + alloc = ws->tableEnd; + end = (byte*)alloc + bytes; + top = ws->allocStart; + assert((bytes & sizeof(uint) - 1) == 0); + ZSTD_cwksp_assert_internal_consistency(ws); + assert(end <= top); + if (end > top) { - nuint roundedBytes = ZSTD_cwksp_align(bytes, (nuint)sizeof(void*)); - void* alloc = ws->objectEnd; - void* end = (byte*)alloc + roundedBytes; - assert((nuint)alloc % (nuint)sizeof(void*) == 0); - assert(bytes % (nuint)sizeof(void*) == 0); - ZSTD_cwksp_assert_internal_consistency(ws); - if ( - ws->phase != ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects - || end > ws->workspaceEnd - ) - { - ws->allocFailed = 1; - return null; - } - - ws->objectEnd = end; - ws->tableEnd = end; - ws->tableValidEnd = end; - return alloc; + ws->allocFailed = 1; + return null; } - /** - * with alignment control - * Note : should happen only once, at workspace first initialization - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void* ZSTD_cwksp_reserve_object_aligned( - ZSTD_cwksp* ws, - nuint byteSize, - nuint alignment + ws->tableEnd = end; + assert((bytes & 64 - 1) == 0); + assert(((nuint)alloc & 64 - 1) == 0); + return alloc; + } + + /** + * Aligned on sizeof(void*). 
+ * Note : should happen only once, at workspace first initialization + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, nuint bytes) + { + nuint roundedBytes = ZSTD_cwksp_align(bytes, (nuint)sizeof(void*)); + void* alloc = ws->objectEnd; + void* end = (byte*)alloc + roundedBytes; + assert((nuint)alloc % (nuint)sizeof(void*) == 0); + assert(bytes % (nuint)sizeof(void*) == 0); + ZSTD_cwksp_assert_internal_consistency(ws); + if ( + ws->phase != ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects + || end > ws->workspaceEnd ) { - nuint mask = alignment - 1; - nuint surplus = alignment > (nuint)sizeof(void*) ? alignment - (nuint)sizeof(void*) : 0; - void* start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); - if (start == null) - return null; - if (surplus == 0) - return start; - assert(ZSTD_isPower2(alignment) != 0); - return (void*)((nuint)start + surplus & ~mask); + ws->allocFailed = 1; + return null; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) - { - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - ws->tableValidEnd = ws->objectEnd; - ZSTD_cwksp_assert_internal_consistency(ws); - } + ws->objectEnd = end; + ws->tableEnd = end; + ws->tableValidEnd = end; + return alloc; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) - { - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - if (ws->tableValidEnd < ws->tableEnd) - { - ws->tableValidEnd = ws->tableEnd; - } + /** + * with alignment control + * Note : should happen only once, at workspace first initialization + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void* ZSTD_cwksp_reserve_object_aligned( + ZSTD_cwksp* ws, + nuint byteSize, + nuint alignment + ) + { + nuint mask = alignment - 1; 
+ nuint surplus = alignment > (nuint)sizeof(void*) ? alignment - (nuint)sizeof(void*) : 0; + void* start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); + if (start == null) + return null; + if (surplus == 0) + return start; + assert(ZSTD_isPower2(alignment) != 0); + return (void*)((nuint)start + surplus & ~mask); + } - ZSTD_cwksp_assert_internal_consistency(ws); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) + { + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + ws->tableValidEnd = ws->objectEnd; + ZSTD_cwksp_assert_internal_consistency(ws); + } - /** - * Zero the part of the allocated tables not already marked clean. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) + { + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + if (ws->tableValidEnd < ws->tableEnd) { - assert(ws->tableValidEnd >= ws->objectEnd); - assert(ws->tableValidEnd <= ws->allocStart); - if (ws->tableValidEnd < ws->tableEnd) - { - memset( - ws->tableValidEnd, - 0, - (uint)(nuint)((byte*)ws->tableEnd - (byte*)ws->tableValidEnd) - ); - } - - ZSTD_cwksp_mark_tables_clean(ws); + ws->tableValidEnd = ws->tableEnd; } - /** - * Invalidates table allocations. - * All other allocations remain valid. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) - { - ws->tableEnd = ws->objectEnd; - ZSTD_cwksp_assert_internal_consistency(ws); + ZSTD_cwksp_assert_internal_consistency(ws); + } + + /** + * Zero the part of the allocated tables not already marked clean. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) + { + assert(ws->tableValidEnd >= ws->objectEnd); + assert(ws->tableValidEnd <= ws->allocStart); + if (ws->tableValidEnd < ws->tableEnd) + { + memset( + ws->tableValidEnd, + 0, + (uint)(nuint)((byte*)ws->tableEnd - (byte*)ws->tableValidEnd) + ); } - /** - * Invalidates all buffer, aligned, and table allocations. - * Object allocations remain valid. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_clear(ZSTD_cwksp* ws) - { - ws->tableEnd = ws->objectEnd; - ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); - ws->allocFailed = 0; - if (ws->phase > ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once) - { - ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; - } + ZSTD_cwksp_mark_tables_clean(ws); + } - ZSTD_cwksp_assert_internal_consistency(ws); - } + /** + * Invalidates table allocations. + * All other allocations remain valid. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) + { + ws->tableEnd = ws->objectEnd; + ZSTD_cwksp_assert_internal_consistency(ws); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_sizeof(ZSTD_cwksp* ws) + /** + * Invalidates all buffer, aligned, and table allocations. + * Object allocations remain valid. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_clear(ZSTD_cwksp* ws) + { + ws->tableEnd = ws->objectEnd; + ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); + ws->allocFailed = 0; + if (ws->phase > ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once) { - return (nuint)((byte*)ws->workspaceEnd - (byte*)ws->workspace); + ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_aligned_init_once; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) - { - return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) - + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); - } + ZSTD_cwksp_assert_internal_consistency(ws); + } - /** - * The provided workspace takes ownership of the buffer [start, start+size). - * Any existing values in the workspace are ignored (the previously managed - * buffer, if present, must be separately freed). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_init( - ZSTD_cwksp* ws, - void* start, - nuint size, - ZSTD_cwksp_static_alloc_e isStatic - ) - { - assert(((nuint)start & (nuint)(sizeof(void*) - 1)) == 0); - ws->workspace = start; - ws->workspaceEnd = (byte*)start + size; - ws->objectEnd = ws->workspace; - ws->tableValidEnd = ws->objectEnd; - ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); - ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects; - ws->isStatic = isStatic; - ZSTD_cwksp_clear(ws); - ws->workspaceOversizedDuration = 0; - ZSTD_cwksp_assert_internal_consistency(ws); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_sizeof(ZSTD_cwksp* ws) + { + return (nuint)((byte*)ws->workspaceEnd - (byte*)ws->workspace); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_create(ZSTD_cwksp* ws, nuint size, ZSTD_customMem customMem) - { - void* workspace = ZSTD_customMalloc(size, customMem); - if 
(workspace == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) + { + return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) + + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); + } - ZSTD_cwksp_init( - ws, - workspace, - size, - ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc - ); - return 0; - } + /** + * The provided workspace takes ownership of the buffer [start, start+size). + * Any existing values in the workspace are ignored (the previously managed + * buffer, if present, must be separately freed). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_init( + ZSTD_cwksp* ws, + void* start, + nuint size, + ZSTD_cwksp_static_alloc_e isStatic + ) + { + assert(((nuint)start & (nuint)(sizeof(void*) - 1)) == 0); + ws->workspace = start; + ws->workspaceEnd = (byte*)start + size; + ws->objectEnd = ws->workspace; + ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); + ws->phase = ZSTD_cwksp_alloc_phase_e.ZSTD_cwksp_alloc_objects; + ws->isStatic = isStatic; + ZSTD_cwksp_clear(ws); + ws->workspaceOversizedDuration = 0; + ZSTD_cwksp_assert_internal_consistency(ws); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_create(ZSTD_cwksp* ws, nuint size, ZSTD_customMem customMem) + { + void* workspace = ZSTD_customMalloc(size, customMem); + if (workspace == null) { - void* ptr = ws->workspace; - *ws = new ZSTD_cwksp(); - ZSTD_customFree(ptr, customMem); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - /** - * Moves the management of a workspace from one cwksp to another. 
The src cwksp - * is left in an invalid state (src must be re-init()'ed before it's used again). - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) - { - *dst = *src; - *src = new ZSTD_cwksp(); - } + ZSTD_cwksp_init( + ws, + workspace, + size, + ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc + ); + return 0; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_reserve_failed(ZSTD_cwksp* ws) - { - return ws->allocFailed; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) + { + void* ptr = ws->workspace; + *ws = new ZSTD_cwksp(); + ZSTD_customFree(ptr, customMem); + } - /* ZSTD_alignmentSpaceWithinBounds() : - * Returns if the estimated space needed for a wksp is within an acceptable limit of the - * actual amount of space used. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_estimated_space_within_bounds( - ZSTD_cwksp* ws, - nuint estimatedSpace - ) - { - return - estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) - && ZSTD_cwksp_used(ws) <= estimatedSpace + /** + * Moves the management of a workspace from one cwksp to another. The src cwksp + * is left in an invalid state (src must be re-init()'ed before it's used again). + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) + { + *dst = *src; + *src = new ZSTD_cwksp(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_reserve_failed(ZSTD_cwksp* ws) + { + return ws->allocFailed; + } + + /* ZSTD_alignmentSpaceWithinBounds() : + * Returns if the estimated space needed for a wksp is within an acceptable limit of the + * actual amount of space used. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_estimated_space_within_bounds( + ZSTD_cwksp* ws, + nuint estimatedSpace + ) + { + return + estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) + && ZSTD_cwksp_used(ws) <= estimatedSpace ? 1 : 0; - } + } - /*-************************************* - * Functions - ***************************************/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_cwksp_available_space(ZSTD_cwksp* ws) - { - return (nuint)((byte*)ws->allocStart - (byte*)ws->tableEnd); - } + /*-************************************* + * Functions + ***************************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_cwksp_available_space(ZSTD_cwksp* ws) + { + return (nuint)((byte*)ws->allocStart - (byte*)ws->tableEnd); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, nuint additionalNeededSpace) - { - return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace ? 1 : 0; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, nuint additionalNeededSpace) + { + return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace ? 
1 : 0; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, nuint additionalNeededSpace) - { - return ZSTD_cwksp_check_available(ws, additionalNeededSpace * 3); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, nuint additionalNeededSpace) + { + return ZSTD_cwksp_check_available(ws, additionalNeededSpace * 3); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, nuint additionalNeededSpace) - { - return - ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 - && ws->workspaceOversizedDuration > 128 + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, nuint additionalNeededSpace) + { + return + ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 + && ws->workspaceOversizedDuration > 128 ? 1 : 0; - } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_cwksp_bump_oversized_duration( - ZSTD_cwksp* ws, - nuint additionalNeededSpace - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_cwksp_bump_oversized_duration( + ZSTD_cwksp* ws, + nuint additionalNeededSpace + ) + { + if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0) { - if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0) - { - ws->workspaceOversizedDuration++; - } - else - { - ws->workspaceOversizedDuration = 0; - } + ws->workspaceOversizedDuration++; + } + else + { + ws->workspaceOversizedDuration = 0; } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs index 49d786a5a..0a840a2ff 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs @@ -1,307 +1,306 
@@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* note: several prototypes are already published in `zstd.h` : + * ZSTD_createDDict() + * ZSTD_createDDict_byReference() + * ZSTD_createDDict_advanced() + * ZSTD_freeDDict() + * ZSTD_initStaticDDict() + * ZSTD_sizeof_DDict() + * ZSTD_estimateDDictSize() + * ZSTD_getDictID_fromDict() + */ + private static void* ZSTD_DDict_dictContent(ZSTD_DDict_s* ddict) + { + assert(ddict != null); + return ddict->dictContent; + } + + private static nuint ZSTD_DDict_dictSize(ZSTD_DDict_s* ddict) + { + assert(ddict != null); + return ddict->dictSize; + } + + private static void ZSTD_copyDDictParameters(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) { - /* note: several prototypes are already published in `zstd.h` : - * ZSTD_createDDict() - * ZSTD_createDDict_byReference() - * ZSTD_createDDict_advanced() - * ZSTD_freeDDict() - * ZSTD_initStaticDDict() - * ZSTD_sizeof_DDict() - * ZSTD_estimateDDictSize() - * ZSTD_getDictID_fromDict() - */ - private static void* ZSTD_DDict_dictContent(ZSTD_DDict_s* ddict) + assert(dctx != null); + assert(ddict != null); + dctx->dictID = ddict->dictID; + dctx->prefixStart = ddict->dictContent; + dctx->virtualStart = ddict->dictContent; + dctx->dictEnd = (byte*)ddict->dictContent + ddict->dictSize; + dctx->previousDstEnd = dctx->dictEnd; + if (ddict->entropyPresent != 0) { - assert(ddict != null); - return ddict->dictContent; + dctx->litEntropy = 1; + dctx->fseEntropy = 1; + dctx->LLTptr = &ddict->entropy.LLTable.e0; + dctx->MLTptr = &ddict->entropy.MLTable.e0; + dctx->OFTptr = &ddict->entropy.OFTable.e0; + dctx->HUFptr = ddict->entropy.hufTable; + dctx->entropy.rep[0] = ddict->entropy.rep[0]; + dctx->entropy.rep[1] = ddict->entropy.rep[1]; + dctx->entropy.rep[2] = 
ddict->entropy.rep[2]; } - - private static nuint ZSTD_DDict_dictSize(ZSTD_DDict_s* ddict) + else { - assert(ddict != null); - return ddict->dictSize; + dctx->litEntropy = 0; + dctx->fseEntropy = 0; } + } - private static void ZSTD_copyDDictParameters(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + private static nuint ZSTD_loadEntropy_intoDDict( + ZSTD_DDict_s* ddict, + ZSTD_dictContentType_e dictContentType + ) + { + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) + return 0; + if (ddict->dictSize < 8) { - assert(dctx != null); - assert(ddict != null); - dctx->dictID = ddict->dictID; - dctx->prefixStart = ddict->dictContent; - dctx->virtualStart = ddict->dictContent; - dctx->dictEnd = (byte*)ddict->dictContent + ddict->dictSize; - dctx->previousDstEnd = dctx->dictEnd; - if (ddict->entropyPresent != 0) - { - dctx->litEntropy = 1; - dctx->fseEntropy = 1; - dctx->LLTptr = &ddict->entropy.LLTable.e0; - dctx->MLTptr = &ddict->entropy.MLTable.e0; - dctx->OFTptr = &ddict->entropy.OFTable.e0; - dctx->HUFptr = ddict->entropy.hufTable; - dctx->entropy.rep[0] = ddict->entropy.rep[0]; - dctx->entropy.rep[1] = ddict->entropy.rep[1]; - dctx->entropy.rep[2] = ddict->entropy.rep[2]; - } - else - { - dctx->litEntropy = 0; - dctx->fseEntropy = 0; - } + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return 0; } - private static nuint ZSTD_loadEntropy_intoDDict( - ZSTD_DDict_s* ddict, - ZSTD_dictContentType_e dictContentType - ) { - ddict->dictID = 0; - ddict->entropyPresent = 0; - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) - return 0; - if (ddict->dictSize < 8) + uint magic = MEM_readLE32(ddict->dictContent); + if (magic != 0xEC30A437) { if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + return 
unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); return 0; } - - { - uint magic = MEM_readLE32(ddict->dictContent); - if (magic != 0xEC30A437) - { - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); - return 0; - } - } - - ddict->dictID = MEM_readLE32((sbyte*)ddict->dictContent + 4); - if ( - ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize)) - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - ddict->entropyPresent = 1; - return 0; } - private static nuint ZSTD_initDDict_internal( - ZSTD_DDict_s* ddict, - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType + ddict->dictID = MEM_readLE32((sbyte*)ddict->dictContent + 4); + if ( + ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize)) ) { - if ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - || dict == null - || dictSize == 0 - ) - { - ddict->dictBuffer = null; - ddict->dictContent = dict; - if (dict == null) - dictSize = 0; - } - else - { - void* internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem); - ddict->dictBuffer = internalBuffer; - ddict->dictContent = internalBuffer; - if (internalBuffer == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - memcpy(internalBuffer, dict, (uint)dictSize); - } - - ddict->dictSize = dictSize; - ddict->entropy.hufTable[0] = 12 * 0x1000001; - { - /* parse dictionary content */ - nuint err_code = ZSTD_loadEntropy_intoDDict(ddict, dictContentType); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - public static ZSTD_DDict_s* ZSTD_createDDict_advanced( - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - 
ZSTD_dictContentType_e dictContentType, - ZSTD_customMem customMem + ddict->entropyPresent = 1; + return 0; + } + + private static nuint ZSTD_initDDict_internal( + ZSTD_DDict_s* ddict, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) + { + if ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + || dict == null + || dictSize == 0 ) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) - return null; - { - ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc( - (nuint)sizeof(ZSTD_DDict_s), - customMem - ); - if (ddict == null) - return null; - ddict->cMem = customMem; - { - nuint initResult = ZSTD_initDDict_internal( - ddict, - dict, - dictSize, - dictLoadMethod, - dictContentType - ); - if (ERR_isError(initResult)) - { - ZSTD_freeDDict(ddict); - return null; - } - } - - return ddict; - } + ddict->dictBuffer = null; + ddict->dictContent = dict; + if (dict == null) + dictSize = 0; } - - /*! ZSTD_createDDict() : - * Create a digested dictionary, to start decompression without startup delay. - * `dict` content is copied inside DDict. - * Consequently, `dict` can be released after `ZSTD_DDict` creation */ - public static ZSTD_DDict_s* ZSTD_createDDict(void* dict, nuint dictSize) + else { - ZSTD_customMem allocator = new ZSTD_customMem - { - customAlloc = null, - customFree = null, - opaque = null, - }; - return ZSTD_createDDict_advanced( - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, - ZSTD_dictContentType_e.ZSTD_dct_auto, - allocator - ); + void* internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; + if (internalBuffer == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + memcpy(internalBuffer, dict, (uint)dictSize); } - /*! 
ZSTD_createDDict_byReference() : - * Create a digested dictionary, to start decompression without startup delay. - * Dictionary content is simply referenced, it will be accessed during decompression. - * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ - public static ZSTD_DDict_s* ZSTD_createDDict_byReference(void* dictBuffer, nuint dictSize) + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = 12 * 0x1000001; { - ZSTD_customMem allocator = new ZSTD_customMem + /* parse dictionary content */ + nuint err_code = ZSTD_loadEntropy_intoDDict(ddict, dictContentType); + if (ERR_isError(err_code)) { - customAlloc = null, - customFree = null, - opaque = null, - }; - return ZSTD_createDDict_advanced( - dictBuffer, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - ZSTD_dictContentType_e.ZSTD_dct_auto, - allocator - ); + return err_code; + } } - public static ZSTD_DDict_s* ZSTD_initStaticDDict( - void* sBuffer, - nuint sBufferSize, - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType + return 0; + } + + public static ZSTD_DDict_s* ZSTD_createDDict_advanced( + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem + ) + { + if ( + ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 ) + return null; { - nuint neededSpace = - (nuint)sizeof(ZSTD_DDict_s) - + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 
0 : dictSize); - ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)sBuffer; - assert(sBuffer != null); - assert(dict != null); - if (((nuint)sBuffer & 7) != 0) - return null; - if (sBufferSize < neededSpace) + ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DDict_s), + customMem + ); + if (ddict == null) return null; - if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy) + ddict->cMem = customMem; { - memcpy(ddict + 1, dict, (uint)dictSize); - dict = ddict + 1; + nuint initResult = ZSTD_initDDict_internal( + ddict, + dict, + dictSize, + dictLoadMethod, + dictContentType + ); + if (ERR_isError(initResult)) + { + ZSTD_freeDDict(ddict); + return null; + } } - if ( - ERR_isError( - ZSTD_initDDict_internal( - ddict, - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - dictContentType - ) - ) - ) - return null; return ddict; } + } - /*! ZSTD_freeDDict() : - * Function frees memory allocated with ZSTD_createDDict() - * If a NULL pointer is passed, no operation is performed. */ - public static nuint ZSTD_freeDDict(ZSTD_DDict_s* ddict) + /*! ZSTD_createDDict() : + * Create a digested dictionary, to start decompression without startup delay. + * `dict` content is copied inside DDict. + * Consequently, `dict` can be released after `ZSTD_DDict` creation */ + public static ZSTD_DDict_s* ZSTD_createDDict(void* dict, nuint dictSize) + { + ZSTD_customMem allocator = new ZSTD_customMem { - if (ddict == null) - return 0; - { - ZSTD_customMem cMem = ddict->cMem; - ZSTD_customFree(ddict->dictBuffer, cMem); - ZSTD_customFree(ddict, cMem); - return 0; - } - } + customAlloc = null, + customFree = null, + opaque = null, + }; + return ZSTD_createDDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto, + allocator + ); + } - /*! ZSTD_estimateDDictSize() : - * Estimate amount of memory that will be needed to create a dictionary for decompression. 
- * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ - public static nuint ZSTD_estimateDDictSize( - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod - ) + /*! ZSTD_createDDict_byReference() : + * Create a digested dictionary, to start decompression without startup delay. + * Dictionary content is simply referenced, it will be accessed during decompression. + * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ + public static ZSTD_DDict_s* ZSTD_createDDict_byReference(void* dictBuffer, nuint dictSize) + { + ZSTD_customMem allocator = new ZSTD_customMem { - return (nuint)sizeof(ZSTD_DDict_s) - + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); - } + customAlloc = null, + customFree = null, + opaque = null, + }; + return ZSTD_createDDict_advanced( + dictBuffer, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto, + allocator + ); + } - public static nuint ZSTD_sizeof_DDict(ZSTD_DDict_s* ddict) + public static ZSTD_DDict_s* ZSTD_initStaticDDict( + void* sBuffer, + nuint sBufferSize, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) + { + nuint neededSpace = + (nuint)sizeof(ZSTD_DDict_s) + + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); + ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)sBuffer; + assert(sBuffer != null); + assert(dict != null); + if (((nuint)sBuffer & 7) != 0) + return null; + if (sBufferSize < neededSpace) + return null; + if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy) { - if (ddict == null) - return 0; - return (nuint)sizeof(ZSTD_DDict_s) + (ddict->dictBuffer != null ? ddict->dictSize : 0); + memcpy(ddict + 1, dict, (uint)dictSize); + dict = ddict + 1; } - /*! ZSTD_getDictID_fromDDict() : - * Provides the dictID of the dictionary loaded into `ddict`. 
- * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ - public static uint ZSTD_getDictID_fromDDict(ZSTD_DDict_s* ddict) + if ( + ERR_isError( + ZSTD_initDDict_internal( + ddict, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dictContentType + ) + ) + ) + return null; + return ddict; + } + + /*! ZSTD_freeDDict() : + * Function frees memory allocated with ZSTD_createDDict() + * If a NULL pointer is passed, no operation is performed. */ + public static nuint ZSTD_freeDDict(ZSTD_DDict_s* ddict) + { + if (ddict == null) + return 0; { - if (ddict == null) - return 0; - return ddict->dictID; + ZSTD_customMem cMem = ddict->cMem; + ZSTD_customFree(ddict->dictBuffer, cMem); + ZSTD_customFree(ddict, cMem); + return 0; } } + + /*! ZSTD_estimateDDictSize() : + * Estimate amount of memory that will be needed to create a dictionary for decompression. + * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ + public static nuint ZSTD_estimateDDictSize( + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod + ) + { + return (nuint)sizeof(ZSTD_DDict_s) + + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); + } + + public static nuint ZSTD_sizeof_DDict(ZSTD_DDict_s* ddict) + { + if (ddict == null) + return 0; + return (nuint)sizeof(ZSTD_DDict_s) + (ddict->dictBuffer != null ? ddict->dictSize : 0); + } + + /*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. 
*/ + public static uint ZSTD_getDictID_fromDDict(ZSTD_DDict_s* ddict) + { + if (ddict == null) + return 0; + return ddict->dictID; + } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs index 737aae945..8e629a5c6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs @@ -1,1176 +1,1642 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* Hash function to determine starting position of dict insertion within the table + * Returns an index between [0, hashSet->ddictPtrTableSize] + */ + private static nuint ZSTD_DDictHashSet_getIndex(ZSTD_DDictHashSet* hashSet, uint dictID) { - /* Hash function to determine starting position of dict insertion within the table - * Returns an index between [0, hashSet->ddictPtrTableSize] - */ - private static nuint ZSTD_DDictHashSet_getIndex(ZSTD_DDictHashSet* hashSet, uint dictID) + ulong hash = ZSTD_XXH64(&dictID, sizeof(uint), 0); + return (nuint)(hash & hashSet->ddictPtrTableSize - 1); + } + + /* Adds DDict to a hashset without resizing it. + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. + * Returns 0 if successful, or a zstd error code if something went wrong. 
+ */ + private static nuint ZSTD_DDictHashSet_emplaceDDict( + ZSTD_DDictHashSet* hashSet, + ZSTD_DDict_s* ddict + ) + { + uint dictID = ZSTD_getDictID_fromDDict(ddict); + nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); + nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; + if (hashSet->ddictPtrCount == hashSet->ddictPtrTableSize) { - ulong hash = ZSTD_XXH64(&dictID, sizeof(uint), 0); - return (nuint)(hash & hashSet->ddictPtrTableSize - 1); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - /* Adds DDict to a hashset without resizing it. - * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. - * Returns 0 if successful, or a zstd error code if something went wrong. - */ - private static nuint ZSTD_DDictHashSet_emplaceDDict( - ZSTD_DDictHashSet* hashSet, - ZSTD_DDict_s* ddict - ) + while (hashSet->ddictPtrTable[idx] != null) { - uint dictID = ZSTD_getDictID_fromDDict(ddict); - nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); - nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; - if (hashSet->ddictPtrCount == hashSet->ddictPtrTableSize) + if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + hashSet->ddictPtrTable[idx] = ddict; + return 0; } - while (hashSet->ddictPtrTable[idx] != null) - { - if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) - { - hashSet->ddictPtrTable[idx] = ddict; - return 0; - } + idx &= idxRangeMask; + idx++; + } - idx &= idxRangeMask; - idx++; - } + hashSet->ddictPtrTable[idx] = ddict; + hashSet->ddictPtrCount++; + return 0; + } - hashSet->ddictPtrTable[idx] = ddict; - hashSet->ddictPtrCount++; - return 0; + /* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and + * rehashes all values, allocates new table, frees old table. + * Returns 0 on success, otherwise a zstd error code. 
+ */ + private static nuint ZSTD_DDictHashSet_expand( + ZSTD_DDictHashSet* hashSet, + ZSTD_customMem customMem + ) + { + nuint newTableSize = hashSet->ddictPtrTableSize * 2; + ZSTD_DDict_s** newTable = (ZSTD_DDict_s**)ZSTD_customCalloc( + (nuint)sizeof(ZSTD_DDict_s*) * newTableSize, + customMem + ); + ZSTD_DDict_s** oldTable = hashSet->ddictPtrTable; + nuint oldTableSize = hashSet->ddictPtrTableSize; + nuint i; + if (newTable == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - /* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and - * rehashes all values, allocates new table, frees old table. - * Returns 0 on success, otherwise a zstd error code. - */ - private static nuint ZSTD_DDictHashSet_expand( - ZSTD_DDictHashSet* hashSet, - ZSTD_customMem customMem - ) + hashSet->ddictPtrTable = newTable; + hashSet->ddictPtrTableSize = newTableSize; + hashSet->ddictPtrCount = 0; + for (i = 0; i < oldTableSize; ++i) { - nuint newTableSize = hashSet->ddictPtrTableSize * 2; - ZSTD_DDict_s** newTable = (ZSTD_DDict_s**)ZSTD_customCalloc( - (nuint)sizeof(ZSTD_DDict_s*) * newTableSize, - customMem - ); - ZSTD_DDict_s** oldTable = hashSet->ddictPtrTable; - nuint oldTableSize = hashSet->ddictPtrTableSize; - nuint i; - if (newTable == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } - - hashSet->ddictPtrTable = newTable; - hashSet->ddictPtrTableSize = newTableSize; - hashSet->ddictPtrCount = 0; - for (i = 0; i < oldTableSize; ++i) + if (oldTable[i] != null) { - if (oldTable[i] != null) + nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } } - - ZSTD_customFree(oldTable, customMem); - return 0; } - /* Fetches a DDict with the given dictID - * Returns the ZSTD_DDict* with the requested 
dictID. If it doesn't exist, then returns NULL. - */ - private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict( - ZSTD_DDictHashSet* hashSet, - uint dictID - ) + ZSTD_customFree(oldTable, customMem); + return 0; + } + + /* Fetches a DDict with the given dictID + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. + */ + private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict( + ZSTD_DDictHashSet* hashSet, + uint dictID + ) + { + nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); + nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; + for (; ; ) { - nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); - nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; - for (; ; ) + nuint currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]); + if (currDictID == dictID || currDictID == 0) { - nuint currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]); - if (currDictID == dictID || currDictID == 0) - { - break; - } - else - { - idx &= idxRangeMask; - idx++; - } + break; } - - return hashSet->ddictPtrTable[idx]; - } - - /* Allocates space for and returns a ddict hash set - * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with. - * Returns NULL if allocation failed. - */ - private static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) - { - ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc( - (nuint)sizeof(ZSTD_DDictHashSet), - customMem - ); - if (ret == null) - return null; - ret->ddictPtrTable = (ZSTD_DDict_s**)ZSTD_customCalloc( - (nuint)(64 * sizeof(ZSTD_DDict_s*)), - customMem - ); - if (ret->ddictPtrTable == null) + else { - ZSTD_customFree(ret, customMem); - return null; + idx &= idxRangeMask; + idx++; } + } + + return hashSet->ddictPtrTable[idx]; + } + + /* Allocates space for and returns a ddict hash set + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with. 
+ * Returns NULL if allocation failed. + */ + private static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) + { + ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DDictHashSet), + customMem + ); + if (ret == null) + return null; + ret->ddictPtrTable = (ZSTD_DDict_s**)ZSTD_customCalloc( + (nuint)(64 * sizeof(ZSTD_DDict_s*)), + customMem + ); + if (ret->ddictPtrTable == null) + { + ZSTD_customFree(ret, customMem); + return null; + } + + ret->ddictPtrTableSize = 64; + ret->ddictPtrCount = 0; + return ret; + } - ret->ddictPtrTableSize = 64; - ret->ddictPtrCount = 0; - return ret; + /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. + * Note: The ZSTD_DDict* within the table are NOT freed. + */ + private static void ZSTD_freeDDictHashSet( + ZSTD_DDictHashSet* hashSet, + ZSTD_customMem customMem + ) + { + if (hashSet != null && hashSet->ddictPtrTable != null) + { + ZSTD_customFree(hashSet->ddictPtrTable, customMem); } - /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. - * Note: The ZSTD_DDict* within the table are NOT freed. - */ - private static void ZSTD_freeDDictHashSet( - ZSTD_DDictHashSet* hashSet, - ZSTD_customMem customMem - ) + if (hashSet != null) { - if (hashSet != null && hashSet->ddictPtrTable != null) - { - ZSTD_customFree(hashSet->ddictPtrTable, customMem); - } + ZSTD_customFree(hashSet, customMem); + } + } - if (hashSet != null) + /* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. + * Returns 0 on success, or a ZSTD error. 
+ */ + private static nuint ZSTD_DDictHashSet_addDDict( + ZSTD_DDictHashSet* hashSet, + ZSTD_DDict_s* ddict, + ZSTD_customMem customMem + ) + { + if (hashSet->ddictPtrCount * 4 / hashSet->ddictPtrTableSize * 3 != 0) + { + nuint err_code = ZSTD_DDictHashSet_expand(hashSet, customMem); + if (ERR_isError(err_code)) { - ZSTD_customFree(hashSet, customMem); + return err_code; } } - /* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. - * Returns 0 on success, or a ZSTD error. - */ - private static nuint ZSTD_DDictHashSet_addDDict( - ZSTD_DDictHashSet* hashSet, - ZSTD_DDict_s* ddict, - ZSTD_customMem customMem - ) { - if (hashSet->ddictPtrCount * 4 / hashSet->ddictPtrTableSize * 3 != 0) + nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_DDictHashSet_expand(hashSet, customMem); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - { - nuint err_code = ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict); - if (ERR_isError(err_code)) - { - return err_code; - } - } + return 0; + } + /*-************************************************************* + * Context management + ***************************************************************/ + public static nuint ZSTD_sizeof_DCtx(ZSTD_DCtx_s* dctx) + { + if (dctx == null) return 0; - } + return (nuint)sizeof(ZSTD_DCtx_s) + + ZSTD_sizeof_DDict(dctx->ddictLocal) + + dctx->inBuffSize + + dctx->outBuffSize; + } - /*-************************************************************* - * Context management - ***************************************************************/ - public static nuint ZSTD_sizeof_DCtx(ZSTD_DCtx_s* dctx) - { - if (dctx == null) - return 0; - return (nuint)sizeof(ZSTD_DCtx_s) - + ZSTD_sizeof_DDict(dctx->ddictLocal) - + dctx->inBuffSize - + dctx->outBuffSize; - } + public static nuint ZSTD_estimateDCtxSize() + { + return (nuint)sizeof(ZSTD_DCtx_s); + } - public static nuint 
ZSTD_estimateDCtxSize() - { - return (nuint)sizeof(ZSTD_DCtx_s); - } + private static nuint ZSTD_startingInputLength(ZSTD_format_e format) + { + nuint startingInputLength = (nuint)(format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1); + assert( + format == ZSTD_format_e.ZSTD_f_zstd1 + || format == ZSTD_format_e.ZSTD_f_zstd1_magicless + ); + return startingInputLength; + } + + private static void ZSTD_DCtx_resetParameters(ZSTD_DCtx_s* dctx) + { + assert(dctx->streamStage == ZSTD_dStreamStage.zdss_init); + dctx->format = ZSTD_format_e.ZSTD_f_zstd1; + dctx->maxWindowSize = ((uint)1 << 27) + 1; + dctx->outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_buffered; + dctx->forceIgnoreChecksum = ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum; + dctx->refMultipleDDicts = ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict; + dctx->disableHufAsm = 0; + dctx->maxBlockSizeParam = 0; + } + + private static void ZSTD_initDCtx_internal(ZSTD_DCtx_s* dctx) + { + dctx->staticSize = 0; + dctx->ddict = null; + dctx->ddictLocal = null; + dctx->dictEnd = null; + dctx->ddictIsCold = 0; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + dctx->inBuff = null; + dctx->inBuffSize = 0; + dctx->outBuffSize = 0; + dctx->streamStage = ZSTD_dStreamStage.zdss_init; + dctx->noForwardProgress = 0; + dctx->oversizedDuration = 0; + dctx->isFrameDecompression = 1; + dctx->ddictSet = null; + ZSTD_DCtx_resetParameters(dctx); + } + + public static ZSTD_DCtx_s* ZSTD_initStaticDCtx(void* workspace, nuint workspaceSize) + { + ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)workspace; + if (((nuint)workspace & 7) != 0) + return null; + if (workspaceSize < (nuint)sizeof(ZSTD_DCtx_s)) + return null; + ZSTD_initDCtx_internal(dctx); + dctx->staticSize = workspaceSize; + dctx->inBuff = (sbyte*)(dctx + 1); + return dctx; + } - private static nuint ZSTD_startingInputLength(ZSTD_format_e format) + private static ZSTD_DCtx_s* ZSTD_createDCtx_internal(ZSTD_customMem customMem) + { + if ( + ((customMem.customAlloc == null ? 
1 : 0) ^ (customMem.customFree == null ? 1 : 0)) + != 0 + ) + return null; { - nuint startingInputLength = (nuint)(format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1); - assert( - format == ZSTD_format_e.ZSTD_f_zstd1 - || format == ZSTD_format_e.ZSTD_f_zstd1_magicless + ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc( + (nuint)sizeof(ZSTD_DCtx_s), + customMem ); - return startingInputLength; + if (dctx == null) + return null; + dctx->customMem = customMem; + ZSTD_initDCtx_internal(dctx); + return dctx; } + } + + public static ZSTD_DCtx_s* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) + { + return ZSTD_createDCtx_internal(customMem); + } + + public static ZSTD_DCtx_s* ZSTD_createDCtx() + { + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); + } + + private static void ZSTD_clearDict(ZSTD_DCtx_s* dctx) + { + ZSTD_freeDDict(dctx->ddictLocal); + dctx->ddictLocal = null; + dctx->ddict = null; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + } - private static void ZSTD_DCtx_resetParameters(ZSTD_DCtx_s* dctx) + public static nuint ZSTD_freeDCtx(ZSTD_DCtx_s* dctx) + { + if (dctx == null) + return 0; + if (dctx->staticSize != 0) { - assert(dctx->streamStage == ZSTD_dStreamStage.zdss_init); - dctx->format = ZSTD_format_e.ZSTD_f_zstd1; - dctx->maxWindowSize = ((uint)1 << 27) + 1; - dctx->outBufferMode = ZSTD_bufferMode_e.ZSTD_bm_buffered; - dctx->forceIgnoreChecksum = ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum; - dctx->refMultipleDDicts = ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict; - dctx->disableHufAsm = 0; - dctx->maxBlockSizeParam = 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - private static void ZSTD_initDCtx_internal(ZSTD_DCtx_s* dctx) { - dctx->staticSize = 0; - dctx->ddict = null; - dctx->ddictLocal = null; - dctx->dictEnd = null; - dctx->ddictIsCold = 0; - dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + ZSTD_customMem cMem = dctx->customMem; + ZSTD_clearDict(dctx); + ZSTD_customFree(dctx->inBuff, cMem); 
dctx->inBuff = null; - dctx->inBuffSize = 0; - dctx->outBuffSize = 0; - dctx->streamStage = ZSTD_dStreamStage.zdss_init; - dctx->noForwardProgress = 0; - dctx->oversizedDuration = 0; - dctx->isFrameDecompression = 1; - dctx->ddictSet = null; - ZSTD_DCtx_resetParameters(dctx); - } + if (dctx->ddictSet != null) + { + ZSTD_freeDDictHashSet(dctx->ddictSet, cMem); + dctx->ddictSet = null; + } - public static ZSTD_DCtx_s* ZSTD_initStaticDCtx(void* workspace, nuint workspaceSize) - { - ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)workspace; - if (((nuint)workspace & 7) != 0) - return null; - if (workspaceSize < (nuint)sizeof(ZSTD_DCtx_s)) - return null; - ZSTD_initDCtx_internal(dctx); - dctx->staticSize = workspaceSize; - dctx->inBuff = (sbyte*)(dctx + 1); - return dctx; + ZSTD_customFree(dctx, cMem); + return 0; } + } + + /* no longer useful */ + public static void ZSTD_copyDCtx(ZSTD_DCtx_s* dstDCtx, ZSTD_DCtx_s* srcDCtx) + { + nuint toCopy = (nuint)((sbyte*)&dstDCtx->inBuff - (sbyte*)dstDCtx); + memcpy(dstDCtx, srcDCtx, (uint)toCopy); + } - private static ZSTD_DCtx_s* ZSTD_createDCtx_internal(ZSTD_customMem customMem) + /* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on + * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then + * accordingly sets the ddict to be used to decompress the frame. + * + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is. + * + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called. + */ + private static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx_s* dctx) + { + assert(dctx->refMultipleDDicts != default && dctx->ddictSet != null); + if (dctx->ddict != null) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 
1 : 0)) - != 0 - ) - return null; + ZSTD_DDict_s* frameDDict = ZSTD_DDictHashSet_getDDict( + dctx->ddictSet, + dctx->fParams.dictID + ); + if (frameDDict != null) { - ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc( - (nuint)sizeof(ZSTD_DCtx_s), - customMem - ); - if (dctx == null) - return null; - dctx->customMem = customMem; - ZSTD_initDCtx_internal(dctx); - return dctx; + ZSTD_clearDict(dctx); + dctx->dictID = dctx->fParams.dictID; + dctx->ddict = frameDDict; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; } } + } + + /*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. */ + public static uint ZSTD_isFrame(void* buffer, nuint size) + { + if (size < 4) + return 0; + { + uint magic = MEM_readLE32(buffer); + if (magic == 0xFD2FB528) + return 1; + if ((magic & 0xFFFFFFF0) == 0x184D2A50) + return 1; + } + + return 0; + } - public static ZSTD_DCtx_s* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) + /*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + */ + public static uint ZSTD_isSkippableFrame(void* buffer, nuint size) + { + if (size < 4) + return 0; { - return ZSTD_createDCtx_internal(customMem); + uint magic = MEM_readLE32(buffer); + if ((magic & 0xFFFFFFF0) == 0x184D2A50) + return 1; } - public static ZSTD_DCtx_s* ZSTD_createDCtx() + return 0; + } + + /** ZSTD_frameHeaderSize_internal() : + * srcSize must be large enough to reach header size fields. + * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. 
+ * @return : size of the Frame Header + * or an error code, which can be tested with ZSTD_isError() */ + private static nuint ZSTD_frameHeaderSize_internal( + void* src, + nuint srcSize, + ZSTD_format_e format + ) + { + nuint minInputSize = ZSTD_startingInputLength(format); + if (srcSize < minInputSize) { - return ZSTD_createDCtx_internal(ZSTD_defaultCMem); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - private static void ZSTD_clearDict(ZSTD_DCtx_s* dctx) { - ZSTD_freeDDict(dctx->ddictLocal); - dctx->ddictLocal = null; - dctx->ddict = null; - dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + byte fhd = ((byte*)src)[minInputSize - 1]; + uint dictID = (uint)(fhd & 3); + uint singleSegment = (uint)(fhd >> 5 & 1); + uint fcsId = (uint)(fhd >> 6); + return minInputSize + + (nuint)(singleSegment == 0 ? 1 : 0) + + ZSTD_did_fieldSize[dictID] + + ZSTD_fcs_fieldSize[fcsId] + + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0); } + } + + /** ZSTD_frameHeaderSize() : + * srcSize must be >= ZSTD_frameHeaderSize_prefix. + * @return : size of the Frame Header, + * or an error code (if srcSize is too small) */ + public static nuint ZSTD_frameHeaderSize(void* src, nuint srcSize) + { + return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + } - public static nuint ZSTD_freeDCtx(ZSTD_DCtx_s* dctx) + /** ZSTD_getFrameHeader_advanced() : + * decode Frame Header, or require larger `srcSize`. 
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + ** or an error code, which can be tested using ZSTD_isError() */ + public static nuint ZSTD_getFrameHeader_advanced( + ZSTD_frameHeader* zfhPtr, + void* src, + nuint srcSize, + ZSTD_format_e format + ) + { + byte* ip = (byte*)src; + nuint minInputSize = ZSTD_startingInputLength(format); + if (srcSize > 0) { - if (dctx == null) - return 0; - if (dctx->staticSize != 0) + if (src == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } + } + if (srcSize < minInputSize) + { + if (srcSize > 0 && format != ZSTD_format_e.ZSTD_f_zstd1_magicless) { - ZSTD_customMem cMem = dctx->customMem; - ZSTD_clearDict(dctx); - ZSTD_customFree(dctx->inBuff, cMem); - dctx->inBuff = null; - if (dctx->ddictSet != null) + /* when receiving less than @minInputSize bytes, + * control these bytes at least correspond to a supported magic number + * in order to error out early if they don't. + **/ + nuint toCopy = 4 < srcSize ? 
4 : srcSize; + byte* hbuf = stackalloc byte[4]; + MEM_writeLE32(hbuf, 0xFD2FB528); + assert(src != null); + memcpy(hbuf, src, (uint)toCopy); + if (MEM_readLE32(hbuf) != 0xFD2FB528) { - ZSTD_freeDDictHashSet(dctx->ddictSet, cMem); - dctx->ddictSet = null; + MEM_writeLE32(hbuf, 0x184D2A50); + memcpy(hbuf, src, (uint)toCopy); + if ((MEM_readLE32(hbuf) & 0xFFFFFFF0) != 0x184D2A50) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown) + ); + } } + } + + return minInputSize; + } - ZSTD_customFree(dctx, cMem); + *zfhPtr = new ZSTD_frameHeader(); + if (format != ZSTD_format_e.ZSTD_f_zstd1_magicless && MEM_readLE32(src) != 0xFD2FB528) + { + if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) + { + if (srcSize < 8) + return 8; + *zfhPtr = new ZSTD_frameHeader + { + frameType = ZSTD_frameType_e.ZSTD_skippableFrame, + dictID = MEM_readLE32(src) - 0x184D2A50, + headerSize = 8, + frameContentSize = MEM_readLE32((sbyte*)src + 4), + }; return 0; } + + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown)); } - /* no longer useful */ - public static void ZSTD_copyDCtx(ZSTD_DCtx_s* dstDCtx, ZSTD_DCtx_s* srcDCtx) { - nuint toCopy = (nuint)((sbyte*)&dstDCtx->inBuff - (sbyte*)dstDCtx); - memcpy(dstDCtx, srcDCtx, (uint)toCopy); + nuint fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); + if (srcSize < fhsize) + return fhsize; + zfhPtr->headerSize = (uint)fhsize; } - /* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on - * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then - * accordingly sets the ddict to be used to decompress the frame. - * - * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is. - * - * ZSTD_d_refMultipleDDicts must be enabled for this function to be called. 
- */ - private static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx_s* dctx) { - assert(dctx->refMultipleDDicts != default && dctx->ddictSet != null); - if (dctx->ddict != null) + byte fhdByte = ip[minInputSize - 1]; + nuint pos = minInputSize; + uint dictIDSizeCode = (uint)(fhdByte & 3); + uint checksumFlag = (uint)(fhdByte >> 2 & 1); + uint singleSegment = (uint)(fhdByte >> 5 & 1); + uint fcsID = (uint)(fhdByte >> 6); + ulong windowSize = 0; + uint dictID = 0; + ulong frameContentSize = unchecked(0UL - 1); + if ((fhdByte & 0x08) != 0) { - ZSTD_DDict_s* frameDDict = ZSTD_DDictHashSet_getDDict( - dctx->ddictSet, - dctx->fParams.dictID + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) ); - if (frameDDict != null) + } + + if (singleSegment == 0) + { + byte wlByte = ip[pos++]; + uint windowLog = (uint)((wlByte >> 3) + 10); + if (windowLog > (uint)(sizeof(nuint) == 4 ? 30 : 31)) { - ZSTD_clearDict(dctx); - dctx->dictID = dctx->fParams.dictID; - dctx->ddict = frameDDict; - dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); } + + windowSize = 1UL << (int)windowLog; + windowSize += (windowSize >> 3) * (ulong)(wlByte & 7); } - } - /*! ZSTD_isFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. - * Note 3 : Skippable Frame Identifiers are considered valid. 
*/ - public static uint ZSTD_isFrame(void* buffer, nuint size) - { - if (size < 4) - return 0; + switch (dictIDSizeCode) + { + default: + assert(0 != 0); + goto case 0; + case 0: + break; + case 1: + dictID = ip[pos]; + pos++; + break; + case 2: + dictID = MEM_readLE16(ip + pos); + pos += 2; + break; + case 3: + dictID = MEM_readLE32(ip + pos); + pos += 4; + break; + } + + switch (fcsID) { - uint magic = MEM_readLE32(buffer); - if (magic == 0xFD2FB528) - return 1; - if ((magic & 0xFFFFFFF0) == 0x184D2A50) - return 1; + default: + assert(0 != 0); + goto case 0; + case 0: + if (singleSegment != 0) + frameContentSize = ip[pos]; + break; + case 1: + frameContentSize = (ulong)(MEM_readLE16(ip + pos) + 256); + break; + case 2: + frameContentSize = MEM_readLE32(ip + pos); + break; + case 3: + frameContentSize = MEM_readLE64(ip + pos); + break; } + if (singleSegment != 0) + windowSize = frameContentSize; + zfhPtr->frameType = ZSTD_frameType_e.ZSTD_frame; + zfhPtr->frameContentSize = frameContentSize; + zfhPtr->windowSize = windowSize; + zfhPtr->blockSizeMax = (uint)(windowSize < 1 << 17 ? windowSize : 1 << 17); + zfhPtr->dictID = dictID; + zfhPtr->checksumFlag = checksumFlag; + } + + return 0; + } + + /** ZSTD_getFrameHeader() : + * decode Frame Header, or require larger `srcSize`. + * note : this function does not consume input, it only reads it. 
+ * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ + public static nuint ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, void* src, nuint srcSize) + { + return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + } + + /** ZSTD_getFrameContentSize() : + * compatible with legacy mode + * @return : decompressed size of the single frame pointed to be `src` if known, otherwise + * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ + public static ulong ZSTD_getFrameContentSize(void* src, nuint srcSize) + { + ZSTD_frameHeader zfh; + if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) + return unchecked(0UL - 2); + if (zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame) + { return 0; } + else + { + return zfh.frameContentSize; + } + } - /*! ZSTD_isSkippableFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. - */ - public static uint ZSTD_isSkippableFrame(void* buffer, nuint size) + private static nuint readSkippableFrameSize(void* src, nuint srcSize) + { + const nuint skippableHeaderSize = 8; + uint sizeU32; + if (srcSize < 8) { - if (size < 4) - return 0; - { - uint magic = MEM_readLE32(buffer); - if ((magic & 0xFFFFFFF0) == 0x184D2A50) - return 1; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - return 0; + sizeU32 = MEM_readLE32((byte*)src + 4); + if (sizeU32 + 8 < sizeU32) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } - /** ZSTD_frameHeaderSize_internal() : - * srcSize must be large enough to reach header size fields. 
- * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. - * @return : size of the Frame Header - * or an error code, which can be tested with ZSTD_isError() */ - private static nuint ZSTD_frameHeaderSize_internal( - void* src, - nuint srcSize, - ZSTD_format_e format - ) { - nuint minInputSize = ZSTD_startingInputLength(format); - if (srcSize < minInputSize) + nuint skippableSize = skippableHeaderSize + sizeU32; + if (skippableSize > srcSize) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - { - byte fhd = ((byte*)src)[minInputSize - 1]; - uint dictID = (uint)(fhd & 3); - uint singleSegment = (uint)(fhd >> 5 & 1); - uint fcsId = (uint)(fhd >> 6); - return minInputSize - + (nuint)(singleSegment == 0 ? 1 : 0) - + ZSTD_did_fieldSize[dictID] - + ZSTD_fcs_fieldSize[fcsId] - + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0); - } + return skippableSize; } + } - /** ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_frameHeaderSize_prefix. - * @return : size of the Frame Header, - * or an error code (if srcSize is too small) */ - public static nuint ZSTD_frameHeaderSize(void* src, nuint srcSize) + /*! ZSTD_readSkippableFrame() : + * Retrieves content of a skippable frame, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. + * + * @return : number of bytes written or a ZSTD error. 
+ */ + public static nuint ZSTD_readSkippableFrame( + void* dst, + nuint dstCapacity, + uint* magicVariant, + void* src, + nuint srcSize + ) + { + if (srcSize < 8) { - return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /** ZSTD_getFrameHeader_advanced() : - * decode Frame Header, or require larger `srcSize`. - * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - ** or an error code, which can be tested using ZSTD_isError() */ - public static nuint ZSTD_getFrameHeader_advanced( - ZSTD_frameHeader* zfhPtr, - void* src, - nuint srcSize, - ZSTD_format_e format - ) { - byte* ip = (byte*)src; - nuint minInputSize = ZSTD_startingInputLength(format); - if (srcSize > 0) + uint magicNumber = MEM_readLE32(src); + nuint skippableFrameSize = readSkippableFrameSize(src, srcSize); + nuint skippableContentSize = skippableFrameSize - 8; + if (ZSTD_isSkippableFrame(src, srcSize) == 0) { - if (src == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) + ); } - if (srcSize < minInputSize) + if (skippableFrameSize < 8 || skippableFrameSize > srcSize) { - if (srcSize > 0 && format != ZSTD_format_e.ZSTD_f_zstd1_magicless) - { - /* when receiving less than @minInputSize bytes, - * control these bytes at least correspond to a supported magic number - * in order to error out early if they don't. - **/ - nuint toCopy = 4 < srcSize ? 
4 : srcSize; - byte* hbuf = stackalloc byte[4]; - MEM_writeLE32(hbuf, 0xFD2FB528); - assert(src != null); - memcpy(hbuf, src, (uint)toCopy); - if (MEM_readLE32(hbuf) != 0xFD2FB528) - { - MEM_writeLE32(hbuf, 0x184D2A50); - memcpy(hbuf, src, (uint)toCopy); - if ((MEM_readLE32(hbuf) & 0xFFFFFFF0) != 0x184D2A50) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown) - ); - } - } - } - - return minInputSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - *zfhPtr = new ZSTD_frameHeader(); - if (format != ZSTD_format_e.ZSTD_f_zstd1_magicless && MEM_readLE32(src) != 0xFD2FB528) + if (skippableContentSize > dstCapacity) { - if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) - { - if (srcSize < 8) - return 8; - *zfhPtr = new ZSTD_frameHeader - { - frameType = ZSTD_frameType_e.ZSTD_skippableFrame, - dictID = MEM_readLE32(src) - 0x184D2A50, - headerSize = 8, - frameContentSize = MEM_readLE32((sbyte*)src + 4), - }; - return 0; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown)); + if (skippableContentSize > 0 && dst != null) + memcpy(dst, (byte*)src + 8, (uint)skippableContentSize); + if (magicVariant != null) + *magicVariant = magicNumber - 0x184D2A50; + return skippableContentSize; + } + } + + /** ZSTD_findDecompressedSize() : + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * note: compatible with legacy mode + * @return : decompressed size of the frames contained */ + public static ulong ZSTD_findDecompressedSize(void* src, nuint srcSize) + { + ulong totalDstSize = 0; + while (srcSize >= ZSTD_startingInputLength(ZSTD_format_e.ZSTD_f_zstd1)) + { + uint magicNumber = MEM_readLE32(src); + if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) + { + nuint skippableSize = readSkippableFrameSize(src, srcSize); + if (ERR_isError(skippableSize)) + return unchecked(0UL 
- 2); + assert(skippableSize <= srcSize); + src = (byte*)src + skippableSize; + srcSize -= skippableSize; + continue; } { - nuint fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); - if (srcSize < fhsize) - return fhsize; - zfhPtr->headerSize = (uint)fhsize; + ulong fcs = ZSTD_getFrameContentSize(src, srcSize); + if (fcs >= unchecked(0UL - 2)) + return fcs; + if (totalDstSize + fcs < totalDstSize) + return unchecked(0UL - 2); + totalDstSize += fcs; } { - byte fhdByte = ip[minInputSize - 1]; - nuint pos = minInputSize; - uint dictIDSizeCode = (uint)(fhdByte & 3); - uint checksumFlag = (uint)(fhdByte >> 2 & 1); - uint singleSegment = (uint)(fhdByte >> 5 & 1); - uint fcsID = (uint)(fhdByte >> 6); - ulong windowSize = 0; - uint dictID = 0; - ulong frameContentSize = unchecked(0UL - 1); - if ((fhdByte & 0x08) != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); - } + nuint frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ERR_isError(frameSrcSize)) + return unchecked(0UL - 2); + assert(frameSrcSize <= srcSize); + src = (byte*)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } - if (singleSegment == 0) - { - byte wlByte = ip[pos++]; - uint windowLog = (uint)((wlByte >> 3) + 10); - if (windowLog > (uint)(sizeof(nuint) == 4 ? 
30 : 31)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) - ); - } + if (srcSize != 0) + return unchecked(0UL - 2); + return totalDstSize; + } - windowSize = 1UL << (int)windowLog; - windowSize += (windowSize >> 3) * (ulong)(wlByte & 7); - } + /** ZSTD_getDecompressedSize() : + * compatible with legacy mode + * @return : decompressed size if known, 0 otherwise + note : 0 can mean any of the following : + - frame content is empty + - decompressed size field is not present in frame header + - frame header unknown / not supported + - frame header not complete (`srcSize` too small) */ + public static ulong ZSTD_getDecompressedSize(void* src, nuint srcSize) + { + ulong ret = ZSTD_getFrameContentSize(src, srcSize); + return ret >= unchecked(0UL - 2) ? 0 : ret; + } - switch (dictIDSizeCode) - { - default: - assert(0 != 0); - goto case 0; - case 0: - break; - case 1: - dictID = ip[pos]; - pos++; - break; - case 2: - dictID = MEM_readLE16(ip + pos); - pos += 2; - break; - case 3: - dictID = MEM_readLE32(ip + pos); - pos += 4; - break; - } + /** ZSTD_decodeFrameHeader() : + * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). + * If multiple DDict references are enabled, also will choose the correct DDict to use. 
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ + private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint headerSize) + { + nuint result = ZSTD_getFrameHeader_advanced( + &dctx->fParams, + src, + headerSize, + dctx->format + ); + if (ERR_isError(result)) + return result; + if (result > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if ( + dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts + && dctx->ddictSet != null + ) + { + ZSTD_DCtx_selectFrameDDict(dctx); + } - switch (fcsID) - { - default: - assert(0 != 0); - goto case 0; - case 0: - if (singleSegment != 0) - frameContentSize = ip[pos]; - break; - case 1: - frameContentSize = (ulong)(MEM_readLE16(ip + pos) + 256); - break; - case 2: - frameContentSize = MEM_readLE32(ip + pos); - break; - case 3: - frameContentSize = MEM_readLE64(ip + pos); - break; - } + if (dctx->fParams.dictID != 0 && dctx->dictID != dctx->fParams.dictID) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + } - if (singleSegment != 0) - windowSize = frameContentSize; - zfhPtr->frameType = ZSTD_frameType_e.ZSTD_frame; - zfhPtr->frameContentSize = frameContentSize; - zfhPtr->windowSize = windowSize; - zfhPtr->blockSizeMax = (uint)(windowSize < 1 << 17 ? windowSize : 1 << 17); - zfhPtr->dictID = dictID; - zfhPtr->checksumFlag = checksumFlag; - } + dctx->validateChecksum = (uint)( + dctx->fParams.checksumFlag != 0 && dctx->forceIgnoreChecksum == default ? 
1 : 0 + ); + if (dctx->validateChecksum != 0) + ZSTD_XXH64_reset(&dctx->xxhState, 0); + dctx->processedCSize += headerSize; + return 0; + } - return 0; - } + private static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(nuint ret) + { + ZSTD_frameSizeInfo frameSizeInfo; + System.Runtime.CompilerServices.Unsafe.SkipInit(out frameSizeInfo); + frameSizeInfo.compressedSize = ret; + frameSizeInfo.decompressedBound = unchecked(0UL - 2); + return frameSizeInfo; + } - /** ZSTD_getFrameHeader() : - * decode Frame Header, or require larger `srcSize`. - * note : this function does not consume input, it only reads it. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ - public static nuint ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, void* src, nuint srcSize) + private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo( + void* src, + nuint srcSize, + ZSTD_format_e format + ) + { + ZSTD_frameSizeInfo frameSizeInfo; + frameSizeInfo = new ZSTD_frameSizeInfo(); + if ( + format == ZSTD_format_e.ZSTD_f_zstd1 + && srcSize >= 8 + && (MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50 + ) { - return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); + assert( + ERR_isError(frameSizeInfo.compressedSize) + || frameSizeInfo.compressedSize <= srcSize + ); + return frameSizeInfo; } - - /** ZSTD_getFrameContentSize() : - * compatible with legacy mode - * @return : decompressed size of the single frame pointed to be `src` if known, otherwise - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. 
invalid magic number, srcSize too small) */ - public static ulong ZSTD_getFrameContentSize(void* src, nuint srcSize) + else { + byte* ip = (byte*)src; + byte* ipstart = ip; + nuint remainingSize = srcSize; + nuint nbBlocks = 0; ZSTD_frameHeader zfh; - if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) - return unchecked(0UL - 2); - if (zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame) - { - return 0; - } - else { - return zfh.frameContentSize; + nuint ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); + if (ERR_isError(ret)) + return ZSTD_errorFrameSizeInfo(ret); + if (ret > 0) + return ZSTD_errorFrameSizeInfo( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); } - } - private static nuint readSkippableFrameSize(void* src, nuint srcSize) - { - const nuint skippableHeaderSize = 8; - uint sizeU32; - if (srcSize < 8) + ip += zfh.headerSize; + remainingSize -= zfh.headerSize; + while (true) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + blockProperties_t blockProperties; + nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ERR_isError(cBlockSize)) + return ZSTD_errorFrameSizeInfo(cBlockSize); + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) + return ZSTD_errorFrameSizeInfo( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; + nbBlocks++; + if (blockProperties.lastBlock != 0) + break; } - sizeU32 = MEM_readLE32((byte*)src + 4); - if (sizeU32 + 8 < sizeU32) + if (zfh.checksumFlag != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); + if (remainingSize < 4) + return ZSTD_errorFrameSizeInfo( + unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) + ); + ip += 4; } - { - nuint skippableSize = skippableHeaderSize + sizeU32; - if (skippableSize > srcSize) - { - return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + frameSizeInfo.nbBlocks = nbBlocks; + frameSizeInfo.compressedSize = (nuint)(ip - ipstart); + frameSizeInfo.decompressedBound = + zfh.frameContentSize != unchecked(0UL - 1) + ? zfh.frameContentSize + : (ulong)nbBlocks * zfh.blockSizeMax; + return frameSizeInfo; + } + } - return skippableSize; - } + private static nuint ZSTD_findFrameCompressedSize_advanced( + void* src, + nuint srcSize, + ZSTD_format_e format + ) + { + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); + return frameSizeInfo.compressedSize; + } + + /** ZSTD_findFrameCompressedSize() : + * See docs in zstd.h + * Note: compatible with legacy mode */ + public static nuint ZSTD_findFrameCompressedSize(void* src, nuint srcSize) + { + return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + } + + /** ZSTD_decompressBound() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame or a skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the maximum decompressed size of the compressed source + */ + public static ulong ZSTD_decompressBound(void* src, nuint srcSize) + { + ulong bound = 0; + while (srcSize > 0) + { + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( + src, + srcSize, + ZSTD_format_e.ZSTD_f_zstd1 + ); + nuint compressedSize = frameSizeInfo.compressedSize; + ulong decompressedBound = frameSizeInfo.decompressedBound; + if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) + return unchecked(0UL - 2); + assert(srcSize >= compressedSize); + src = (byte*)src + compressedSize; + srcSize -= compressedSize; + bound += decompressedBound; } - /*! ZSTD_readSkippableFrame() : - * Retrieves content of a skippable frame, and writes it to dst buffer. - * - * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, - * i.e. 
magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested - * in the magicVariant. - * - * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. - * - * @return : number of bytes written or a ZSTD error. - */ - public static nuint ZSTD_readSkippableFrame( - void* dst, - nuint dstCapacity, - uint* magicVariant, - void* src, - nuint srcSize - ) + return bound; + } + + /*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. + * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). 
+ */ + public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) + { + nuint margin = 0; + uint maxBlockSize = 0; + while (srcSize > 0) { - if (srcSize < 8) + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( + src, + srcSize, + ZSTD_format_e.ZSTD_f_zstd1 + ); + nuint compressedSize = frameSizeInfo.compressedSize; + ulong decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_frameHeader zfh; { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + nuint err_code = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ERR_isError(err_code)) + { + return err_code; + } } + if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + if (zfh.frameType == ZSTD_frameType_e.ZSTD_frame) { - uint magicNumber = MEM_readLE32(src); - nuint skippableFrameSize = readSkippableFrameSize(src, srcSize); - nuint skippableContentSize = skippableFrameSize - 8; - if (ZSTD_isSkippableFrame(src, srcSize) == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); - } + margin += zfh.headerSize; + margin += (nuint)(zfh.checksumFlag != 0 ? 4 : 0); + margin += 3 * frameSizeInfo.nbBlocks; + maxBlockSize = + maxBlockSize > zfh.blockSizeMax ? 
maxBlockSize : zfh.blockSizeMax; + } + else + { + assert(zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame); + margin += compressedSize; + } - if (skippableFrameSize < 8 || skippableFrameSize > srcSize) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + assert(srcSize >= compressedSize); + src = (byte*)src + compressedSize; + srcSize -= compressedSize; + } - if (skippableContentSize > dstCapacity) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + margin += maxBlockSize; + return margin; + } - if (skippableContentSize > 0 && dst != null) - memcpy(dst, (byte*)src + 8, (uint)skippableContentSize); - if (magicVariant != null) - *magicVariant = magicNumber - 0x184D2A50; - return skippableContentSize; - } + /** ZSTD_insertBlock() : + * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ + public static nuint ZSTD_insertBlock(ZSTD_DCtx_s* dctx, void* blockStart, nuint blockSize) + { + ZSTD_checkContinuity(dctx, blockStart, blockSize); + dctx->previousDstEnd = (sbyte*)blockStart + blockSize; + return blockSize; + } + + private static nuint ZSTD_copyRawBlock( + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + if (srcSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - /** ZSTD_findDecompressedSize() : - * `srcSize` must be the exact length of some number of ZSTD compressed and/or - * skippable frames - * note: compatible with legacy mode - * @return : decompressed size of the frames contained */ - public static ulong ZSTD_findDecompressedSize(void* src, nuint srcSize) + if (dst == null) { - ulong totalDstSize = 0; - while (srcSize >= ZSTD_startingInputLength(ZSTD_format_e.ZSTD_f_zstd1)) - { - uint magicNumber = MEM_readLE32(src); - if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) - { - nuint skippableSize = readSkippableFrameSize(src, srcSize); - if (ERR_isError(skippableSize)) - return 
unchecked(0UL - 2); - assert(skippableSize <= srcSize); - src = (byte*)src + skippableSize; - srcSize -= skippableSize; - continue; - } + if (srcSize == 0) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); + } - { - ulong fcs = ZSTD_getFrameContentSize(src, srcSize); - if (fcs >= unchecked(0UL - 2)) - return fcs; - if (totalDstSize + fcs < totalDstSize) - return unchecked(0UL - 2); - totalDstSize += fcs; - } + memmove(dst, src, srcSize); + return srcSize; + } - { - nuint frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if (ERR_isError(frameSrcSize)) - return unchecked(0UL - 2); - assert(frameSrcSize <= srcSize); - src = (byte*)src + frameSrcSize; - srcSize -= frameSrcSize; - } - } + private static nuint ZSTD_setRleBlock(void* dst, nuint dstCapacity, byte b, nuint regenSize) + { + if (regenSize > dstCapacity) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - if (srcSize != 0) - return unchecked(0UL - 2); - return totalDstSize; + if (dst == null) + { + if (regenSize == 0) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); } - /** ZSTD_getDecompressedSize() : - * compatible with legacy mode - * @return : decompressed size if known, 0 otherwise - note : 0 can mean any of the following : - - frame content is empty - - decompressed size field is not present in frame header - - frame header unknown / not supported - - frame header not complete (`srcSize` too small) */ - public static ulong ZSTD_getDecompressedSize(void* src, nuint srcSize) + memset(dst, b, (uint)regenSize); + return regenSize; + } + + private static void ZSTD_DCtx_trace_end( + ZSTD_DCtx_s* dctx, + ulong uncompressedSize, + ulong compressedSize, + int streaming + ) { } + + /*! ZSTD_decompressFrame() : + * @dctx must be properly initialized + * will update *srcPtr and *srcSizePtr, + * to make *srcPtr progress by one frame. 
*/ + private static nuint ZSTD_decompressFrame( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void** srcPtr, + nuint* srcSizePtr + ) + { + byte* istart = (byte*)*srcPtr; + byte* ip = istart; + byte* ostart = (byte*)dst; + byte* oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; + byte* op = ostart; + nuint remainingSrcSize = *srcSizePtr; + if ( + remainingSrcSize + < (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) + ZSTD_blockHeaderSize + ) { - ulong ret = ZSTD_getFrameContentSize(src, srcSize); - return ret >= unchecked(0UL - 2) ? 0 : ret; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /** ZSTD_decodeFrameHeader() : - * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). - * If multiple DDict references are enabled, also will choose the correct DDict to use. - * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ - private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint headerSize) { - nuint result = ZSTD_getFrameHeader_advanced( - &dctx->fParams, - src, - headerSize, + nuint frameHeaderSize = ZSTD_frameHeaderSize_internal( + ip, + (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 
5 : 1), dctx->format ); - if (ERR_isError(result)) - return result; - if (result > 0) + if (ERR_isError(frameHeaderSize)) + return frameHeaderSize; + if (remainingSrcSize < frameHeaderSize + ZSTD_blockHeaderSize) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - if ( - dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts - && dctx->ddictSet != null - ) - { - ZSTD_DCtx_selectFrameDDict(dctx); - } - - if (dctx->fParams.dictID != 0 && dctx->dictID != dctx->fParams.dictID) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_wrong)); + nuint err_code = ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize); + if (ERR_isError(err_code)) + { + return err_code; + } } - dctx->validateChecksum = (uint)( - dctx->fParams.checksumFlag != 0 && dctx->forceIgnoreChecksum == default ? 1 : 0 - ); - if (dctx->validateChecksum != 0) - ZSTD_XXH64_reset(&dctx->xxhState, 0); - dctx->processedCSize += headerSize; - return 0; + ip += frameHeaderSize; + remainingSrcSize -= frameHeaderSize; } - private static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(nuint ret) + if (dctx->maxBlockSizeParam != 0) + dctx->fParams.blockSizeMax = + dctx->fParams.blockSizeMax < (uint)dctx->maxBlockSizeParam + ? 
dctx->fParams.blockSizeMax + : (uint)dctx->maxBlockSizeParam; + while (true) { - ZSTD_frameSizeInfo frameSizeInfo; - System.Runtime.CompilerServices.Unsafe.SkipInit(out frameSizeInfo); - frameSizeInfo.compressedSize = ret; - frameSizeInfo.decompressedBound = unchecked(0UL - 2); - return frameSizeInfo; - } + byte* oBlockEnd = oend; + nuint decodedSize; + blockProperties_t blockProperties; + nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); + if (ERR_isError(cBlockSize)) + return cBlockSize; + ip += ZSTD_blockHeaderSize; + remainingSrcSize -= ZSTD_blockHeaderSize; + if (cBlockSize > remainingSrcSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - private static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo( - void* src, - nuint srcSize, - ZSTD_format_e format - ) - { - ZSTD_frameSizeInfo frameSizeInfo; - frameSizeInfo = new ZSTD_frameSizeInfo(); - if ( - format == ZSTD_format_e.ZSTD_f_zstd1 - && srcSize >= 8 - && (MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50 - ) + if (ip >= op && ip < oBlockEnd) { - frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); - assert( - ERR_isError(frameSizeInfo.compressedSize) - || frameSizeInfo.compressedSize <= srcSize - ); - return frameSizeInfo; + oBlockEnd = op + (ip - op); } - else + + switch (blockProperties.blockType) { - byte* ip = (byte*)src; - byte* ipstart = ip; - nuint remainingSize = srcSize; - nuint nbBlocks = 0; - ZSTD_frameHeader zfh; - { - nuint ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); - if (ERR_isError(ret)) - return ZSTD_errorFrameSizeInfo(ret); - if (ret > 0) - return ZSTD_errorFrameSizeInfo( - unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) - ); - } + case blockType_e.bt_compressed: + assert(dctx->isFrameDecompression == 1); + decodedSize = ZSTD_decompressBlock_internal( + dctx, + op, + (nuint)(oBlockEnd - op), + ip, + cBlockSize, + streaming_operation.not_streaming + ); + break; + case 
blockType_e.bt_raw: + decodedSize = ZSTD_copyRawBlock(op, (nuint)(oend - op), ip, cBlockSize); + break; + case blockType_e.bt_rle: + decodedSize = ZSTD_setRleBlock( + op, + (nuint)(oBlockEnd - op), + *ip, + blockProperties.origSize + ); + break; + case blockType_e.bt_reserved: + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - ip += zfh.headerSize; - remainingSize -= zfh.headerSize; - while (true) + { + nuint err_code = decodedSize; + if (ERR_isError(err_code)) { - blockProperties_t blockProperties; - nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); - if (ERR_isError(cBlockSize)) - return ZSTD_errorFrameSizeInfo(cBlockSize); - if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) - return ZSTD_errorFrameSizeInfo( - unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) - ); - ip += ZSTD_blockHeaderSize + cBlockSize; - remainingSize -= ZSTD_blockHeaderSize + cBlockSize; - nbBlocks++; - if (blockProperties.lastBlock != 0) - break; + return err_code; } + } - if (zfh.checksumFlag != 0) - { - if (remainingSize < 4) - return ZSTD_errorFrameSizeInfo( - unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)) - ); - ip += 4; - } + if (dctx->validateChecksum != 0) + { + ZSTD_XXH64_update(&dctx->xxhState, op, decodedSize); + } - frameSizeInfo.nbBlocks = nbBlocks; - frameSizeInfo.compressedSize = (nuint)(ip - ipstart); - frameSizeInfo.decompressedBound = - zfh.frameContentSize != unchecked(0UL - 1) - ? 
zfh.frameContentSize - : (ulong)nbBlocks * zfh.blockSizeMax; - return frameSizeInfo; + if (decodedSize != 0) + { + op += decodedSize; } - } - private static nuint ZSTD_findFrameCompressedSize_advanced( - void* src, - nuint srcSize, - ZSTD_format_e format - ) - { - ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); - return frameSizeInfo.compressedSize; + assert(ip != null); + ip += cBlockSize; + remainingSrcSize -= cBlockSize; + if (blockProperties.lastBlock != 0) + break; } - /** ZSTD_findFrameCompressedSize() : - * See docs in zstd.h - * Note: compatible with legacy mode */ - public static nuint ZSTD_findFrameCompressedSize(void* src, nuint srcSize) + if (dctx->fParams.frameContentSize != unchecked(0UL - 1)) { - return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_format_e.ZSTD_f_zstd1); + if ((ulong)(op - ostart) != dctx->fParams.frameContentSize) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } } - /** ZSTD_decompressBound() : - * compatible with legacy mode - * `src` must point to the start of a ZSTD frame or a skippable frame - * `srcSize` must be at least as large as the frame contained - * @return : the maximum decompressed size of the compressed source - */ - public static ulong ZSTD_decompressBound(void* src, nuint srcSize) + if (dctx->fParams.checksumFlag != 0) { - ulong bound = 0; - while (srcSize > 0) + if (remainingSrcSize < 4) { - ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( - src, - srcSize, - ZSTD_format_e.ZSTD_f_zstd1 - ); - nuint compressedSize = frameSizeInfo.compressedSize; - ulong decompressedBound = frameSizeInfo.decompressedBound; - if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) - return unchecked(0UL - 2); - assert(srcSize >= compressedSize); - src = (byte*)src + compressedSize; - srcSize -= compressedSize; - bound += decompressedBound; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); } - 
return bound; - } - - /*! ZSTD_decompressionMargin() : - * Zstd supports in-place decompression, where the input and output buffers overlap. - * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, - * and the input buffer must be at the end of the output buffer. - * - * _______________________ Output Buffer ________________________ - * | | - * | ____ Input Buffer ____| - * | | | - * v v v - * |---------------------------------------|-----------|----------| - * ^ ^ ^ - * |___________________ Output_Size ___________________|_ Margin _| - * - * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). - * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or - * ZSTD_decompressDCtx(). - * NOTE: This function supports multi-frame input. - * - * @param src The compressed frame(s) - * @param srcSize The size of the compressed frame(s) - * @returns The decompression margin or an error that can be checked with ZSTD_isError(). - */ - public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) - { - nuint margin = 0; - uint maxBlockSize = 0; - while (srcSize > 0) + if (dctx->forceIgnoreChecksum == default) { - ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfo( - src, - srcSize, - ZSTD_format_e.ZSTD_f_zstd1 - ); - nuint compressedSize = frameSizeInfo.compressedSize; - ulong decompressedBound = frameSizeInfo.decompressedBound; - ZSTD_frameHeader zfh; + uint checkCalc = (uint)ZSTD_XXH64_digest(&dctx->xxhState); + uint checkRead; + checkRead = MEM_readLE32(ip); + if (checkRead != checkCalc) { - nuint err_code = ZSTD_getFrameHeader(&zfh, src, srcSize); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); } + } - if (ERR_isError(compressedSize) || decompressedBound == unchecked(0UL - 2)) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - if (zfh.frameType == ZSTD_frameType_e.ZSTD_frame) - { - margin += 
zfh.headerSize; - margin += (nuint)(zfh.checksumFlag != 0 ? 4 : 0); - margin += 3 * frameSizeInfo.nbBlocks; - maxBlockSize = - maxBlockSize > zfh.blockSizeMax ? maxBlockSize : zfh.blockSizeMax; - } - else - { - assert(zfh.frameType == ZSTD_frameType_e.ZSTD_skippableFrame); - margin += compressedSize; - } - - assert(srcSize >= compressedSize); - src = (byte*)src + compressedSize; - srcSize -= compressedSize; - } - - margin += maxBlockSize; - return margin; + ip += 4; + remainingSrcSize -= 4; } - /** ZSTD_insertBlock() : - * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ - public static nuint ZSTD_insertBlock(ZSTD_DCtx_s* dctx, void* blockStart, nuint blockSize) + ZSTD_DCtx_trace_end(dctx, (ulong)(op - ostart), (ulong)(ip - istart), 0); + *srcPtr = ip; + *srcSizePtr = remainingSrcSize; + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressMultiFrame( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize, + ZSTD_DDict_s* ddict + ) + { + void* dststart = dst; + int moreThan1Frame = 0; + assert(dict == null || ddict == null); + if (ddict != null) { - ZSTD_checkContinuity(dctx, blockStart, blockSize); - dctx->previousDstEnd = (sbyte*)blockStart + blockSize; - return blockSize; + dict = ZSTD_DDict_dictContent(ddict); + dictSize = ZSTD_DDict_dictSize(ddict); } - private static nuint ZSTD_copyRawBlock( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + while (srcSize >= ZSTD_startingInputLength(dctx->format)) { - if (srcSize > dstCapacity) + if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1 && srcSize >= 4) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + uint magicNumber = MEM_readLE32(src); + if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) + { + /* skippable frame detected : skip it */ + nuint skippableSize = readSkippableFrameSize(src, srcSize); + { + nuint err_code = skippableSize; + if 
(ERR_isError(err_code)) + { + return err_code; + } + } - if (dst == null) - { - if (srcSize == 0) - return 0; - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); + assert(skippableSize <= srcSize); + src = (byte*)src + skippableSize; + srcSize -= skippableSize; + continue; + } } - memmove(dst, src, srcSize); - return srcSize; - } - - private static nuint ZSTD_setRleBlock(void* dst, nuint dstCapacity, byte b, nuint regenSize) - { - if (regenSize > dstCapacity) + if (ddict != null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + /* we were called from ZSTD_decompress_usingDDict */ + nuint err_code = ZSTD_decompressBegin_usingDDict(dctx, ddict); + if (ERR_isError(err_code)) + { + return err_code; + } } - - if (dst == null) - { - if (regenSize == 0) - return 0; - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_null)); - } - - memset(dst, b, (uint)regenSize); - return regenSize; - } - - private static void ZSTD_DCtx_trace_end( - ZSTD_DCtx_s* dctx, - ulong uncompressedSize, - ulong compressedSize, - int streaming - ) { } - - /*! ZSTD_decompressFrame() : - * @dctx must be properly initialized - * will update *srcPtr and *srcSizePtr, - * to make *srcPtr progress by one frame. */ - private static nuint ZSTD_decompressFrame( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void** srcPtr, - nuint* srcSizePtr - ) - { - byte* istart = (byte*)*srcPtr; - byte* ip = istart; - byte* ostart = (byte*)dst; - byte* oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; - byte* op = ostart; - nuint remainingSrcSize = *srcSizePtr; - if ( - remainingSrcSize - < (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 
6 : 2) + ZSTD_blockHeaderSize - ) + else { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + nuint err_code = ZSTD_decompressBegin_usingDict(dctx, dict, dictSize); + if (ERR_isError(err_code)) + { + return err_code; + } } + ZSTD_checkContinuity(dctx, dst, dstCapacity); { - nuint frameHeaderSize = ZSTD_frameHeaderSize_internal( - ip, - (nuint)(dctx->format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1), - dctx->format - ); - if (ERR_isError(frameHeaderSize)) - return frameHeaderSize; - if (remainingSrcSize < frameHeaderSize + ZSTD_blockHeaderSize) + nuint res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); + if ( + ZSTD_getErrorCode(res) == ZSTD_ErrorCode.ZSTD_error_prefix_unknown + && moreThan1Frame == 1 + ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } + if (ERR_isError(res)) + return res; + assert(res <= dstCapacity); + if (res != 0) + dst = (byte*)dst + res; + dstCapacity -= res; + } + + moreThan1Frame = 1; + } + + if (srcSize != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + return (nuint)((byte*)dst - (byte*)dststart); + } + + /*! ZSTD_decompress_usingDict() : + * Decompression using a known Dictionary. + * Dictionary must be identical to the one used during compression. + * Note : This function loads the dictionary, resulting in significant startup delay. + * It's intended for a dictionary used only once. + * Note : When `dict == NULL || dictSize < 8` no dictionary is used. 
*/ + public static nuint ZSTD_decompress_usingDict( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + void* dict, + nuint dictSize + ) + { + return ZSTD_decompressMultiFrame( + dctx, + dst, + dstCapacity, + src, + srcSize, + dict, + dictSize, + null + ); + } + + private static ZSTD_DDict_s* ZSTD_getDDict(ZSTD_DCtx_s* dctx) + { + switch (dctx->dictUses) + { + default: + assert(0 != 0); + goto case ZSTD_dictUses_e.ZSTD_dont_use; + case ZSTD_dictUses_e.ZSTD_dont_use: + ZSTD_clearDict(dctx); + return null; + case ZSTD_dictUses_e.ZSTD_use_indefinitely: + return dctx->ddict; + case ZSTD_dictUses_e.ZSTD_use_once: + dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; + return dctx->ddict; + } + } + + /*! ZSTD_decompressDCtx() : + * Same as ZSTD_decompress(), + * requires an allocated ZSTD_DCtx. + * Compatible with sticky parameters (see below). + */ + public static nuint ZSTD_decompressDCtx( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_decompress_usingDDict( + dctx, + dst, + dstCapacity, + src, + srcSize, + ZSTD_getDDict(dctx) + ); + } + + /*! ZSTD_decompress() : + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * Multiple compressed frames can be decompressed at once with this method. + * The result will be the concatenation of all decompressed frames, back to back. + * `dstCapacity` is an upper bound of originalSize to regenerate. + * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). + * If maximum upper bound isn't known, prefer using streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). 
*/ + public static nuint ZSTD_decompress(void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint regenSize; + ZSTD_DCtx_s* dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); + if (dctx == null) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + + regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); + ZSTD_freeDCtx(dctx); + return regenSize; + } + + /*-************************************** + * Advanced Streaming Decompression API + * Bufferless and synchronous + ****************************************/ + public static nuint ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx_s* dctx) + { + return dctx->expected; + } + + /** + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we + * allow taking a partial block as the input. Currently only raw uncompressed blocks can + * be streamed. + * + * For blocks that can be streamed, this allows us to reduce the latency until we produce + * output, and avoid copying the input. + * + * @param inputSize - The total amount of input that the caller currently has. + */ + private static nuint ZSTD_nextSrcSizeToDecompressWithInputSize( + ZSTD_DCtx_s* dctx, + nuint inputSize + ) + { + if ( + !( + dctx->stage == ZSTD_dStage.ZSTDds_decompressBlock + || dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock + ) + ) + return dctx->expected; + if (dctx->bType != blockType_e.bt_raw) + return dctx->expected; + return inputSize <= 1 ? 1 + : inputSize <= dctx->expected ? 
inputSize + : dctx->expected; + } + + public static ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx_s* dctx) + { + switch (dctx->stage) + { + default: + assert(0 != 0); + goto case ZSTD_dStage.ZSTDds_getFrameHeaderSize; + case ZSTD_dStage.ZSTDds_getFrameHeaderSize: + case ZSTD_dStage.ZSTDds_decodeFrameHeader: + return ZSTD_nextInputType_e.ZSTDnit_frameHeader; + case ZSTD_dStage.ZSTDds_decodeBlockHeader: + return ZSTD_nextInputType_e.ZSTDnit_blockHeader; + case ZSTD_dStage.ZSTDds_decompressBlock: + return ZSTD_nextInputType_e.ZSTDnit_block; + case ZSTD_dStage.ZSTDds_decompressLastBlock: + return ZSTD_nextInputType_e.ZSTDnit_lastBlock; + case ZSTD_dStage.ZSTDds_checkChecksum: + return ZSTD_nextInputType_e.ZSTDnit_checksum; + case ZSTD_dStage.ZSTDds_decodeSkippableHeader: + case ZSTD_dStage.ZSTDds_skipFrame: + return ZSTD_nextInputType_e.ZSTDnit_skippableFrame; + } + } + + private static int ZSTD_isSkipFrame(ZSTD_DCtx_s* dctx) + { + return dctx->stage == ZSTD_dStage.ZSTDds_skipFrame ? 1 : 0; + } + + /** ZSTD_decompressContinue() : + * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) + * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) + * or an error code, which can be tested using ZSTD_isError() */ + public static nuint ZSTD_decompressContinue( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + if (srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + ZSTD_checkContinuity(dctx, dst, dstCapacity); + dctx->processedCSize += srcSize; + switch (dctx->stage) + { + case ZSTD_dStage.ZSTDds_getFrameHeaderSize: + assert(src != null); + if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1) { - nuint err_code = ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize); - if (ERR_isError(err_code)) + assert(srcSize >= 4); + if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) { - return 
err_code; + memcpy(dctx->headerBuffer, src, (uint)srcSize); + dctx->expected = 8 - srcSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeSkippableHeader; + return 0; } } - ip += frameHeaderSize; - remainingSrcSize -= frameHeaderSize; + dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); + if (ERR_isError(dctx->headerSize)) + return dctx->headerSize; + memcpy(dctx->headerBuffer, src, (uint)srcSize); + dctx->expected = dctx->headerSize - srcSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeFrameHeader; + return 0; + case ZSTD_dStage.ZSTDds_decodeFrameHeader: + assert(src != null); + memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, (uint)srcSize); + + { + nuint err_code = ZSTD_decodeFrameHeader( + dctx, + dctx->headerBuffer, + dctx->headerSize + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - if (dctx->maxBlockSizeParam != 0) - dctx->fParams.blockSizeMax = - dctx->fParams.blockSizeMax < (uint)dctx->maxBlockSizeParam - ? dctx->fParams.blockSizeMax - : (uint)dctx->maxBlockSizeParam; - while (true) + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + return 0; + case ZSTD_dStage.ZSTDds_decodeBlockHeader: { - byte* oBlockEnd = oend; - nuint decodedSize; - blockProperties_t blockProperties; - nuint cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); + blockProperties_t bp; + nuint cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ERR_isError(cBlockSize)) return cBlockSize; - ip += ZSTD_blockHeaderSize; - remainingSrcSize -= ZSTD_blockHeaderSize; - if (cBlockSize > remainingSrcSize) + if (cBlockSize > dctx->fParams.blockSizeMax) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize != 0) + { + dctx->stage = + bp.lastBlock 
!= 0 + ? ZSTD_dStage.ZSTDds_decompressLastBlock + : ZSTD_dStage.ZSTDds_decompressBlock; + return 0; } - if (ip >= op && ip < oBlockEnd) + if (bp.lastBlock != 0) + { + if (dctx->fParams.checksumFlag != 0) + { + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; + } + else + { + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + } + } + else { - oBlockEnd = op + (ip - op); + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; } - switch (blockProperties.blockType) + return 0; + } + + case ZSTD_dStage.ZSTDds_decompressLastBlock: + case ZSTD_dStage.ZSTDds_decompressBlock: + { + nuint rSize; + switch (dctx->bType) { case blockType_e.bt_compressed: assert(dctx->isFrameDecompression == 1); - decodedSize = ZSTD_decompressBlock_internal( + rSize = ZSTD_decompressBlock_internal( dctx, - op, - (nuint)(oBlockEnd - op), - ip, - cBlockSize, - streaming_operation.not_streaming + dst, + dstCapacity, + src, + srcSize, + streaming_operation.is_streaming ); + dctx->expected = 0; break; case blockType_e.bt_raw: - decodedSize = ZSTD_copyRawBlock(op, (nuint)(oend - op), ip, cBlockSize); + assert(srcSize <= dctx->expected); + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + + { + nuint err_code = rSize; + if (ERR_isError(err_code)) + { + return err_code; + } + } + + assert(rSize == srcSize); + dctx->expected -= rSize; break; case blockType_e.bt_rle: - decodedSize = ZSTD_setRleBlock( - op, - (nuint)(oBlockEnd - op), - *ip, - blockProperties.origSize - ); + rSize = ZSTD_setRleBlock(dst, dstCapacity, *(byte*)src, dctx->rleSize); + dctx->expected = 0; break; case blockType_e.bt_reserved: default: @@ -1180,1992 +1646,1424 @@ private static nuint ZSTD_decompressFrame( } { - nuint err_code = decodedSize; + nuint err_code = rSize; if (ERR_isError(err_code)) { return err_code; } } - if (dctx->validateChecksum != 0) - { - ZSTD_XXH64_update(&dctx->xxhState, op, decodedSize); - } - - if (decodedSize != 
0) - { - op += decodedSize; - } - - assert(ip != null); - ip += cBlockSize; - remainingSrcSize -= cBlockSize; - if (blockProperties.lastBlock != 0) - break; - } - - if (dctx->fParams.frameContentSize != unchecked(0UL - 1)) - { - if ((ulong)(op - ostart) != dctx->fParams.frameContentSize) + if (rSize > dctx->fParams.blockSizeMax) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - } - if (dctx->fParams.checksumFlag != 0) - { - if (remainingSrcSize < 4) + dctx->decodedSize += rSize; + if (dctx->validateChecksum != 0) + ZSTD_XXH64_update(&dctx->xxhState, dst, rSize); + dctx->previousDstEnd = (sbyte*)dst + rSize; + if (dctx->expected > 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + return rSize; } - if (dctx->forceIgnoreChecksum == default) + if (dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock) { - uint checkCalc = (uint)ZSTD_XXH64_digest(&dctx->xxhState); - uint checkRead; - checkRead = MEM_readLE32(ip); - if (checkRead != checkCalc) + if ( + dctx->fParams.frameContentSize != unchecked(0UL - 1) + && dctx->decodedSize != dctx->fParams.frameContentSize + ) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong)); + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); } - } - - ip += 4; - remainingSrcSize -= 4; - } - - ZSTD_DCtx_trace_end(dctx, (ulong)(op - ostart), (ulong)(ip - istart), 0); - *srcPtr = ip; - *srcSizePtr = remainingSrcSize; - return (nuint)(op - ostart); - } - private static nuint ZSTD_decompressMultiFrame( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* dict, - nuint dictSize, - ZSTD_DDict_s* ddict - ) - { - void* dststart = dst; - int moreThan1Frame = 0; - assert(dict == null || ddict == null); - if (ddict != null) - { - dict = ZSTD_DDict_dictContent(ddict); - dictSize = 
ZSTD_DDict_dictSize(ddict); - } - - while (srcSize >= ZSTD_startingInputLength(dctx->format)) - { - if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1 && srcSize >= 4) - { - uint magicNumber = MEM_readLE32(src); - if ((magicNumber & 0xFFFFFFF0) == 0x184D2A50) + if (dctx->fParams.checksumFlag != 0) { - /* skippable frame detected : skip it */ - nuint skippableSize = readSkippableFrameSize(src, srcSize); - { - nuint err_code = skippableSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(skippableSize <= srcSize); - src = (byte*)src + skippableSize; - srcSize -= skippableSize; - continue; + dctx->expected = 4; + dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; } - } - - if (ddict != null) - { - /* we were called from ZSTD_decompress_usingDDict */ - nuint err_code = ZSTD_decompressBegin_usingDDict(dctx, ddict); - if (ERR_isError(err_code)) + else { - return err_code; + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; } } else { - /* this will initialize correctly with no dict if dict == NULL, so - * use this in all cases but ddict */ - nuint err_code = ZSTD_decompressBegin_usingDict(dctx, dict, dictSize); - if (ERR_isError(err_code)) - { - return err_code; - } + dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; } - ZSTD_checkContinuity(dctx, dst, dstCapacity); + return rSize; + } + + case ZSTD_dStage.ZSTDds_checkChecksum: + assert(srcSize == 4); + + { + if (dctx->validateChecksum != 0) { - nuint res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); - if ( - ZSTD_getErrorCode(res) == ZSTD_ErrorCode.ZSTD_error_prefix_unknown - && moreThan1Frame == 1 - ) + uint h32 = (uint)ZSTD_XXH64_digest(&dctx->xxhState); + uint check32 = MEM_readLE32(src); + if (check32 != h32) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return unchecked( + 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong) + ); } - - if (ERR_isError(res)) - return res; - assert(res <= dstCapacity); - if (res != 0) - dst = (byte*)dst + res; - dstCapacity -= res; } - moreThan1Frame = 1; + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + return 0; } - if (srcSize != 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + case ZSTD_dStage.ZSTDds_decodeSkippableHeader: + assert(src != null); + assert(srcSize <= 8); + assert(dctx->format != ZSTD_format_e.ZSTD_f_zstd1_magicless); + memcpy(dctx->headerBuffer + (8 - srcSize), src, (uint)srcSize); + dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); + dctx->stage = ZSTD_dStage.ZSTDds_skipFrame; + return 0; + case ZSTD_dStage.ZSTDds_skipFrame: + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + return 0; + default: + assert(0 != 0); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + } + } + + private static nuint ZSTD_refDictContent(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = + (sbyte*)dict - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->prefixStart = dict; + dctx->previousDstEnd = (sbyte*)dict + dictSize; + return 0; + } - return (nuint)((byte*)dst - (byte*)dststart); + /*! ZSTD_loadDEntropy() : + * dict : must point at beginning of a valid zstd dictionary. + * @return : size of entropy tables read */ + private static nuint ZSTD_loadDEntropy( + ZSTD_entropyDTables_t* entropy, + void* dict, + nuint dictSize + ) + { + byte* dictPtr = (byte*)dict; + byte* dictEnd = dictPtr + dictSize; + if (dictSize <= 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - /*! ZSTD_decompress_usingDict() : - * Decompression using a known Dictionary. 
- * Dictionary must be identical to the one used during compression. - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ - public static nuint ZSTD_decompress_usingDict( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - void* dict, - nuint dictSize - ) + assert(MEM_readLE32(dict) == 0xEC30A437); + dictPtr += 8; { - return ZSTD_decompressMultiFrame( - dctx, - dst, - dstCapacity, - src, - srcSize, - dict, - dictSize, - null + /* use fse tables as temporary workspace; implies fse tables are grouped together */ + void* workspace = &entropy->LLTable; + nuint workspaceSize = (nuint)( + sizeof(ZSTD_seqSymbol) * 513 + + sizeof(ZSTD_seqSymbol) * 257 + + sizeof(ZSTD_seqSymbol) * 513 ); + nuint hSize = HUF_readDTableX2_wksp( + entropy->hufTable, + dictPtr, + (nuint)(dictEnd - dictPtr), + workspace, + workspaceSize, + 0 + ); + if (ERR_isError(hSize)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + dictPtr += hSize; } - private static ZSTD_DDict_s* ZSTD_getDDict(ZSTD_DCtx_s* dctx) { - switch (dctx->dictUses) - { - default: - assert(0 != 0); - goto case ZSTD_dictUses_e.ZSTD_dont_use; - case ZSTD_dictUses_e.ZSTD_dont_use: - ZSTD_clearDict(dctx); - return null; - case ZSTD_dictUses_e.ZSTD_use_indefinitely: - return dctx->ddict; - case ZSTD_dictUses_e.ZSTD_use_once: - dctx->dictUses = ZSTD_dictUses_e.ZSTD_dont_use; - return dctx->ddict; - } - } - - /*! ZSTD_decompressDCtx() : - * Same as ZSTD_decompress(), - * requires an allocated ZSTD_DCtx. - * Compatible with sticky parameters (see below). 
- */ - public static nuint ZSTD_decompressDCtx( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) - { - return ZSTD_decompress_usingDDict( - dctx, - dst, - dstCapacity, - src, - srcSize, - ZSTD_getDDict(dctx) - ); - } - - /*! ZSTD_decompress() : - * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - * Multiple compressed frames can be decompressed at once with this method. - * The result will be the concatenation of all decompressed frames, back to back. - * `dstCapacity` is an upper bound of originalSize to regenerate. - * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). - * If maximum upper bound isn't known, prefer using streaming mode to decompress data. - * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_decompress(void* dst, nuint dstCapacity, void* src, nuint srcSize) - { - nuint regenSize; - ZSTD_DCtx_s* dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); - if (dctx == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } - - regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); - ZSTD_freeDCtx(dctx); - return regenSize; - } - - /*-************************************** - * Advanced Streaming Decompression API - * Bufferless and synchronous - ****************************************/ - public static nuint ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx_s* dctx) - { - return dctx->expected; - } - - /** - * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we - * allow taking a partial block as the input. Currently only raw uncompressed blocks can - * be streamed. - * - * For blocks that can be streamed, this allows us to reduce the latency until we produce - * output, and avoid copying the input. 
- * - * @param inputSize - The total amount of input that the caller currently has. - */ - private static nuint ZSTD_nextSrcSizeToDecompressWithInputSize( - ZSTD_DCtx_s* dctx, - nuint inputSize - ) - { - if ( - !( - dctx->stage == ZSTD_dStage.ZSTDds_decompressBlock - || dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock - ) - ) - return dctx->expected; - if (dctx->bType != blockType_e.bt_raw) - return dctx->expected; - return inputSize <= 1 ? 1 - : inputSize <= dctx->expected ? inputSize - : dctx->expected; - } - - public static ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx_s* dctx) - { - switch (dctx->stage) - { - default: - assert(0 != 0); - goto case ZSTD_dStage.ZSTDds_getFrameHeaderSize; - case ZSTD_dStage.ZSTDds_getFrameHeaderSize: - case ZSTD_dStage.ZSTDds_decodeFrameHeader: - return ZSTD_nextInputType_e.ZSTDnit_frameHeader; - case ZSTD_dStage.ZSTDds_decodeBlockHeader: - return ZSTD_nextInputType_e.ZSTDnit_blockHeader; - case ZSTD_dStage.ZSTDds_decompressBlock: - return ZSTD_nextInputType_e.ZSTDnit_block; - case ZSTD_dStage.ZSTDds_decompressLastBlock: - return ZSTD_nextInputType_e.ZSTDnit_lastBlock; - case ZSTD_dStage.ZSTDds_checkChecksum: - return ZSTD_nextInputType_e.ZSTDnit_checksum; - case ZSTD_dStage.ZSTDds_decodeSkippableHeader: - case ZSTD_dStage.ZSTDds_skipFrame: - return ZSTD_nextInputType_e.ZSTDnit_skippableFrame; - } - } - - private static int ZSTD_isSkipFrame(ZSTD_DCtx_s* dctx) - { - return dctx->stage == ZSTD_dStage.ZSTDds_skipFrame ? 
1 : 0; - } - - /** ZSTD_decompressContinue() : - * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) - * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) - * or an error code, which can be tested using ZSTD_isError() */ - public static nuint ZSTD_decompressContinue( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) - { - if (srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - - ZSTD_checkContinuity(dctx, dst, dstCapacity); - dctx->processedCSize += srcSize; - switch (dctx->stage) - { - case ZSTD_dStage.ZSTDds_getFrameHeaderSize: - assert(src != null); - if (dctx->format == ZSTD_format_e.ZSTD_f_zstd1) - { - assert(srcSize >= 4); - if ((MEM_readLE32(src) & 0xFFFFFFF0) == 0x184D2A50) - { - memcpy(dctx->headerBuffer, src, (uint)srcSize); - dctx->expected = 8 - srcSize; - dctx->stage = ZSTD_dStage.ZSTDds_decodeSkippableHeader; - return 0; - } - } - - dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); - if (ERR_isError(dctx->headerSize)) - return dctx->headerSize; - memcpy(dctx->headerBuffer, src, (uint)srcSize); - dctx->expected = dctx->headerSize - srcSize; - dctx->stage = ZSTD_dStage.ZSTDds_decodeFrameHeader; - return 0; - case ZSTD_dStage.ZSTDds_decodeFrameHeader: - assert(src != null); - memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, (uint)srcSize); - - { - nuint err_code = ZSTD_decodeFrameHeader( - dctx, - dctx->headerBuffer, - dctx->headerSize - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - dctx->expected = ZSTD_blockHeaderSize; - dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; - return 0; - case ZSTD_dStage.ZSTDds_decodeBlockHeader: - { - blockProperties_t bp; - nuint cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); - if (ERR_isError(cBlockSize)) - return cBlockSize; - if (cBlockSize 
> dctx->fParams.blockSizeMax) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - dctx->expected = cBlockSize; - dctx->bType = bp.blockType; - dctx->rleSize = bp.origSize; - if (cBlockSize != 0) - { - dctx->stage = - bp.lastBlock != 0 - ? ZSTD_dStage.ZSTDds_decompressLastBlock - : ZSTD_dStage.ZSTDds_decompressBlock; - return 0; - } - - if (bp.lastBlock != 0) - { - if (dctx->fParams.checksumFlag != 0) - { - dctx->expected = 4; - dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; - } - else - { - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - } - } - else - { - dctx->expected = ZSTD_blockHeaderSize; - dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; - } - - return 0; - } - - case ZSTD_dStage.ZSTDds_decompressLastBlock: - case ZSTD_dStage.ZSTDds_decompressBlock: - { - nuint rSize; - switch (dctx->bType) - { - case blockType_e.bt_compressed: - assert(dctx->isFrameDecompression == 1); - rSize = ZSTD_decompressBlock_internal( - dctx, - dst, - dstCapacity, - src, - srcSize, - streaming_operation.is_streaming - ); - dctx->expected = 0; - break; - case blockType_e.bt_raw: - assert(srcSize <= dctx->expected); - rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); - - { - nuint err_code = rSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - assert(rSize == srcSize); - dctx->expected -= rSize; - break; - case blockType_e.bt_rle: - rSize = ZSTD_setRleBlock(dst, dstCapacity, *(byte*)src, dctx->rleSize); - dctx->expected = 0; - break; - case blockType_e.bt_reserved: - default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - { - nuint err_code = rSize; - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (rSize > dctx->fParams.blockSizeMax) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - dctx->decodedSize += rSize; - if (dctx->validateChecksum != 0) - 
ZSTD_XXH64_update(&dctx->xxhState, dst, rSize); - dctx->previousDstEnd = (sbyte*)dst + rSize; - if (dctx->expected > 0) - { - return rSize; - } - - if (dctx->stage == ZSTD_dStage.ZSTDds_decompressLastBlock) - { - if ( - dctx->fParams.frameContentSize != unchecked(0UL - 1) - && dctx->decodedSize != dctx->fParams.frameContentSize - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - if (dctx->fParams.checksumFlag != 0) - { - dctx->expected = 4; - dctx->stage = ZSTD_dStage.ZSTDds_checkChecksum; - } - else - { - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - } - } - else - { - dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; - dctx->expected = ZSTD_blockHeaderSize; - } - - return rSize; - } - - case ZSTD_dStage.ZSTDds_checkChecksum: - assert(srcSize == 4); - - { - if (dctx->validateChecksum != 0) - { - uint h32 = (uint)ZSTD_XXH64_digest(&dctx->xxhState); - uint check32 = MEM_readLE32(src); - if (check32 != h32) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong) - ); - } - } - - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - return 0; - } - - case ZSTD_dStage.ZSTDds_decodeSkippableHeader: - assert(src != null); - assert(srcSize <= 8); - assert(dctx->format != ZSTD_format_e.ZSTD_f_zstd1_magicless); - memcpy(dctx->headerBuffer + (8 - srcSize), src, (uint)srcSize); - dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); - dctx->stage = ZSTD_dStage.ZSTDds_skipFrame; - return 0; - case ZSTD_dStage.ZSTDds_skipFrame: - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - return 0; - default: - assert(0 != 0); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } - } - - private static nuint ZSTD_refDictContent(ZSTD_DCtx_s* dctx, void* dict, nuint 
dictSize) - { - dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = - (sbyte*)dict - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); - dctx->prefixStart = dict; - dctx->previousDstEnd = (sbyte*)dict + dictSize; - return 0; - } - - /*! ZSTD_loadDEntropy() : - * dict : must point at beginning of a valid zstd dictionary. - * @return : size of entropy tables read */ - private static nuint ZSTD_loadDEntropy( - ZSTD_entropyDTables_t* entropy, - void* dict, - nuint dictSize - ) - { - byte* dictPtr = (byte*)dict; - byte* dictEnd = dictPtr + dictSize; - if (dictSize <= 8) + short* offcodeNCount = stackalloc short[32]; + uint offcodeMaxValue = 31, + offcodeLog; + nuint offcodeHeaderSize = FSE_readNCount( + offcodeNCount, + &offcodeMaxValue, + &offcodeLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(offcodeHeaderSize)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - assert(MEM_readLE32(dict) == 0xEC30A437); - dictPtr += 8; - { - /* use fse tables as temporary workspace; implies fse tables are grouped together */ - void* workspace = &entropy->LLTable; - nuint workspaceSize = (nuint)( - sizeof(ZSTD_seqSymbol) * 513 - + sizeof(ZSTD_seqSymbol) * 257 - + sizeof(ZSTD_seqSymbol) * 513 - ); - nuint hSize = HUF_readDTableX2_wksp( - entropy->hufTable, - dictPtr, - (nuint)(dictEnd - dictPtr), - workspace, - workspaceSize, - 0 - ); - if (ERR_isError(hSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - dictPtr += hSize; - } - - { - short* offcodeNCount = stackalloc short[32]; - uint offcodeMaxValue = 31, - offcodeLog; - nuint offcodeHeaderSize = FSE_readNCount( - offcodeNCount, - &offcodeMaxValue, - &offcodeLog, - dictPtr, - (nuint)(dictEnd - dictPtr) - ); - if (ERR_isError(offcodeHeaderSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (offcodeMaxValue > 31) - { - return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (offcodeLog > 8) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - ZSTD_buildFSETable( - &entropy->OFTable.e0, - offcodeNCount, - offcodeMaxValue, - OF_base, - OF_bits, - offcodeLog, - entropy->workspace, - sizeof(uint) * 157, - 0 - ); - dictPtr += offcodeHeaderSize; - } - - { - short* matchlengthNCount = stackalloc short[53]; - uint matchlengthMaxValue = 52, - matchlengthLog; - nuint matchlengthHeaderSize = FSE_readNCount( - matchlengthNCount, - &matchlengthMaxValue, - &matchlengthLog, - dictPtr, - (nuint)(dictEnd - dictPtr) - ); - if (ERR_isError(matchlengthHeaderSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (matchlengthMaxValue > 52) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (matchlengthLog > 9) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - ZSTD_buildFSETable( - &entropy->MLTable.e0, - matchlengthNCount, - matchlengthMaxValue, - ML_base, - ML_bits, - matchlengthLog, - entropy->workspace, - sizeof(uint) * 157, - 0 - ); - dictPtr += matchlengthHeaderSize; - } - - { - short* litlengthNCount = stackalloc short[36]; - uint litlengthMaxValue = 35, - litlengthLog; - nuint litlengthHeaderSize = FSE_readNCount( - litlengthNCount, - &litlengthMaxValue, - &litlengthLog, - dictPtr, - (nuint)(dictEnd - dictPtr) - ); - if (ERR_isError(litlengthHeaderSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (litlengthMaxValue > 35) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - if (litlengthLog > 9) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - ZSTD_buildFSETable( - &entropy->LLTable.e0, - litlengthNCount, - litlengthMaxValue, - LL_base, - 
LL_bits, - litlengthLog, - entropy->workspace, - sizeof(uint) * 157, - 0 - ); - dictPtr += litlengthHeaderSize; - } - - if (dictPtr + 12 > dictEnd) + if (offcodeMaxValue > 31) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - - { - int i; - nuint dictContentSize = (nuint)(dictEnd - (dictPtr + 12)); - for (i = 0; i < 3; i++) - { - uint rep = MEM_readLE32(dictPtr); - dictPtr += 4; - if (rep == 0 || rep > dictContentSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); - } - - entropy->rep[i] = rep; - } - } - - return (nuint)(dictPtr - (byte*)dict); - } - - private static nuint ZSTD_decompress_insertDictionary( - ZSTD_DCtx_s* dctx, - void* dict, - nuint dictSize - ) - { - if (dictSize < 8) - return ZSTD_refDictContent(dctx, dict, dictSize); - { - uint magic = MEM_readLE32(dict); - if (magic != 0xEC30A437) - { - return ZSTD_refDictContent(dctx, dict, dictSize); - } - } - - dctx->dictID = MEM_readLE32((sbyte*)dict + 4); - { - nuint eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); - if (ERR_isError(eSize)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - dict = (sbyte*)dict + eSize; - dictSize -= eSize; - } - - dctx->litEntropy = dctx->fseEntropy = 1; - return ZSTD_refDictContent(dctx, dict, dictSize); - } - - public static nuint ZSTD_decompressBegin(ZSTD_DCtx_s* dctx) - { - assert(dctx != null); - dctx->expected = ZSTD_startingInputLength(dctx->format); - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - dctx->processedCSize = 0; - dctx->decodedSize = 0; - dctx->previousDstEnd = null; - dctx->prefixStart = null; - dctx->virtualStart = null; - dctx->dictEnd = null; - dctx->entropy.hufTable[0] = 12 * 0x1000001; - dctx->litEntropy = dctx->fseEntropy = 0; - dctx->dictID = 0; - dctx->bType = blockType_e.bt_reserved; - dctx->isFrameDecompression = 1; - memcpy(dctx->entropy.rep, repStartValue, sizeof(uint) * 3); - dctx->LLTptr = 
&dctx->entropy.LLTable.e0; - dctx->MLTptr = &dctx->entropy.MLTable.e0; - dctx->OFTptr = &dctx->entropy.OFTable.e0; - dctx->HUFptr = dctx->entropy.hufTable; - return 0; - } - - public static nuint ZSTD_decompressBegin_usingDict( - ZSTD_DCtx_s* dctx, - void* dict, - nuint dictSize - ) - { - { - nuint err_code = ZSTD_decompressBegin(dctx); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (dict != null && dictSize != 0) - if (ERR_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize))) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); - } - - return 0; - } - - /* ====== ZSTD_DDict ====== */ - public static nuint ZSTD_decompressBegin_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) - { - assert(dctx != null); - if (ddict != null) - { - sbyte* dictStart = (sbyte*)ZSTD_DDict_dictContent(ddict); - nuint dictSize = ZSTD_DDict_dictSize(ddict); - void* dictEnd = dictStart + dictSize; - dctx->ddictIsCold = dctx->dictEnd != dictEnd ? 1 : 0; - } - - { - nuint err_code = ZSTD_decompressBegin(dctx); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - if (ddict != null) - { - ZSTD_copyDDictParameters(dctx, ddict); - } - - return 0; - } - - /*! ZSTD_getDictID_fromDict() : - * Provides the dictID stored within dictionary. - * if @return == 0, the dictionary is not conformant with Zstandard specification. - * It can still be loaded, but as a content-only dictionary. */ - public static uint ZSTD_getDictID_fromDict(void* dict, nuint dictSize) - { - if (dictSize < 8) - return 0; - if (MEM_readLE32(dict) != 0xEC30A437) - return 0; - return MEM_readLE32((sbyte*)dict + 4); - } - - /*! ZSTD_getDictID_fromFrame() : - * Provides the dictID required to decompress frame stored within `src`. - * If @return == 0, the dictID could not be decoded. - * This could for one of the following reasons : - * - The frame does not require a dictionary (most common case). - * - The frame was built with dictID intentionally removed. 
- * Needed dictionary is a hidden piece of information. - * Note : this use case also happens when using a non-conformant dictionary. - * - `srcSize` is too small, and as a result, frame header could not be decoded. - * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. - * - This is not a Zstandard frame. - * When identifying the exact failure cause, it's possible to use - * ZSTD_getFrameHeader(), which will provide a more precise error code. */ - public static uint ZSTD_getDictID_fromFrame(void* src, nuint srcSize) - { - ZSTD_frameHeader zfp = new ZSTD_frameHeader - { - frameContentSize = 0, - windowSize = 0, - blockSizeMax = 0, - frameType = ZSTD_frameType_e.ZSTD_frame, - headerSize = 0, - dictID = 0, - checksumFlag = 0, - _reserved1 = 0, - _reserved2 = 0, - }; - nuint hError = ZSTD_getFrameHeader(&zfp, src, srcSize); - if (ERR_isError(hError)) - return 0; - return zfp.dictID; - } - - /*! ZSTD_decompress_usingDDict() : - * Decompression using a pre-digested Dictionary - * Use dictionary without significant overhead. 
*/ - public static nuint ZSTD_decompress_usingDDict( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - ZSTD_DDict_s* ddict - ) - { - return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, null, 0, ddict); - } - - /*===================================== - * Streaming decompression - *====================================*/ - public static ZSTD_DCtx_s* ZSTD_createDStream() - { - return ZSTD_createDCtx_internal(ZSTD_defaultCMem); - } - - public static ZSTD_DCtx_s* ZSTD_initStaticDStream(void* workspace, nuint workspaceSize) - { - return ZSTD_initStaticDCtx(workspace, workspaceSize); - } - - public static ZSTD_DCtx_s* ZSTD_createDStream_advanced(ZSTD_customMem customMem) - { - return ZSTD_createDCtx_internal(customMem); - } - - public static nuint ZSTD_freeDStream(ZSTD_DCtx_s* zds) - { - return ZSTD_freeDCtx(zds); - } - - /* *** Initialization *** */ - public static nuint ZSTD_DStreamInSize() - { - return (nuint)(1 << 17) + ZSTD_blockHeaderSize; - } - - public static nuint ZSTD_DStreamOutSize() - { - return 1 << 17; - } - - /*! ZSTD_DCtx_loadDictionary_advanced() : - * Same as ZSTD_DCtx_loadDictionary(), - * but gives direct control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?). 
*/ - public static nuint ZSTD_DCtx_loadDictionary_advanced( - ZSTD_DCtx_s* dctx, - void* dict, - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType - ) - { - if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } - - ZSTD_clearDict(dctx); - if (dict != null && dictSize != 0) - { - dctx->ddictLocal = ZSTD_createDDict_advanced( - dict, - dictSize, - dictLoadMethod, - dictContentType, - dctx->customMem - ); - if (dctx->ddictLocal == null) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } - - dctx->ddict = dctx->ddictLocal; - dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; - } - - return 0; - } - - /*! ZSTD_DCtx_loadDictionary_byReference() : - * Same as ZSTD_DCtx_loadDictionary(), - * but references `dict` content instead of copying it into `dctx`. - * This saves memory if `dict` remains around., - * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ - public static nuint ZSTD_DCtx_loadDictionary_byReference( - ZSTD_DCtx_s* dctx, - void* dict, - nuint dictSize - ) - { - return ZSTD_DCtx_loadDictionary_advanced( - dctx, - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - ZSTD_dictContentType_e.ZSTD_dct_auto - ); - } - - /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal DDict from dict buffer, to be used to decompress all future frames. - * The dictionary remains valid for all future frames, until explicitly invalidated, or - * a new dictionary is loaded. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Loading a dictionary involves building tables, - * which has a non-negligible impact on CPU usage and latency. 
- * It's recommended to "load once, use many times", to amortize the cost - * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. - * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. - * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of - * how dictionary content is loaded and interpreted. - */ - public static nuint ZSTD_DCtx_loadDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) - { - return ZSTD_DCtx_loadDictionary_advanced( - dctx, - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, - ZSTD_dictContentType_e.ZSTD_dct_auto - ); - } - - /*! ZSTD_DCtx_refPrefix_advanced() : - * Same as ZSTD_DCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ - public static nuint ZSTD_DCtx_refPrefix_advanced( - ZSTD_DCtx_s* dctx, - void* prefix, - nuint prefixSize, - ZSTD_dictContentType_e dictContentType - ) - { - { - nuint err_code = ZSTD_DCtx_loadDictionary_advanced( - dctx, - prefix, - prefixSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - dictContentType - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_once; - return 0; - } - - /*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ - * Reference a prefix (single-usage dictionary) to decompress next frame. - * This is the reverse operation of ZSTD_CCtx_refPrefix(), - * and must use the same prefix as the one used during compression. - * Prefix is **only used once**. Reference is discarded at end of frame. - * End of frame is reached when ZSTD_decompressStream() returns 0. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary - * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. 
- * Prefix buffer must remain unmodified up to the end of frame, - * reached when ZSTD_decompressStream() returns 0. - * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). - * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) - * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. - * A full dictionary is more costly, as it requires building tables. - */ - public static nuint ZSTD_DCtx_refPrefix(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize) - { - return ZSTD_DCtx_refPrefix_advanced( - dctx, - prefix, - prefixSize, - ZSTD_dictContentType_e.ZSTD_dct_rawContent - ); - } - - /* ZSTD_initDStream_usingDict() : - * return : expected size, aka ZSTD_startingInputLength(). - * this function cannot fail */ - public static nuint ZSTD_initDStream_usingDict(ZSTD_DCtx_s* zds, void* dict, nuint dictSize) - { - { - nuint err_code = ZSTD_DCtx_reset(zds, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - { - nuint err_code = ZSTD_DCtx_loadDictionary(zds, dict, dictSize); - if (ERR_isError(err_code)) - { - return err_code; - } + + if (offcodeLog > 8) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - return ZSTD_startingInputLength(zds->format); + ZSTD_buildFSETable( + &entropy->OFTable.e0, + offcodeNCount, + offcodeMaxValue, + OF_base, + OF_bits, + offcodeLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); + dictPtr += offcodeHeaderSize; } - /* note : this variant can't fail */ - public static nuint ZSTD_initDStream(ZSTD_DCtx_s* zds) { + short* matchlengthNCount = stackalloc short[53]; + uint matchlengthMaxValue = 52, + matchlengthLog; + nuint matchlengthHeaderSize = FSE_readNCount( + matchlengthNCount, + &matchlengthMaxValue, + &matchlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(matchlengthHeaderSize)) { - nuint err_code = ZSTD_DCtx_reset(zds, 
ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } + if (matchlengthMaxValue > 52) { - nuint err_code = ZSTD_DCtx_refDDict(zds, null); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (matchlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - return ZSTD_startingInputLength(zds->format); + ZSTD_buildFSETable( + &entropy->MLTable.e0, + matchlengthNCount, + matchlengthMaxValue, + ML_base, + ML_bits, + matchlengthLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); + dictPtr += matchlengthHeaderSize; } - /* ZSTD_initDStream_usingDDict() : - * ddict will just be referenced, and must outlive decompression session - * this function cannot fail */ - public static nuint ZSTD_initDStream_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) { + short* litlengthNCount = stackalloc short[36]; + uint litlengthMaxValue = 35, + litlengthLog; + nuint litlengthHeaderSize = FSE_readNCount( + litlengthNCount, + &litlengthMaxValue, + &litlengthLog, + dictPtr, + (nuint)(dictEnd - dictPtr) + ); + if (ERR_isError(litlengthHeaderSize)) { - nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } + if (litlengthMaxValue > 35) { - nuint err_code = ZSTD_DCtx_refDDict(dctx, ddict); - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); + } + + if (litlengthLog > 9) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - return ZSTD_startingInputLength(dctx->format); + ZSTD_buildFSETable( + &entropy->LLTable.e0, + 
litlengthNCount, + litlengthMaxValue, + LL_base, + LL_bits, + litlengthLog, + entropy->workspace, + sizeof(uint) * 157, + 0 + ); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr + 12 > dictEnd) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - /* ZSTD_resetDStream() : - * return : expected size, aka ZSTD_startingInputLength(). - * this function cannot fail */ - public static nuint ZSTD_resetDStream(ZSTD_DCtx_s* dctx) { + int i; + nuint dictContentSize = (nuint)(dictEnd - (dictPtr + 12)); + for (i = 0; i < 3; i++) { - nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); - if (ERR_isError(err_code)) + uint rep = MEM_readLE32(dictPtr); + dictPtr += 4; + if (rep == 0 || rep > dictContentSize) { - return err_code; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); } - } - return ZSTD_startingInputLength(dctx->format); + entropy->rep[i] = rep; + } } - /*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used to decompress next frames. - * The dictionary remains active for decompression of future frames using same DCtx. - * - * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function - * will store the DDict references in a table, and the DDict used for decompression - * will be determined at decompression time, as per the dict ID in the frame. - * The memory for the table is allocated on the first call to refDDict, and can be - * freed with ZSTD_freeDCtx(). - * - * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary - * will be managed, and referencing a dictionary effectively "discards" any previous one. - * - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: referencing a NULL DDict means "return to no-dictionary mode". - * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. 
- */ - public static nuint ZSTD_DCtx_refDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + return (nuint)(dictPtr - (byte*)dict); + } + + private static nuint ZSTD_decompress_insertDictionary( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) + { + if (dictSize < 8) + return ZSTD_refDictContent(dctx, dict, dictSize); { - if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + uint magic = MEM_readLE32(dict); + if (magic != 0xEC30A437) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + return ZSTD_refDictContent(dctx, dict, dictSize); } + } - ZSTD_clearDict(dctx); - if (ddict != null) + dctx->dictID = MEM_readLE32((sbyte*)dict + 4); + { + nuint eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); + if (ERR_isError(eSize)) { - dctx->ddict = ddict; - dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; - if (dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts) - { - if (dctx->ddictSet == null) - { - dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); - if (dctx->ddictSet == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } - } - - assert(dctx->staticSize == 0); - { - nuint err_code = ZSTD_DDictHashSet_addDDict( - dctx->ddictSet, - ddict, - dctx->customMem - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - return 0; + dict = (sbyte*)dict + eSize; + dictSize -= eSize; } - /* ZSTD_DCtx_setMaxWindowSize() : - * note : no direct equivalence in ZSTD_DCtx_setParameter, - * since this version sets windowSize, and the other sets windowLog */ - public static nuint ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx_s* dctx, nuint maxWindowSize) - { - ZSTD_bounds bounds = ZSTD_dParam_getBounds(ZSTD_dParameter.ZSTD_d_windowLogMax); - nuint min = (nuint)1 << bounds.lowerBound; - nuint max = (nuint)1 << bounds.upperBound; - if (dctx->streamStage != 
ZSTD_dStreamStage.zdss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + dctx->litEntropy = dctx->fseEntropy = 1; + return ZSTD_refDictContent(dctx, dict, dictSize); + } + + public static nuint ZSTD_decompressBegin(ZSTD_DCtx_s* dctx) + { + assert(dctx != null); + dctx->expected = ZSTD_startingInputLength(dctx->format); + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + dctx->processedCSize = 0; + dctx->decodedSize = 0; + dctx->previousDstEnd = null; + dctx->prefixStart = null; + dctx->virtualStart = null; + dctx->dictEnd = null; + dctx->entropy.hufTable[0] = 12 * 0x1000001; + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + dctx->bType = blockType_e.bt_reserved; + dctx->isFrameDecompression = 1; + memcpy(dctx->entropy.rep, repStartValue, sizeof(uint) * 3); + dctx->LLTptr = &dctx->entropy.LLTable.e0; + dctx->MLTptr = &dctx->entropy.MLTable.e0; + dctx->OFTptr = &dctx->entropy.OFTable.e0; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; + } - if (maxWindowSize < min) + public static nuint ZSTD_decompressBegin_usingDict( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) + { + { + nuint err_code = ZSTD_decompressBegin(dctx); + if (ERR_isError(err_code)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return err_code; } + } - if (maxWindowSize > max) + if (dict != null && dictSize != 0) + if (ERR_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize))) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } - dctx->maxWindowSize = maxWindowSize; - return 0; - } + return 0; + } - /*! ZSTD_DCtx_setFormat() : - * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). - * Instruct the decoder context about what kind of data to decode next. 
- * This instruction is mandatory to decode data without a fully-formed header, - * such ZSTD_f_zstd1_magicless for example. - * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ - public static nuint ZSTD_DCtx_setFormat(ZSTD_DCtx_s* dctx, ZSTD_format_e format) + /* ====== ZSTD_DDict ====== */ + public static nuint ZSTD_decompressBegin_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { + assert(dctx != null); + if (ddict != null) { - return ZSTD_DCtx_setParameter( - dctx, - ZSTD_dParameter.ZSTD_d_experimentalParam1, - (int)format - ); + sbyte* dictStart = (sbyte*)ZSTD_DDict_dictContent(ddict); + nuint dictSize = ZSTD_DDict_dictSize(ddict); + void* dictEnd = dictStart + dictSize; + dctx->ddictIsCold = dctx->dictEnd != dictEnd ? 1 : 0; } - /*! ZSTD_dParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. - * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - both lower and upper bounds, inclusive - */ - public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) - { - ZSTD_bounds bounds = new ZSTD_bounds - { - error = 0, - lowerBound = 0, - upperBound = 0, - }; - switch (dParam) - { - case ZSTD_dParameter.ZSTD_d_windowLogMax: - bounds.lowerBound = 10; - bounds.upperBound = sizeof(nuint) == 4 ? 
30 : 31; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam1: - bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; - bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam2: - bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; - bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam3: - bounds.lowerBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum; - bounds.upperBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_ignoreChecksum; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam4: - bounds.lowerBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict; - bounds.upperBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam5: - bounds.lowerBound = 0; - bounds.upperBound = 1; - return bounds; - case ZSTD_dParameter.ZSTD_d_experimentalParam6: - bounds.lowerBound = 1 << 10; - bounds.upperBound = 1 << 17; - return bounds; - default: - break; + { + nuint err_code = ZSTD_decompressBegin(dctx); + if (ERR_isError(err_code)) + { + return err_code; } - - bounds.error = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - return bounds; } - /* ZSTD_dParam_withinBounds: - * @return 1 if value is within dParam bounds, - * 0 otherwise */ - private static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) + if (ddict != null) { - ZSTD_bounds bounds = ZSTD_dParam_getBounds(dParam); - if (ERR_isError(bounds.error)) - return 0; - if (value < bounds.lowerBound) - return 0; - if (value > bounds.upperBound) - return 0; - return 1; + ZSTD_copyDDictParameters(dctx, ddict); } - /*! ZSTD_DCtx_getParameter() : - * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, - * and store it into int* value. 
- * @return : 0, or an error code (which can be tested with ZSTD_isError()). - */ - public static nuint ZSTD_DCtx_getParameter( - ZSTD_DCtx_s* dctx, - ZSTD_dParameter param, - int* value - ) - { - switch (param) - { - case ZSTD_dParameter.ZSTD_d_windowLogMax: - *value = (int)ZSTD_highbit32((uint)dctx->maxWindowSize); - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam1: - *value = (int)dctx->format; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam2: - *value = (int)dctx->outBufferMode; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam3: - *value = (int)dctx->forceIgnoreChecksum; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam4: - *value = (int)dctx->refMultipleDDicts; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam5: - *value = dctx->disableHufAsm; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam6: - *value = dctx->maxBlockSizeParam; - return 0; - default: - break; - } + return 0; + } + + /*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ + public static uint ZSTD_getDictID_fromDict(void* dict, nuint dictSize) + { + if (dictSize < 8) + return 0; + if (MEM_readLE32(dict) != 0xEC30A437) + return 0; + return MEM_readLE32((sbyte*)dict + 4); + } + + /*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompress frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary (most common case). + * - The frame was built with dictID intentionally removed. + * Needed dictionary is a hidden piece of information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, frame header could not be decoded. 
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to use + * ZSTD_getFrameHeader(), which will provide a more precise error code. */ + public static uint ZSTD_getDictID_fromFrame(void* src, nuint srcSize) + { + ZSTD_frameHeader zfp = new ZSTD_frameHeader + { + frameContentSize = 0, + windowSize = 0, + blockSizeMax = 0, + frameType = ZSTD_frameType_e.ZSTD_frame, + headerSize = 0, + dictID = 0, + checksumFlag = 0, + _reserved1 = 0, + _reserved2 = 0, + }; + nuint hError = ZSTD_getFrameHeader(&zfp, src, srcSize); + if (ERR_isError(hError)) + return 0; + return zfp.dictID; + } + + /*! ZSTD_decompress_usingDDict() : + * Decompression using a pre-digested Dictionary + * Use dictionary without significant overhead. */ + public static nuint ZSTD_decompress_usingDDict( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + ZSTD_DDict_s* ddict + ) + { + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, null, 0, ddict); + } + + /*===================================== + * Streaming decompression + *====================================*/ + public static ZSTD_DCtx_s* ZSTD_createDStream() + { + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); + } + + public static ZSTD_DCtx_s* ZSTD_initStaticDStream(void* workspace, nuint workspaceSize) + { + return ZSTD_initStaticDCtx(workspace, workspaceSize); + } + + public static ZSTD_DCtx_s* ZSTD_createDStream_advanced(ZSTD_customMem customMem) + { + return ZSTD_createDCtx_internal(customMem); + } + + public static nuint ZSTD_freeDStream(ZSTD_DCtx_s* zds) + { + return ZSTD_freeDCtx(zds); + } + + /* *** Initialization *** */ + public static nuint ZSTD_DStreamInSize() + { + return (nuint)(1 << 17) + ZSTD_blockHeaderSize; + } + + public static nuint ZSTD_DStreamOutSize() + { + return 1 << 17; + } - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + /*! 
ZSTD_DCtx_loadDictionary_advanced() : + * Same as ZSTD_DCtx_loadDictionary(), + * but gives direct control over + * how to load the dictionary (by copy ? by reference ?) + * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ + public static nuint ZSTD_DCtx_loadDictionary_advanced( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType + ) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /*! ZSTD_DCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_dParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is only possible during frame initialization (before starting decompression). - * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
- */ - public static nuint ZSTD_DCtx_setParameter( - ZSTD_DCtx_s* dctx, - ZSTD_dParameter dParam, - int value - ) + ZSTD_clearDict(dctx); + if (dict != null && dictSize != 0) { - if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + dctx->ddictLocal = ZSTD_createDDict_advanced( + dict, + dictSize, + dictLoadMethod, + dictContentType, + dctx->customMem + ); + if (dctx->ddictLocal == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - switch (dParam) - { - case ZSTD_dParameter.ZSTD_d_windowLogMax: - if (value == 0) - value = 27; - - { - if ( - ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) - == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } - - dctx->maxWindowSize = (nuint)1 << value; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam1: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam1, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + dctx->ddict = dctx->ddictLocal; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; + } - dctx->format = (ZSTD_format_e)value; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam2: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam2, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + return 0; + } - dctx->outBufferMode = (ZSTD_bufferMode_e)value; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam3: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam3, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + /*! 
ZSTD_DCtx_loadDictionary_byReference() : + * Same as ZSTD_DCtx_loadDictionary(), + * but references `dict` content instead of copying it into `dctx`. + * This saves memory if `dict` remains around., + * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ + public static nuint ZSTD_DCtx_loadDictionary_byReference( + ZSTD_DCtx_s* dctx, + void* dict, + nuint dictSize + ) + { + return ZSTD_DCtx_loadDictionary_advanced( + dctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); + } - dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam4: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam4, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ + * Create an internal DDict from dict buffer, to be used to decompress all future frames. + * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, + * meaning "return to no-dictionary mode". + * Note 1 : Loading a dictionary involves building tables, + * which has a non-negligible impact on CPU usage and latency. + * It's recommended to "load once, use many times", to amortize the cost + * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. + * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. + * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of + * how dictionary content is loaded and interpreted. 
+ */ + public static nuint ZSTD_DCtx_loadDictionary(ZSTD_DCtx_s* dctx, void* dict, nuint dictSize) + { + return ZSTD_DCtx_loadDictionary_advanced( + dctx, + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictContentType_e.ZSTD_dct_auto + ); + } - if (dctx->staticSize != 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); - } + /*! ZSTD_DCtx_refPrefix_advanced() : + * Same as ZSTD_DCtx_refPrefix(), but gives finer control over + * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ + public static nuint ZSTD_DCtx_refPrefix_advanced( + ZSTD_DCtx_s* dctx, + void* prefix, + nuint prefixSize, + ZSTD_dictContentType_e dictContentType + ) + { + { + nuint err_code = ZSTD_DCtx_loadDictionary_advanced( + dctx, + prefix, + prefixSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, + dictContentType + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } - dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam5: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam5, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_once; + return 0; + } - dctx->disableHufAsm = value != 0 ? 1 : 0; - return 0; - case ZSTD_dParameter.ZSTD_d_experimentalParam6: - if (value != 0) - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam6, - value - ) == 0 - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); - } - } + /*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ + * Reference a prefix (single-usage dictionary) to decompress next frame. + * This is the reverse operation of ZSTD_CCtx_refPrefix(), + * and must use the same prefix as the one used during compression. + * Prefix is **only used once**. 
Reference is discarded at end of frame. + * End of frame is reached when ZSTD_decompressStream() returns 0. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary + * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. + * Prefix buffer must remain unmodified up to the end of frame, + * reached when ZSTD_decompressStream() returns 0. + * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). + * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) + * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. + * A full dictionary is more costly, as it requires building tables. + */ + public static nuint ZSTD_DCtx_refPrefix(ZSTD_DCtx_s* dctx, void* prefix, nuint prefixSize) + { + return ZSTD_DCtx_refPrefix_advanced( + dctx, + prefix, + prefixSize, + ZSTD_dictContentType_e.ZSTD_dct_rawContent + ); + } - dctx->maxBlockSizeParam = value; - return 0; - default: - break; + /* ZSTD_initDStream_usingDict() : + * return : expected size, aka ZSTD_startingInputLength(). + * this function cannot fail */ + public static nuint ZSTD_initDStream_usingDict(ZSTD_DCtx_s* zds, void* dict, nuint dictSize) + { + { + nuint err_code = ZSTD_DCtx_reset(zds, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; } - - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } - /*! ZSTD_DCtx_reset() : - * Return a DCtx to clean state. - * Session and parameters can be reset jointly or separately. - * Parameters can only be reset when no active frame is being decompressed. 
- * @return : 0, or an error code, which can be tested with ZSTD_isError() - */ - public static nuint ZSTD_DCtx_reset(ZSTD_DCtx_s* dctx, ZSTD_ResetDirective reset) { - if ( - reset == ZSTD_ResetDirective.ZSTD_reset_session_only - || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters - ) + nuint err_code = ZSTD_DCtx_loadDictionary(zds, dict, dictSize); + if (ERR_isError(err_code)) { - dctx->streamStage = ZSTD_dStreamStage.zdss_init; - dctx->noForwardProgress = 0; - dctx->isFrameDecompression = 1; + return err_code; } + } - if ( - reset == ZSTD_ResetDirective.ZSTD_reset_parameters - || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters - ) - { - if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } + return ZSTD_startingInputLength(zds->format); + } - ZSTD_clearDict(dctx); - ZSTD_DCtx_resetParameters(dctx); + /* note : this variant can't fail */ + public static nuint ZSTD_initDStream(ZSTD_DCtx_s* zds) + { + { + nuint err_code = ZSTD_DCtx_reset(zds, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; } - - return 0; } - public static nuint ZSTD_sizeof_DStream(ZSTD_DCtx_s* dctx) { - return ZSTD_sizeof_DCtx(dctx); + nuint err_code = ZSTD_DCtx_refDDict(zds, null); + if (ERR_isError(err_code)) + { + return err_code; + } } - private static nuint ZSTD_decodingBufferSize_internal( - ulong windowSize, - ulong frameContentSize, - nuint blockSizeMax - ) + return ZSTD_startingInputLength(zds->format); + } + + /* ZSTD_initDStream_usingDDict() : + * ddict will just be referenced, and must outlive decompression session + * this function cannot fail */ + public static nuint ZSTD_initDStream_usingDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { { - nuint blockSize = - (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) < blockSizeMax - ? (nuint)(windowSize < 1 << 17 ? 
windowSize : 1 << 17) - : blockSizeMax; - /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block - * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing - * the block at the beginning of the output buffer, and maintain a full window. - * - * We need another blockSize worth of buffer so that we can store split - * literals at the end of the block without overwriting the extDict window. - */ - ulong neededRBSize = windowSize + blockSize * 2 + 32 * 2; - ulong neededSize = frameContentSize < neededRBSize ? frameContentSize : neededRBSize; - nuint minRBSize = (nuint)neededSize; - if (minRBSize != neededSize) + nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) - ); + return err_code; } + } - return minRBSize; + { + nuint err_code = ZSTD_DCtx_refDDict(dctx, ddict); + if (ERR_isError(err_code)) + { + return err_code; + } } - /*===== Buffer-less streaming decompression functions =====*/ - public static nuint ZSTD_decodingBufferSize_min(ulong windowSize, ulong frameContentSize) + return ZSTD_startingInputLength(dctx->format); + } + + /* ZSTD_resetDStream() : + * return : expected size, aka ZSTD_startingInputLength(). + * this function cannot fail */ + public static nuint ZSTD_resetDStream(ZSTD_DCtx_s* dctx) + { { - return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, 1 << 17); + nuint err_code = ZSTD_DCtx_reset(dctx, ZSTD_ResetDirective.ZSTD_reset_session_only); + if (ERR_isError(err_code)) + { + return err_code; + } } - public static nuint ZSTD_estimateDStreamSize(nuint windowSize) + return ZSTD_startingInputLength(dctx->format); + } + + /*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ + * Reference a prepared dictionary, to be used to decompress next frames. + * The dictionary remains active for decompression of future frames using same DCtx. 
+ * + * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function + * will store the DDict references in a table, and the DDict used for decompression + * will be determined at decompression time, as per the dict ID in the frame. + * The memory for the table is allocated on the first call to refDDict, and can be + * freed with ZSTD_freeDCtx(). + * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: referencing a NULL DDict means "return to no-dictionary mode". + * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. + */ + public static nuint ZSTD_DCtx_refDDict(ZSTD_DCtx_s* dctx, ZSTD_DDict_s* ddict) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { - nuint blockSize = windowSize < 1 << 17 ? windowSize : 1 << 17; - /* no block can be larger */ - nuint inBuffSize = blockSize; - nuint outBuffSize = ZSTD_decodingBufferSize_min(windowSize, unchecked(0UL - 1)); - return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - public static nuint ZSTD_estimateDStreamSize_fromFrame(void* src, nuint srcSize) + ZSTD_clearDict(dctx); + if (ddict != null) { - /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ - uint windowSizeMax = 1U << (sizeof(nuint) == 4 ? 
30 : 31); - ZSTD_frameHeader zfh; - nuint err = ZSTD_getFrameHeader(&zfh, src, srcSize); - if (ERR_isError(err)) - return err; - if (err > 0) + dctx->ddict = ddict; + dctx->dictUses = ZSTD_dictUses_e.ZSTD_use_indefinitely; + if (dctx->refMultipleDDicts == ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + if (dctx->ddictSet == null) + { + dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); + if (dctx->ddictSet == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); + } + } - if (zfh.windowSize > windowSizeMax) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) - ); + assert(dctx->staticSize == 0); + { + nuint err_code = ZSTD_DDictHashSet_addDDict( + dctx->ddictSet, + ddict, + dctx->customMem + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } } - - return ZSTD_estimateDStreamSize((nuint)zfh.windowSize); } - /* ***** Decompression ***** */ - private static int ZSTD_DCtx_isOverflow( - ZSTD_DCtx_s* zds, - nuint neededInBuffSize, - nuint neededOutBuffSize - ) + return 0; + } + + /* ZSTD_DCtx_setMaxWindowSize() : + * note : no direct equivalence in ZSTD_DCtx_setParameter, + * since this version sets windowSize, and the other sets windowLog */ + public static nuint ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx_s* dctx, nuint maxWindowSize) + { + ZSTD_bounds bounds = ZSTD_dParam_getBounds(ZSTD_dParameter.ZSTD_d_windowLogMax); + nuint min = (nuint)1 << bounds.lowerBound; + nuint max = (nuint)1 << bounds.upperBound; + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { - return zds->inBuffSize + zds->outBuffSize >= (neededInBuffSize + neededOutBuffSize) * 3 - ? 
1 - : 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - private static void ZSTD_DCtx_updateOversizedDuration( - ZSTD_DCtx_s* zds, - nuint neededInBuffSize, - nuint neededOutBuffSize - ) + if (maxWindowSize < min) { - if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize) != 0) - zds->oversizedDuration++; - else - zds->oversizedDuration = 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - private static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DCtx_s* zds) + if (maxWindowSize > max) { - return zds->oversizedDuration >= 128 ? 1 : 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ - private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* output) + dctx->maxWindowSize = maxWindowSize; + return 0; + } + + /*! ZSTD_DCtx_setFormat() : + * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). + * Instruct the decoder context about what kind of data to decode next. + * This instruction is mandatory to decode data without a fully-formed header, + * such ZSTD_f_zstd1_magicless for example. + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ + public static nuint ZSTD_DCtx_setFormat(ZSTD_DCtx_s* dctx, ZSTD_format_e format) + { + return ZSTD_DCtx_setParameter( + dctx, + ZSTD_dParameter.ZSTD_d_experimentalParam1, + (int)format + ); + } + + /*! ZSTD_dParam_getBounds() : + * All parameters must belong to an interval with lower and upper bounds, + * otherwise they will either trigger an error or be automatically clamped. 
+ * @return : a structure, ZSTD_bounds, which contains + * - an error status field, which must be tested using ZSTD_isError() + * - both lower and upper bounds, inclusive + */ + public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) + { + ZSTD_bounds bounds = new ZSTD_bounds + { + error = 0, + lowerBound = 0, + upperBound = 0, + }; + switch (dParam) + { + case ZSTD_dParameter.ZSTD_d_windowLogMax: + bounds.lowerBound = 10; + bounds.upperBound = sizeof(nuint) == 4 ? 30 : 31; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + bounds.lowerBound = (int)ZSTD_format_e.ZSTD_f_zstd1; + bounds.upperBound = (int)ZSTD_format_e.ZSTD_f_zstd1_magicless; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + bounds.lowerBound = (int)ZSTD_bufferMode_e.ZSTD_bm_buffered; + bounds.upperBound = (int)ZSTD_bufferMode_e.ZSTD_bm_stable; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam3: + bounds.lowerBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_validateChecksum; + bounds.upperBound = (int)ZSTD_forceIgnoreChecksum_e.ZSTD_d_ignoreChecksum; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + bounds.lowerBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refSingleDDict; + bounds.upperBound = (int)ZSTD_refMultipleDDicts_e.ZSTD_rmd_refMultipleDDicts; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + bounds.lowerBound = 1 << 10; + bounds.upperBound = 1 << 17; + return bounds; + default: + break; + } + + bounds.error = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); + return bounds; + } + + /* ZSTD_dParam_withinBounds: + * @return 1 if value is within dParam bounds, + * 0 otherwise */ + private static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) + { + ZSTD_bounds bounds = ZSTD_dParam_getBounds(dParam); + if (ERR_isError(bounds.error)) 
+ return 0; + if (value < bounds.lowerBound) + return 0; + if (value > bounds.upperBound) + return 0; + return 1; + } + + /*! ZSTD_DCtx_getParameter() : + * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, + * and store it into int* value. + * @return : 0, or an error code (which can be tested with ZSTD_isError()). + */ + public static nuint ZSTD_DCtx_getParameter( + ZSTD_DCtx_s* dctx, + ZSTD_dParameter param, + int* value + ) + { + switch (param) { - ZSTD_outBuffer_s expect = zds->expectedOutBuffer; - if (zds->outBufferMode != ZSTD_bufferMode_e.ZSTD_bm_stable) + case ZSTD_dParameter.ZSTD_d_windowLogMax: + *value = (int)ZSTD_highbit32((uint)dctx->maxWindowSize); return 0; - if (zds->streamStage == ZSTD_dStreamStage.zdss_init) + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + *value = (int)dctx->format; return 0; - if ( - expect.dst == output->dst - && expect.pos == output->pos - && expect.size == output->size - ) + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + *value = (int)dctx->outBufferMode; return 0; - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong)); + case ZSTD_dParameter.ZSTD_d_experimentalParam3: + *value = (int)dctx->forceIgnoreChecksum; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + *value = (int)dctx->refMultipleDDicts; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + *value = dctx->disableHufAsm; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + *value = dctx->maxBlockSizeParam; + return 0; + default: + break; } - /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() - * and updates the stage and the output buffer state. This call is extracted so it can be - * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. - * NOTE: You must break after calling this function since the streamStage is modified. 
- */ - private static nuint ZSTD_decompressContinueStream( - ZSTD_DCtx_s* zds, - sbyte** op, - sbyte* oend, - void* src, - nuint srcSize - ) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + + /*! ZSTD_DCtx_setParameter() : + * Set one compression parameter, selected by enum ZSTD_dParameter. + * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). + * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). + * Setting a parameter is only possible during frame initialization (before starting decompression). + * @return : 0, or an error code (which can be tested using ZSTD_isError()). + */ + public static nuint ZSTD_DCtx_setParameter( + ZSTD_DCtx_s* dctx, + ZSTD_dParameter dParam, + int value + ) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); + } + + switch (dParam) { - int isSkipFrame = ZSTD_isSkipFrame(zds); - if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + case ZSTD_dParameter.ZSTD_d_windowLogMax: + if (value == 0) + value = 27; + { - nuint dstSize = isSkipFrame != 0 ? 
0 : zds->outBuffSize - zds->outStart; - nuint decodedSize = ZSTD_decompressContinue( - zds, - zds->outBuff + zds->outStart, - dstSize, - src, - srcSize - ); + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) + == 0 + ) { - nuint err_code = decodedSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } + } - if (decodedSize == 0 && isSkipFrame == 0) + dctx->maxWindowSize = (nuint)1 << value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam1: + { + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam1, + value + ) == 0 + ) { - zds->streamStage = ZSTD_dStreamStage.zdss_read; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } - else + } + + dctx->format = (ZSTD_format_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam2: + { + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam2, + value + ) == 0 + ) { - zds->outEnd = zds->outStart + decodedSize; - zds->streamStage = ZSTD_dStreamStage.zdss_flush; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } - else + + dctx->outBufferMode = (ZSTD_bufferMode_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam3: { - /* Write directly into the output buffer */ - nuint dstSize = isSkipFrame != 0 ? 
0 : (nuint)(oend - *op); - nuint decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam3, + value + ) == 0 + ) { - nuint err_code = decodedSize; - if (ERR_isError(err_code)) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + } + + dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam4: + { + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam4, + value + ) == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + } + + if (dctx->staticSize != 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) + ); + } + + dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam5: + { + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam5, + value + ) == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } + } + + dctx->disableHufAsm = value != 0 ? 1 : 0; + return 0; + case ZSTD_dParameter.ZSTD_d_experimentalParam6: + if (value != 0) + { + if ( + ZSTD_dParam_withinBounds( + ZSTD_dParameter.ZSTD_d_experimentalParam6, + value + ) == 0 + ) { - return err_code; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); } } - *op += decodedSize; - zds->streamStage = ZSTD_dStreamStage.zdss_read; - assert(*op <= oend); - assert(zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + dctx->maxBlockSizeParam = value; + return 0; + default: + break; + } + + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); + } + + /*! ZSTD_DCtx_reset() : + * Return a DCtx to clean state. + * Session and parameters can be reset jointly or separately. 
+ * Parameters can only be reset when no active frame is being decompressed. + * @return : 0, or an error code, which can be tested with ZSTD_isError() + */ + public static nuint ZSTD_DCtx_reset(ZSTD_DCtx_s* dctx, ZSTD_ResetDirective reset) + { + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_session_only + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) + { + dctx->streamStage = ZSTD_dStreamStage.zdss_init; + dctx->noForwardProgress = 0; + dctx->isFrameDecompression = 1; + } + + if ( + reset == ZSTD_ResetDirective.ZSTD_reset_parameters + || reset == ZSTD_ResetDirective.ZSTD_reset_session_and_parameters + ) + { + if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - return 0; + ZSTD_clearDict(dctx); + ZSTD_DCtx_resetParameters(dctx); } - /*! ZSTD_decompressStream() : - * Streaming decompression function. - * Call repetitively to consume full input updating it as necessary. - * Function will update both input and output `pos` fields exposing current state via these fields: - * - `input.pos < input.size`, some input remaining and caller should provide remaining input - * on the next call. - * - `output.pos < output.size`, decoder flushed internal output buffer. - * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, - * check ZSTD_decompressStream() @return value, - * if > 0, invoke it again to flush remaining data to output. - * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. - * - * @return : 0 when a frame is completely decoded and fully flushed, - * or an error code, which can be tested using ZSTD_isError(), - * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. 
+ return 0; + } + + public static nuint ZSTD_sizeof_DStream(ZSTD_DCtx_s* dctx) + { + return ZSTD_sizeof_DCtx(dctx); + } + + private static nuint ZSTD_decodingBufferSize_internal( + ulong windowSize, + ulong frameContentSize, + nuint blockSizeMax + ) + { + nuint blockSize = + (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) < blockSizeMax + ? (nuint)(windowSize < 1 << 17 ? windowSize : 1 << 17) + : blockSizeMax; + /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block + * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing + * the block at the beginning of the output buffer, and maintain a full window. * - * Note: when an operation returns with an error code, the @zds state may be left in undefined state. - * It's UB to invoke `ZSTD_decompressStream()` on such a state. - * In order to re-use such a state, it must be first reset, - * which can be done explicitly (`ZSTD_DCtx_reset()`), - * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) + * We need another blockSize worth of buffer so that we can store split + * literals at the end of the block without overwriting the extDict window. */ - public static nuint ZSTD_decompressStream( - ZSTD_DCtx_s* zds, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input - ) + ulong neededRBSize = windowSize + blockSize * 2 + 32 * 2; + ulong neededSize = frameContentSize < neededRBSize ? 
frameContentSize : neededRBSize; + nuint minRBSize = (nuint)neededSize; + if (minRBSize != neededSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); + } + + return minRBSize; + } + + /*===== Buffer-less streaming decompression functions =====*/ + public static nuint ZSTD_decodingBufferSize_min(ulong windowSize, ulong frameContentSize) + { + return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, 1 << 17); + } + + public static nuint ZSTD_estimateDStreamSize(nuint windowSize) + { + nuint blockSize = windowSize < 1 << 17 ? windowSize : 1 << 17; + /* no block can be larger */ + nuint inBuffSize = blockSize; + nuint outBuffSize = ZSTD_decodingBufferSize_min(windowSize, unchecked(0UL - 1)); + return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; + } + + public static nuint ZSTD_estimateDStreamSize_fromFrame(void* src, nuint srcSize) + { + /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ + uint windowSizeMax = 1U << (sizeof(nuint) == 4 ? 30 : 31); + ZSTD_frameHeader zfh; + nuint err = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ERR_isError(err)) + return err; + if (err > 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (zfh.windowSize > windowSizeMax) { - sbyte* src = (sbyte*)input->src; - sbyte* istart = input->pos != 0 ? src + input->pos : src; - sbyte* iend = input->size != 0 ? src + input->size : src; - sbyte* ip = istart; - sbyte* dst = (sbyte*)output->dst; - sbyte* ostart = output->pos != 0 ? dst + output->pos : dst; - sbyte* oend = output->size != 0 ? 
dst + output->size : dst; - sbyte* op = ostart; - uint someMoreWork = 1; - assert(zds != null); - if (input->pos > input->size) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) + ); + } + + return ZSTD_estimateDStreamSize((nuint)zfh.windowSize); + } + + /* ***** Decompression ***** */ + private static int ZSTD_DCtx_isOverflow( + ZSTD_DCtx_s* zds, + nuint neededInBuffSize, + nuint neededOutBuffSize + ) + { + return zds->inBuffSize + zds->outBuffSize >= (neededInBuffSize + neededOutBuffSize) * 3 + ? 1 + : 0; + } + + private static void ZSTD_DCtx_updateOversizedDuration( + ZSTD_DCtx_s* zds, + nuint neededInBuffSize, + nuint neededOutBuffSize + ) + { + if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize) != 0) + zds->oversizedDuration++; + else + zds->oversizedDuration = 0; + } + + private static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DCtx_s* zds) + { + return zds->oversizedDuration >= 128 ? 1 : 0; + } + + /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ + private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* output) + { + ZSTD_outBuffer_s expect = zds->expectedOutBuffer; + if (zds->outBufferMode != ZSTD_bufferMode_e.ZSTD_bm_stable) + return 0; + if (zds->streamStage == ZSTD_dStreamStage.zdss_init) + return 0; + if ( + expect.dst == output->dst + && expect.pos == output->pos + && expect.size == output->size + ) + return 0; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong)); + } + + /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() + * and updates the stage and the output buffer state. This call is extracted so it can be + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. + * NOTE: You must break after calling this function since the streamStage is modified. 
+ */ + private static nuint ZSTD_decompressContinueStream( + ZSTD_DCtx_s* zds, + sbyte** op, + sbyte* oend, + void* src, + nuint srcSize + ) + { + int isSkipFrame = ZSTD_isSkipFrame(zds); + if (zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered) + { + nuint dstSize = isSkipFrame != 0 ? 0 : zds->outBuffSize - zds->outStart; + nuint decodedSize = ZSTD_decompressContinue( + zds, + zds->outBuff + zds->outStart, + dstSize, + src, + srcSize + ); { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + nuint err_code = decodedSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - if (output->pos > output->size) + if (decodedSize == 0 && isSkipFrame == 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + zds->streamStage = ZSTD_dStreamStage.zdss_read; + } + else + { + zds->outEnd = zds->outStart + decodedSize; + zds->streamStage = ZSTD_dStreamStage.zdss_flush; + } + } + else + { + /* Write directly into the output buffer */ + nuint dstSize = isSkipFrame != 0 ? 0 : (nuint)(oend - *op); + nuint decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); + { + nuint err_code = decodedSize; + if (ERR_isError(err_code)) + { + return err_code; + } } + *op += decodedSize; + zds->streamStage = ZSTD_dStreamStage.zdss_read; + assert(*op <= oend); + assert(zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); + } + + return 0; + } + + /*! ZSTD_decompressStream() : + * Streaming decompression function. + * Call repetitively to consume full input updating it as necessary. + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. + * - `output.pos < output.size`, decoder flushed internal output buffer. 
+ * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, + * check ZSTD_decompressStream() @return value, + * if > 0, invoke it again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + * + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + * + * Note: when an operation returns with an error code, the @zds state may be left in undefined state. + * It's UB to invoke `ZSTD_decompressStream()` on such a state. + * In order to re-use such a state, it must be first reset, + * which can be done explicitly (`ZSTD_DCtx_reset()`), + * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`) + */ + public static nuint ZSTD_decompressStream( + ZSTD_DCtx_s* zds, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input + ) + { + sbyte* src = (sbyte*)input->src; + sbyte* istart = input->pos != 0 ? src + input->pos : src; + sbyte* iend = input->size != 0 ? src + input->size : src; + sbyte* ip = istart; + sbyte* dst = (sbyte*)output->dst; + sbyte* ostart = output->pos != 0 ? dst + output->pos : dst; + sbyte* oend = output->size != 0 ? 
dst + output->size : dst; + sbyte* op = ostart; + uint someMoreWork = 1; + assert(zds != null); + if (input->pos > input->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if (output->pos > output->size) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + { + nuint err_code = ZSTD_checkOutBuffer(zds, output); + if (ERR_isError(err_code)) { - nuint err_code = ZSTD_checkOutBuffer(zds, output); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } + } - while (someMoreWork != 0) + while (someMoreWork != 0) + { + switch (zds->streamStage) { - switch (zds->streamStage) + case ZSTD_dStreamStage.zdss_init: + zds->streamStage = ZSTD_dStreamStage.zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + zds->hostageByte = 0; + zds->expectedOutBuffer = *output; + goto case ZSTD_dStreamStage.zdss_loadHeader; + case ZSTD_dStreamStage.zdss_loadHeader: { - case ZSTD_dStreamStage.zdss_init: - zds->streamStage = ZSTD_dStreamStage.zdss_loadHeader; - zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; - zds->hostageByte = 0; - zds->expectedOutBuffer = *output; - goto case ZSTD_dStreamStage.zdss_loadHeader; - case ZSTD_dStreamStage.zdss_loadHeader: - { - nuint hSize = ZSTD_getFrameHeader_advanced( - &zds->fParams, - zds->headerBuffer, - zds->lhSize, - zds->format - ); - if (zds->refMultipleDDicts != default && zds->ddictSet != null) - { - ZSTD_DCtx_selectFrameDDict(zds); - } - - if (ERR_isError(hSize)) - { - return hSize; - } - - if (hSize != 0) - { - /* if hSize!=0, hSize > zds->lhSize */ - nuint toLoad = hSize - zds->lhSize; - nuint remainingInput = (nuint)(iend - ip); - assert(iend >= ip); - if (toLoad > remainingInput) - { - if (remainingInput > 0) - { - memcpy( - zds->headerBuffer + zds->lhSize, - ip, - (uint)remainingInput - ); - zds->lhSize += remainingInput; - } - - input->pos = input->size; - { - /* check first few bytes */ - nuint err_code 
= ZSTD_getFrameHeader_advanced( - &zds->fParams, - zds->headerBuffer, - zds->lhSize, - zds->format - ); - if (ERR_isError(err_code)) - { - return err_code; - } - } - - return ( - (nuint)( - zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 - ) > hSize - ? (nuint)( - zds->format == ZSTD_format_e.ZSTD_f_zstd1 - ? 6 - : 2 - ) - : hSize - ) - - zds->lhSize - + ZSTD_blockHeaderSize; - } + nuint hSize = ZSTD_getFrameHeader_advanced( + &zds->fParams, + zds->headerBuffer, + zds->lhSize, + zds->format + ); + if (zds->refMultipleDDicts != default && zds->ddictSet != null) + { + ZSTD_DCtx_selectFrameDDict(zds); + } - assert(ip != null); - memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)toLoad); - zds->lhSize = hSize; - ip += toLoad; - break; - } - } + if (ERR_isError(hSize)) + { + return hSize; + } - if ( - zds->fParams.frameContentSize != unchecked(0UL - 1) - && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame - && (nuint)(oend - op) >= zds->fParams.frameContentSize - ) + if (hSize != 0) + { + /* if hSize!=0, hSize > zds->lhSize */ + nuint toLoad = hSize - zds->lhSize; + nuint remainingInput = (nuint)(iend - ip); + assert(iend >= ip); + if (toLoad > remainingInput) { - nuint cSize = ZSTD_findFrameCompressedSize_advanced( - istart, - (nuint)(iend - istart), - zds->format - ); - if (cSize <= (nuint)(iend - istart)) + if (remainingInput > 0) { - /* shortcut : using single-pass mode */ - nuint decompressedSize = ZSTD_decompress_usingDDict( - zds, - op, - (nuint)(oend - op), - istart, - cSize, - ZSTD_getDDict(zds) + memcpy( + zds->headerBuffer + zds->lhSize, + ip, + (uint)remainingInput ); - if (ERR_isError(decompressedSize)) - return decompressedSize; - assert(istart != null); - ip = istart + cSize; - op = op != null ? 
op + decompressedSize : op; - zds->expected = 0; - zds->streamStage = ZSTD_dStreamStage.zdss_init; - someMoreWork = 0; - break; - } - } - - if ( - zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable - && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame - && zds->fParams.frameContentSize != unchecked(0UL - 1) - && (nuint)(oend - op) < zds->fParams.frameContentSize - ) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } - - { - nuint err_code = ZSTD_decompressBegin_usingDDict( - zds, - ZSTD_getDDict(zds) - ); - if (ERR_isError(err_code)) - { - return err_code; + zds->lhSize += remainingInput; } - } - if ( - zds->format == ZSTD_format_e.ZSTD_f_zstd1 - && (MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0) == 0x184D2A50 - ) - { - zds->expected = MEM_readLE32(zds->headerBuffer + 4); - zds->stage = ZSTD_dStage.ZSTDds_skipFrame; - } - else - { + input->pos = input->size; { - nuint err_code = ZSTD_decodeFrameHeader( - zds, + /* check first few bytes */ + nuint err_code = ZSTD_getFrameHeader_advanced( + &zds->fParams, zds->headerBuffer, - zds->lhSize + zds->lhSize, + zds->format ); if (ERR_isError(err_code)) { @@ -3173,195 +3071,222 @@ public static nuint ZSTD_decompressStream( } } - zds->expected = ZSTD_blockHeaderSize; - zds->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + return ( + (nuint)( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 + ) > hSize + ? (nuint)( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 + ? 6 + : 2 + ) + : hSize + ) + - zds->lhSize + + ZSTD_blockHeaderSize; } - zds->fParams.windowSize = - zds->fParams.windowSize > 1U << 10 ? 
zds->fParams.windowSize : 1U << 10; - if (zds->fParams.windowSize > zds->maxWindowSize) + assert(ip != null); + memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)toLoad); + zds->lhSize = hSize; + ip += toLoad; + break; + } + } + + if ( + zds->fParams.frameContentSize != unchecked(0UL - 1) + && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame + && (nuint)(oend - op) >= zds->fParams.frameContentSize + ) + { + nuint cSize = ZSTD_findFrameCompressedSize_advanced( + istart, + (nuint)(iend - istart), + zds->format + ); + if (cSize <= (nuint)(iend - istart)) { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge - ) + /* shortcut : using single-pass mode */ + nuint decompressedSize = ZSTD_decompress_usingDDict( + zds, + op, + (nuint)(oend - op), + istart, + cSize, + ZSTD_getDDict(zds) ); + if (ERR_isError(decompressedSize)) + return decompressedSize; + assert(istart != null); + ip = istart + cSize; + op = op != null ? op + decompressedSize : op; + zds->expected = 0; + zds->streamStage = ZSTD_dStreamStage.zdss_init; + someMoreWork = 0; + break; } + } - if (zds->maxBlockSizeParam != 0) - zds->fParams.blockSizeMax = - zds->fParams.blockSizeMax < (uint)zds->maxBlockSizeParam - ? 
zds->fParams.blockSizeMax - : (uint)zds->maxBlockSizeParam; + if ( + zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + && zds->fParams.frameType != ZSTD_frameType_e.ZSTD_skippableFrame + && zds->fParams.frameContentSize != unchecked(0UL - 1) + && (nuint)(oend - op) < zds->fParams.frameContentSize + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } + + { + nuint err_code = ZSTD_decompressBegin_usingDDict( + zds, + ZSTD_getDDict(zds) + ); + if (ERR_isError(err_code)) + { + return err_code; + } + } + if ( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 + && (MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0) == 0x184D2A50 + ) + { + zds->expected = MEM_readLE32(zds->headerBuffer + 4); + zds->stage = ZSTD_dStage.ZSTDds_skipFrame; + } + else + { { - /* frame checksum */ - nuint neededInBuffSize = - zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; - nuint neededOutBuffSize = - zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? ZSTD_decodingBufferSize_internal( - zds->fParams.windowSize, - zds->fParams.frameContentSize, - zds->fParams.blockSizeMax - ) - : 0; - ZSTD_DCtx_updateOversizedDuration( + nuint err_code = ZSTD_decodeFrameHeader( zds, - neededInBuffSize, - neededOutBuffSize + zds->headerBuffer, + zds->lhSize ); + if (ERR_isError(err_code)) { - int tooSmall = - zds->inBuffSize < neededInBuffSize - || zds->outBuffSize < neededOutBuffSize - ? 
1 - : 0; - int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); - if (tooSmall != 0 || tooLarge != 0) - { - nuint bufferSize = neededInBuffSize + neededOutBuffSize; - if (zds->staticSize != 0) - { - assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); - if ( - bufferSize - > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s) - ) - { - return unchecked( - (nuint)( - -(int) - ZSTD_ErrorCode.ZSTD_error_memory_allocation - ) - ); - } - } - else - { - ZSTD_customFree(zds->inBuff, zds->customMem); - zds->inBuffSize = 0; - zds->outBuffSize = 0; - zds->inBuff = (sbyte*)ZSTD_customMalloc( - bufferSize, - zds->customMem - ); - if (zds->inBuff == null) - { - return unchecked( - (nuint)( - -(int) - ZSTD_ErrorCode.ZSTD_error_memory_allocation - ) - ); - } - } - - zds->inBuffSize = neededInBuffSize; - zds->outBuff = zds->inBuff + zds->inBuffSize; - zds->outBuffSize = neededOutBuffSize; - } + return err_code; } } - zds->streamStage = ZSTD_dStreamStage.zdss_read; - goto case ZSTD_dStreamStage.zdss_read; - case ZSTD_dStreamStage.zdss_read: - { - nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize( - zds, - (nuint)(iend - ip) - ); - if (neededInSize == 0) - { - zds->streamStage = ZSTD_dStreamStage.zdss_init; - someMoreWork = 0; - break; - } + zds->expected = ZSTD_blockHeaderSize; + zds->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; + } + + zds->fParams.windowSize = + zds->fParams.windowSize > 1U << 10 ? zds->fParams.windowSize : 1U << 10; + if (zds->fParams.windowSize > zds->maxWindowSize) + { + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge + ) + ); + } - if ((nuint)(iend - ip) >= neededInSize) + if (zds->maxBlockSizeParam != 0) + zds->fParams.blockSizeMax = + zds->fParams.blockSizeMax < (uint)zds->maxBlockSizeParam + ? zds->fParams.blockSizeMax + : (uint)zds->maxBlockSizeParam; + + { + /* frame checksum */ + nuint neededInBuffSize = + zds->fParams.blockSizeMax > 4 ? 
zds->fParams.blockSizeMax : 4; + nuint neededOutBuffSize = + zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_decodingBufferSize_internal( + zds->fParams.windowSize, + zds->fParams.frameContentSize, + zds->fParams.blockSizeMax + ) + : 0; + ZSTD_DCtx_updateOversizedDuration( + zds, + neededInBuffSize, + neededOutBuffSize + ); + { + int tooSmall = + zds->inBuffSize < neededInBuffSize + || zds->outBuffSize < neededOutBuffSize + ? 1 + : 0; + int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); + if (tooSmall != 0 || tooLarge != 0) + { + nuint bufferSize = neededInBuffSize + neededOutBuffSize; + if (zds->staticSize != 0) { + assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); + if ( + bufferSize + > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s) + ) { - nuint err_code = ZSTD_decompressContinueStream( - zds, - &op, - oend, - ip, - neededInSize + return unchecked( + (nuint)( + -(int) + ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) ); - if (ERR_isError(err_code)) - { - return err_code; - } } - - assert(ip != null); - ip += neededInSize; - break; } - } - - if (ip == iend) - { - someMoreWork = 0; - break; - } - - zds->streamStage = ZSTD_dStreamStage.zdss_load; - goto case ZSTD_dStreamStage.zdss_load; - case ZSTD_dStreamStage.zdss_load: - { - nuint neededInSize = ZSTD_nextSrcSizeToDecompress(zds); - nuint toLoad = neededInSize - zds->inPos; - int isSkipFrame = ZSTD_isSkipFrame(zds); - nuint loadedSize; - assert( - neededInSize - == ZSTD_nextSrcSizeToDecompressWithInputSize( - zds, - (nuint)(iend - ip) - ) - ); - if (isSkipFrame != 0) - { - loadedSize = toLoad < (nuint)(iend - ip) ? 
toLoad : (nuint)(iend - ip); - } - else - { - if (toLoad > zds->inBuffSize - zds->inPos) + else { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ZSTD_customFree(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (sbyte*)ZSTD_customMalloc( + bufferSize, + zds->customMem ); + if (zds->inBuff == null) + { + return unchecked( + (nuint)( + -(int) + ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) + ); + } } - loadedSize = ZSTD_limitCopy( - zds->inBuff + zds->inPos, - toLoad, - ip, - (nuint)(iend - ip) - ); - } - - if (loadedSize != 0) - { - ip += loadedSize; - zds->inPos += loadedSize; + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; } + } + } - if (loadedSize < toLoad) - { - someMoreWork = 0; - break; - } + zds->streamStage = ZSTD_dStreamStage.zdss_read; + goto case ZSTD_dStreamStage.zdss_read; + case ZSTD_dStreamStage.zdss_read: + { + nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize( + zds, + (nuint)(iend - ip) + ); + if (neededInSize == 0) + { + zds->streamStage = ZSTD_dStreamStage.zdss_init; + someMoreWork = 0; + break; + } - zds->inPos = 0; + if ((nuint)(iend - ip) >= neededInSize) + { { nuint err_code = ZSTD_decompressContinueStream( zds, &op, oend, - zds->inBuff, + ip, neededInSize ); if (ERR_isError(err_code)) @@ -3370,143 +3295,217 @@ public static nuint ZSTD_decompressStream( } } + assert(ip != null); + ip += neededInSize; + break; + } + } + + if (ip == iend) + { + someMoreWork = 0; break; } - case ZSTD_dStreamStage.zdss_flush: + zds->streamStage = ZSTD_dStreamStage.zdss_load; + goto case ZSTD_dStreamStage.zdss_load; + case ZSTD_dStreamStage.zdss_load: + { + nuint neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + nuint toLoad = neededInSize - zds->inPos; + int isSkipFrame = ZSTD_isSkipFrame(zds); + nuint loadedSize; + assert( + neededInSize + == ZSTD_nextSrcSizeToDecompressWithInputSize( + 
zds, + (nuint)(iend - ip) + ) + ); + if (isSkipFrame != 0) + { + loadedSize = toLoad < (nuint)(iend - ip) ? toLoad : (nuint)(iend - ip); + } + else + { + if (toLoad > zds->inBuffSize - zds->inPos) { - nuint toFlushSize = zds->outEnd - zds->outStart; - nuint flushedSize = ZSTD_limitCopy( - op, - (nuint)(oend - op), - zds->outBuff + zds->outStart, - toFlushSize + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - op = op != null ? op + flushedSize : op; - zds->outStart += flushedSize; - if (flushedSize == toFlushSize) - { - zds->streamStage = ZSTD_dStreamStage.zdss_read; - if ( - zds->outBuffSize < zds->fParams.frameContentSize - && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize - ) - { - zds->outStart = zds->outEnd = 0; - } - - break; - } } + loadedSize = ZSTD_limitCopy( + zds->inBuff + zds->inPos, + toLoad, + ip, + (nuint)(iend - ip) + ); + } + + if (loadedSize != 0) + { + ip += loadedSize; + zds->inPos += loadedSize; + } + + if (loadedSize < toLoad) + { someMoreWork = 0; break; - default: - assert(0 != 0); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); - } - } + } - input->pos = (nuint)(ip - (sbyte*)input->src); - output->pos = (nuint)(op - (sbyte*)output->dst); - zds->expectedOutBuffer = *output; - if (ip == istart && op == ostart) - { - zds->noForwardProgress++; - if (zds->noForwardProgress >= 16) - { - if (op == oend) + zds->inPos = 0; { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull) + nuint err_code = ZSTD_decompressContinueStream( + zds, + &op, + oend, + zds->inBuff, + neededInSize ); + if (ERR_isError(err_code)) + { + return err_code; + } } - if (ip == iend) + break; + } + + case ZSTD_dStreamStage.zdss_flush: + { + nuint toFlushSize = zds->outEnd - zds->outStart; + nuint flushedSize = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zds->outBuff + zds->outStart, + toFlushSize + ); + op = op != null ? 
op + flushedSize : op; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty) - ); + zds->streamStage = ZSTD_dStreamStage.zdss_read; + if ( + zds->outBuffSize < zds->fParams.frameContentSize + && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize + ) + { + zds->outStart = zds->outEnd = 0; + } + + break; } + } + someMoreWork = 0; + break; + default: assert(0 != 0); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } - else + } + + input->pos = (nuint)(ip - (sbyte*)input->src); + output->pos = (nuint)(op - (sbyte*)output->dst); + zds->expectedOutBuffer = *output; + if (ip == istart && op == ostart) + { + zds->noForwardProgress++; + if (zds->noForwardProgress >= 16) { - zds->noForwardProgress = 0; + if (op == oend) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_destFull) + ); + } + + if (ip == iend) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_noForwardProgress_inputEmpty) + ); + } + + assert(0 != 0); } + } + else + { + zds->noForwardProgress = 0; + } + { + nuint nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); + if (nextSrcSizeHint == 0) { - nuint nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); - if (nextSrcSizeHint == 0) + if (zds->outEnd == zds->outStart) { - if (zds->outEnd == zds->outStart) + if (zds->hostageByte != 0) { - if (zds->hostageByte != 0) + if (input->pos >= input->size) { - if (input->pos >= input->size) - { - zds->streamStage = ZSTD_dStreamStage.zdss_read; - return 1; - } - - input->pos++; + zds->streamStage = ZSTD_dStreamStage.zdss_read; + return 1; } - return 0; + input->pos++; } - if (zds->hostageByte == 0) - { - input->pos--; - zds->hostageByte = 1; - } + return 0; + } - return 1; + if (zds->hostageByte == 0) + { + input->pos--; + zds->hostageByte = 1; } - nextSrcSizeHint += - ZSTD_blockHeaderSize - * (nuint)( - ZSTD_nextInputType(zds) == 
ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0 - ); - assert(zds->inPos <= nextSrcSizeHint); - nextSrcSizeHint -= zds->inPos; - return nextSrcSizeHint; + return 1; } + + nextSrcSizeHint += + ZSTD_blockHeaderSize + * (nuint)( + ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0 + ); + assert(zds->inPos <= nextSrcSizeHint); + nextSrcSizeHint -= zds->inPos; + return nextSrcSizeHint; } + } - /*! ZSTD_decompressStream_simpleArgs() : - * Same as ZSTD_decompressStream(), - * but using only integral types as arguments. - * This can be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. - */ - public static nuint ZSTD_decompressStream_simpleArgs( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - nuint* dstPos, - void* src, - nuint srcSize, - nuint* srcPos - ) - { - ZSTD_outBuffer_s output; - ZSTD_inBuffer_s input; - output.dst = dst; - output.size = dstCapacity; - output.pos = *dstPos; - input.src = src; - input.size = srcSize; - input.pos = *srcPos; - { - nuint cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; - } + /*! ZSTD_decompressStream_simpleArgs() : + * Same as ZSTD_decompressStream(), + * but using only integral types as arguments. + * This can be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. 
+ */ + public static nuint ZSTD_decompressStream_simpleArgs( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + nuint* dstPos, + void* src, + nuint srcSize, + nuint* srcPos + ) + { + ZSTD_outBuffer_s output; + ZSTD_inBuffer_s input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { + nuint cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs index a91c728a8..380f9525a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs @@ -1,1290 +1,1290 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*_******************************************************* + * Memory operations + **********************************************************/ + private static void ZSTD_copy4(void* dst, void* src) { - /*_******************************************************* - * Memory operations - **********************************************************/ - private static void ZSTD_copy4(void* dst, void* src) - { - memcpy(dst, src, 4); - } + memcpy(dst, src, 4); + } - /*-************************************************************* - * Block decoding - ***************************************************************/ - private static nuint ZSTD_blockSizeMax(ZSTD_DCtx_s* dctx) + 
/*-************************************************************* + * Block decoding + ***************************************************************/ + private static nuint ZSTD_blockSizeMax(ZSTD_DCtx_s* dctx) + { + nuint blockSizeMax = + dctx->isFrameDecompression != 0 ? dctx->fParams.blockSizeMax : 1 << 17; + assert(blockSizeMax <= 1 << 17); + return blockSizeMax; + } + + /*! ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ + private static nuint ZSTD_getcBlockSize(void* src, nuint srcSize, blockProperties_t* bpPtr) + { + if (srcSize < ZSTD_blockHeaderSize) { - nuint blockSizeMax = - dctx->isFrameDecompression != 0 ? dctx->fParams.blockSizeMax : 1 << 17; - assert(blockSizeMax <= 1 << 17); - return blockSizeMax; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /*! ZSTD_getcBlockSize() : - * Provides the size of compressed block from block header `src` */ - private static nuint ZSTD_getcBlockSize(void* src, nuint srcSize, blockProperties_t* bpPtr) { - if (srcSize < ZSTD_blockHeaderSize) + uint cBlockHeader = MEM_readLE24(src); + uint cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)(cBlockHeader >> 1 & 3); + bpPtr->origSize = cSize; + if (bpPtr->blockType == blockType_e.bt_rle) + return 1; + if (bpPtr->blockType == blockType_e.bt_reserved) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - { - uint cBlockHeader = MEM_readLE24(src); - uint cSize = cBlockHeader >> 3; - bpPtr->lastBlock = cBlockHeader & 1; - bpPtr->blockType = (blockType_e)(cBlockHeader >> 1 & 3); - bpPtr->origSize = cSize; - if (bpPtr->blockType == blockType_e.bt_rle) - return 1; - if (bpPtr->blockType == blockType_e.bt_reserved) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - return cSize; - } + return cSize; } + } - /* 
Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ - private static void ZSTD_allocateLiteralsBuffer( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - nuint litSize, - streaming_operation streaming, - nuint expectedWriteSize, - uint splitImmediately + /* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ + private static void ZSTD_allocateLiteralsBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + nuint litSize, + streaming_operation streaming, + nuint expectedWriteSize, + uint splitImmediately + ) + { + nuint blockSizeMax = ZSTD_blockSizeMax(dctx); + assert(litSize <= blockSizeMax); + assert( + dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming + ); + assert(expectedWriteSize <= blockSizeMax); + if ( + streaming == streaming_operation.not_streaming + && dstCapacity > blockSizeMax + 32 + litSize + 32 ) { - nuint blockSizeMax = ZSTD_blockSizeMax(dctx); - assert(litSize <= blockSizeMax); - assert( - dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming - ); - assert(expectedWriteSize <= blockSizeMax); - if ( - streaming == streaming_operation.not_streaming - && dstCapacity > blockSizeMax + 32 + litSize + 32 - ) - { - dctx->litBuffer = (byte*)dst + blockSizeMax + 32; - dctx->litBufferEnd = dctx->litBuffer + litSize; - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_in_dst; - } - else if (litSize <= 1 << 16) + dctx->litBuffer = (byte*)dst + blockSizeMax + 32; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_in_dst; + } + else if (litSize <= 1 << 16) + { + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + } + else + { + assert(blockSizeMax > 1 
<< 16); + if (splitImmediately != 0) { - dctx->litBuffer = dctx->litExtraBuffer; - dctx->litBufferEnd = dctx->litBuffer + litSize; - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize + (1 << 16) - 32; + dctx->litBufferEnd = dctx->litBuffer + litSize - (1 << 16); } else { - assert(blockSizeMax > 1 << 16); - if (splitImmediately != 0) - { - dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize + (1 << 16) - 32; - dctx->litBufferEnd = dctx->litBuffer + litSize - (1 << 16); - } - else - { - dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize; - dctx->litBufferEnd = (byte*)dst + expectedWriteSize; - } + dctx->litBuffer = (byte*)dst + expectedWriteSize - litSize; + dctx->litBufferEnd = (byte*)dst + expectedWriteSize; + } - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_split; - assert(dctx->litBufferEnd <= (byte*)dst + expectedWriteSize); - } - } - - /*! ZSTD_decodeLiteralsBlock() : - * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored - * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current - * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being - * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. - * - * @return : nb of bytes read from src (< srcSize ) - * note : symbol not declared but exposed for fullbench */ - private static nuint ZSTD_decodeLiteralsBlock( - ZSTD_DCtx_s* dctx, - void* src, - nuint srcSize, - void* dst, - nuint dstCapacity, - streaming_operation streaming - ) + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_split; + assert(dctx->litBufferEnd <= (byte*)dst + expectedWriteSize); + } + } + + /*! 
ZSTD_decodeLiteralsBlock() : + * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored + * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current + * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being + * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. + * + * @return : nb of bytes read from src (< srcSize ) + * note : symbol not declared but exposed for fullbench */ + private static nuint ZSTD_decodeLiteralsBlock( + ZSTD_DCtx_s* dctx, + void* src, + nuint srcSize, + void* dst, + nuint dstCapacity, + streaming_operation streaming + ) + { + if (srcSize < 1 + 1) { - if (srcSize < 1 + 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + { + byte* istart = (byte*)src; + SymbolEncodingType_e litEncType = (SymbolEncodingType_e)(istart[0] & 3); + nuint blockSizeMax = ZSTD_blockSizeMax(dctx); + switch (litEncType) { - byte* istart = (byte*)src; - SymbolEncodingType_e litEncType = (SymbolEncodingType_e)(istart[0] & 3); - nuint blockSizeMax = ZSTD_blockSizeMax(dctx); - switch (litEncType) + case SymbolEncodingType_e.set_repeat: + if (dctx->litEntropy == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) + ); + } + + goto case SymbolEncodingType_e.set_compressed; + case SymbolEncodingType_e.set_compressed: + if (srcSize < 5) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + { - case SymbolEncodingType_e.set_repeat: - if (dctx->litEntropy == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); - } + nuint lhSize, + litSize, + litCSize; + uint singleStream = 0; + uint lhlCode = 
(uint)(istart[0] >> 2 & 3); + uint lhc = MEM_readLE32(istart); + nuint hufSuccess; + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + int flags = + 0 + | ( + ZSTD_DCtx_get_bmi2(dctx) != 0 + ? (int)HUF_flags_e.HUF_flags_bmi2 + : 0 + ) + | ( + dctx->disableHufAsm != 0 + ? (int)HUF_flags_e.HUF_flags_disableAsm + : 0 + ); + switch (lhlCode) + { + case 0: + case 1: + default: + singleStream = lhlCode == 0 ? 1U : 0U; + lhSize = 3; + litSize = lhc >> 4 & 0x3FF; + litCSize = lhc >> 14 & 0x3FF; + break; + case 2: + lhSize = 4; + litSize = lhc >> 4 & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + lhSize = 5; + litSize = lhc >> 4 & 0x3FFFF; + litCSize = (lhc >> 22) + ((nuint)istart[4] << 10); + break; + } + + if (litSize > 0 && dst == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } + + if (litSize > blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - goto case SymbolEncodingType_e.set_compressed; - case SymbolEncodingType_e.set_compressed: - if (srcSize < 5) + if (singleStream == 0) + if (litSize < 6) { return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong + ) ); } - { - nuint lhSize, - litSize, - litCSize; - uint singleStream = 0; - uint lhlCode = (uint)(istart[0] >> 2 & 3); - uint lhc = MEM_readLE32(istart); - nuint hufSuccess; - nuint expectedWriteSize = - blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; - int flags = - 0 - | ( - ZSTD_DCtx_get_bmi2(dctx) != 0 - ? (int)HUF_flags_e.HUF_flags_bmi2 - : 0 - ) - | ( - dctx->disableHufAsm != 0 - ? (int)HUF_flags_e.HUF_flags_disableAsm - : 0 - ); - switch (lhlCode) - { - case 0: - case 1: - default: - singleStream = lhlCode == 0 ? 
1U : 0U; - lhSize = 3; - litSize = lhc >> 4 & 0x3FF; - litCSize = lhc >> 14 & 0x3FF; - break; - case 2: - lhSize = 4; - litSize = lhc >> 4 & 0x3FFF; - litCSize = lhc >> 18; - break; - case 3: - lhSize = 5; - litSize = lhc >> 4 & 0x3FFFF; - litCSize = (lhc >> 22) + ((nuint)istart[4] << 10); - break; - } - - if (litSize > 0 && dst == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } - - if (litSize > blockSizeMax) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - if (singleStream == 0) - if (litSize < 6) - { - return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong - ) - ); - } - - if (litCSize + lhSize > srcSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + if (litCSize + lhSize > srcSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - if (expectedWriteSize < litSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } + if (expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - ZSTD_allocateLiteralsBuffer( - dctx, - dst, - dstCapacity, - litSize, - streaming, - expectedWriteSize, - 0 - ); - if (dctx->ddictIsCold != 0 && litSize > 768) - { - sbyte* _ptr = (sbyte*)dctx->HUFptr; - const nuint _size = sizeof(uint) * 4097; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) - { + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 0 + ); + if (dctx->ddictIsCold != 0 && litSize > 768) + { + sbyte* _ptr = (sbyte*)dctx->HUFptr; + const nuint _size = sizeof(uint) * 4097; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); } #endif - } 
- } - - if (litEncType == SymbolEncodingType_e.set_repeat) - { - if (singleStream != 0) - { - hufSuccess = HUF_decompress1X_usingDTable( - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->HUFptr, - flags - ); - } - else - { - assert(litSize >= 6); - hufSuccess = HUF_decompress4X_usingDTable( - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->HUFptr, - flags - ); - } - } - else - { - if (singleStream != 0) - { - hufSuccess = HUF_decompress1X1_DCtx_wksp( - dctx->entropy.hufTable, - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->workspace, - sizeof(uint) * 640, - flags - ); - } - else - { - hufSuccess = HUF_decompress4X_hufOnly_wksp( - dctx->entropy.hufTable, - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->workspace, - sizeof(uint) * 640, - flags - ); - } - } - - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - assert(litSize > 1 << 16); - memcpy( - dctx->litExtraBuffer, - dctx->litBufferEnd - (1 << 16), - 1 << 16 - ); - memmove( - dctx->litBuffer + (1 << 16) - 32, - dctx->litBuffer, - litSize - (1 << 16) - ); - dctx->litBuffer += (1 << 16) - 32; - dctx->litBufferEnd -= 32; - assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); - } - - if (ERR_isError(hufSuccess)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - dctx->litEntropy = 1; - if (litEncType == SymbolEncodingType_e.set_compressed) - dctx->HUFptr = dctx->entropy.hufTable; - return litCSize + lhSize; } + } - case SymbolEncodingType_e.set_basic: + if (litEncType == SymbolEncodingType_e.set_repeat) { - nuint litSize, - lhSize; - uint lhlCode = (uint)(istart[0] >> 2 & 3); - nuint expectedWriteSize = - blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; - switch (lhlCode) + if (singleStream != 0) { - case 0: - case 2: - default: - lhSize = 1; - litSize = (nuint)(istart[0] >> 3); - break; - case 1: - lhSize = 2; - litSize = (nuint)(MEM_readLE16(istart) >> 4); - break; - case 3: - lhSize = 3; - if (srcSize < 3) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - litSize = MEM_readLE24(istart) >> 4; - break; + hufSuccess = HUF_decompress1X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags + ); } - - if (litSize > 0 && dst == null) + else { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + assert(litSize >= 6); + hufSuccess = HUF_decompress4X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags ); } - - if (litSize > blockSizeMax) + } + else + { + if (singleStream != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + hufSuccess = HUF_decompress1X1_DCtx_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags ); } - - if (expectedWriteSize < litSize) + else { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + hufSuccess = HUF_decompress4X_hufOnly_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags ); } + } - ZSTD_allocateLiteralsBuffer( - dctx, - dst, - dstCapacity, - litSize, - streaming, - expectedWriteSize, - 1 + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + assert(litSize > 1 << 16); + memcpy( + dctx->litExtraBuffer, + dctx->litBufferEnd - (1 << 16), + 1 << 16 ); - if (lhSize + litSize + 32 > srcSize) - { - if (litSize + lhSize > srcSize) + memmove( + dctx->litBuffer + (1 << 16) - 32, + dctx->litBuffer, + litSize - (1 << 16) + ); + dctx->litBuffer += (1 << 
16) - 32; + dctx->litBufferEnd -= 32; + assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); + } + + if (ERR_isError(hufSuccess)) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType == SymbolEncodingType_e.set_compressed) + dctx->HUFptr = dctx->entropy.hufTable; + return litCSize + lhSize; + } + + case SymbolEncodingType_e.set_basic: + { + nuint litSize, + lhSize; + uint lhlCode = (uint)(istart[0] >> 2 & 3); + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + switch (lhlCode) + { + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 3) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); } - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - memcpy( - dctx->litBuffer, - istart + lhSize, - (uint)(litSize - (1 << 16)) - ); - memcpy( - dctx->litExtraBuffer, - istart + lhSize + litSize - (1 << 16), - 1 << 16 - ); - } - else - { - memcpy(dctx->litBuffer, istart + lhSize, (uint)litSize); - } - - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - return lhSize + litSize; - } - - dctx->litPtr = istart + lhSize; - dctx->litSize = litSize; - dctx->litBufferEnd = dctx->litPtr + litSize; - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; - return lhSize + litSize; + litSize = MEM_readLE24(istart) >> 4; + break; } - case SymbolEncodingType_e.set_rle: + if (litSize > 0 && dst == null) { - uint lhlCode = (uint)(istart[0] >> 2 & 3); - nuint litSize, - lhSize; - nuint expectedWriteSize = - blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; - switch (lhlCode) - { - case 0: - case 2: - default: - lhSize = 1; - litSize = (nuint)(istart[0] >> 3); - break; - case 1: - lhSize = 2; - if (srcSize < 3) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - litSize = (nuint)(MEM_readLE16(istart) >> 4); - break; - case 3: - lhSize = 3; - if (srcSize < 4) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - litSize = MEM_readLE24(istart) >> 4; - break; - } + if (litSize > blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - if (litSize > 0 && dst == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } + if (expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - if (litSize > blockSizeMax) + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 1 + ); + if (lhSize + litSize + 32 > srcSize) + { + if (litSize + lhSize > srcSize) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); } - if (expectedWriteSize < litSize) + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + memcpy( + dctx->litBuffer, + istart + lhSize, + (uint)(litSize - (1 << 16)) + ); + memcpy( + dctx->litExtraBuffer, + istart + lhSize + litSize - (1 << 16), + 1 << 16 ); } - - ZSTD_allocateLiteralsBuffer( - dctx, - dst, - dstCapacity, - litSize, - streaming, - expectedWriteSize, - 1 - ); - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - memset(dctx->litBuffer, istart[lhSize], (uint)(litSize - (1 << 16))); - memset(dctx->litExtraBuffer, istart[lhSize], 1 << 16); - } else { - 
memset(dctx->litBuffer, istart[lhSize], (uint)litSize); + memcpy(dctx->litBuffer, istart + lhSize, (uint)litSize); } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; - return lhSize + 1; + return lhSize + litSize; + } + + dctx->litPtr = istart + lhSize; + dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + return lhSize + litSize; + } + + case SymbolEncodingType_e.set_rle: + { + uint lhlCode = (uint)(istart[0] >> 2 & 3); + nuint litSize, + lhSize; + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + switch (lhlCode) + { + case 0: + case 2: + default: + lhSize = 1; + litSize = (nuint)(istart[0] >> 3); + break; + case 1: + lhSize = 2; + if (srcSize < 3) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + litSize = (nuint)(MEM_readLE16(istart) >> 4); + break; + case 3: + lhSize = 3; + if (srcSize < 4) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (litSize > 0 && dst == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } - default: + if (litSize > blockSizeMax) + { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); + } + + if (expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } + + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 1 + ); + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + memset(dctx->litBuffer, istart[lhSize], (uint)(litSize - (1 << 16))); + memset(dctx->litExtraBuffer, istart[lhSize], 1 << 16); + } + else + { + memset(dctx->litBuffer, istart[lhSize], (uint)litSize); + } + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize + 
1; } - } - } - /* Hidden declaration for fullbench */ - private static nuint ZSTD_decodeLiteralsBlock_wrapper( - ZSTD_DCtx_s* dctx, - void* src, - nuint srcSize, - void* dst, - nuint dstCapacity - ) - { - dctx->isFrameDecompression = 0; - return ZSTD_decodeLiteralsBlock( - dctx, - src, - srcSize, - dst, - dstCapacity, - streaming_operation.not_streaming - ); + default: + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } } + } - private static readonly ZSTD_seqSymbol* LL_defaultDTable = GetArrayPointer( - new ZSTD_seqSymbol[65] - { - new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 1), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), - new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 6, nbBits: 5, baseValue: 64), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 256), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1024), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4096), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), - 
new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 512), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2048), - new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65536), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32768), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16384), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 
6, baseValue: 8192), - } - ); - private static readonly ZSTD_seqSymbol* OF_defaultDTable = GetArrayPointer( - new ZSTD_seqSymbol[33] - { - new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 5), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 0), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 21, - nbBits: 5, - baseValue: 2097149 - ), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 18, - nbBits: 5, - baseValue: 262141 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 23, - nbBits: 5, - baseValue: 8388605 - ), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 5, baseValue: 29), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 20, - nbBits: 5, - baseValue: 1048573 - ), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 2045), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 17, - nbBits: 5, - baseValue: 131069 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 22, - nbBits: 5, - baseValue: 4194301 - ), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 
13), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 19, - nbBits: 5, - baseValue: 524285 - ), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 28, - nbBits: 5, - baseValue: 268435453 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 27, - nbBits: 5, - baseValue: 134217725 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 26, - nbBits: 5, - baseValue: 67108861 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 25, - nbBits: 5, - baseValue: 33554429 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 24, - nbBits: 5, - baseValue: 16777213 - ), - } - ); - private static readonly ZSTD_seqSymbol* ML_defaultDTable = GetArrayPointer( - new ZSTD_seqSymbol[65] - { - new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 3), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), - new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 16), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 19), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 22), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 25), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 28), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 31), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 34), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 37), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 41), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 47), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 59), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 83), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 6, baseValue: 131), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 515), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 12), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 18), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 21), - new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 24), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 27), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 30), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 33), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 35), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 39), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 43), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 51), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 67), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 6, baseValue: 99), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 259), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), - new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), - new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), - new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 17), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 20), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 23), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 26), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 29), - new 
ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 32), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65539), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32771), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16387), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8195), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4099), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2051), - new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1027), - } + /* Hidden declaration for fullbench */ + private static nuint ZSTD_decodeLiteralsBlock_wrapper( + ZSTD_DCtx_s* dctx, + void* src, + nuint srcSize, + void* dst, + nuint dstCapacity + ) + { + dctx->isFrameDecompression = 0; + return ZSTD_decodeLiteralsBlock( + dctx, + src, + srcSize, + dst, + dstCapacity, + streaming_operation.not_streaming ); + } - private static void ZSTD_buildSeqTable_rle( - ZSTD_seqSymbol* dt, - uint baseValue, - byte nbAddBits - ) + private static readonly ZSTD_seqSymbol* LL_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[65] { - void* ptr = dt; - ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; - ZSTD_seqSymbol* cell = dt + 1; - DTableH->tableLog = 0; - DTableH->fastMode = 0; - cell->nbBits = 0; - cell->nextState = 0; - assert(nbAddBits < 255); - cell->nbAdditionalBits = nbAddBits; - cell->baseValue = baseValue; - } - - /* ZSTD_buildFSETable() : - * generate FSE decoding table for one symbol (ll, ml or off) - * cannot fail if input is valid => - * all inputs are presumed validated at this stage */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_buildFSETable_body( - ZSTD_seqSymbol* dt, - short* normalizedCounter, - uint maxSymbolValue, - uint* baseValue, - byte* nbAdditionalBits, - uint tableLog, - void* wksp, - nuint 
wkspSize - ) + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 3), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 6, nbBits: 5, baseValue: 64), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 256), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1024), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4096), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, 
baseValue: 0), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 64), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 7, nbBits: 5, baseValue: 128), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 512), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2048), + new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 0), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 1), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 2), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, 
baseValue: 3), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 18), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 1, nbBits: 5, baseValue: 20), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 24), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 2, nbBits: 5, baseValue: 28), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 3, nbBits: 5, baseValue: 40), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 4, nbBits: 5, baseValue: 48), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65536), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32768), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16384), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8192), + } + ); + private static readonly ZSTD_seqSymbol* OF_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[33] { - ZSTD_seqSymbol* tableDecode = dt + 1; - uint maxSV1 = maxSymbolValue + 1; - uint tableSize = (uint)(1 << (int)tableLog); - ushort* symbolNext = (ushort*)wksp; - byte* spread = (byte*)(symbolNext + 52 + 1); - uint highThreshold = tableSize - 1; - assert(maxSymbolValue <= 52); - assert(tableLog <= 9); - assert(wkspSize >= sizeof(short) * (52 + 1) + (1U << 9) + sizeof(ulong)); - { - ZSTD_seqSymbol_header DTableH; - DTableH.tableLog = 
tableLog; - DTableH.fastMode = 1; - { - short largeLimit = (short)(1 << (int)(tableLog - 1)); - uint s; - for (s = 0; s < maxSV1; s++) - { - if (normalizedCounter[s] == -1) - { - tableDecode[highThreshold--].baseValue = s; - symbolNext[s] = 1; - } - else - { - if (normalizedCounter[s] >= largeLimit) - DTableH.fastMode = 0; - assert(normalizedCounter[s] >= 0); - symbolNext[s] = (ushort)normalizedCounter[s]; - } - } - } + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 0), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 21, + nbBits: 5, + baseValue: 2097149 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 18, + nbBits: 5, + baseValue: 262141 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 23, + nbBits: 5, + baseValue: 8388605 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 5, baseValue: 29), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 20, + nbBits: 5, + baseValue: 1048573 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 
2045), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 17, + nbBits: 5, + baseValue: 131069 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 22, + nbBits: 5, + baseValue: 4194301 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 13), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 19, + nbBits: 5, + baseValue: 524285 + ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 28, + nbBits: 5, + baseValue: 268435453 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 27, + nbBits: 5, + baseValue: 134217725 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 26, + nbBits: 5, + baseValue: 67108861 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 25, + nbBits: 5, + baseValue: 33554429 + ), + new ZSTD_seqSymbol( + nextState: 0, + nbAdditionalBits: 24, + nbBits: 5, + baseValue: 16777213 + ), + } + ); + private static readonly ZSTD_seqSymbol* ML_defaultDTable = GetArrayPointer( + new ZSTD_seqSymbol[65] + { + new ZSTD_seqSymbol(nextState: 1, nbAdditionalBits: 1, nbBits: 1, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 3), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 5), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, 
nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 13), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 16), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 19), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 22), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 25), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 28), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 31), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 34), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 37), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 41), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 47), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 59), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 83), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 6, baseValue: 131), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 6, baseValue: 515), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 6), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 9), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 
6, baseValue: 12), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 15), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 18), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 21), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 24), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 27), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 30), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 33), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 35), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 6, baseValue: 39), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 6, baseValue: 43), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 6, baseValue: 51), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 6, baseValue: 67), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, nbBits: 6, baseValue: 99), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 6, baseValue: 259), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 48, nbAdditionalBits: 0, nbBits: 4, baseValue: 4), + new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 0, nbBits: 4, baseValue: 5), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 7), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 8), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 10), + new ZSTD_seqSymbol(nextState: 32, nbAdditionalBits: 0, nbBits: 5, baseValue: 11), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 14), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 17), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, 
baseValue: 20), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 23), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 26), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 29), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 0, nbBits: 6, baseValue: 32), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 6, baseValue: 65539), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 6, baseValue: 32771), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 6, baseValue: 16387), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 6, baseValue: 8195), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 6, baseValue: 4099), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 6, baseValue: 2051), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 6, baseValue: 1027), + } + ); - memcpy(dt, &DTableH, (uint)sizeof(ZSTD_seqSymbol_header)); - } + private static void ZSTD_buildSeqTable_rle( + ZSTD_seqSymbol* dt, + uint baseValue, + byte nbAddBits + ) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + ZSTD_seqSymbol* cell = dt + 1; + DTableH->tableLog = 0; + DTableH->fastMode = 0; + cell->nbBits = 0; + cell->nextState = 0; + assert(nbAddBits < 255); + cell->nbAdditionalBits = nbAddBits; + cell->baseValue = baseValue; + } - assert(tableSize <= 512); - if (highThreshold == tableSize - 1) + /* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) + * cannot fail if input is valid => + * all inputs are presumed validated at this stage */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_buildFSETable_body( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize + ) + { + ZSTD_seqSymbol* tableDecode 
= dt + 1; + uint maxSV1 = maxSymbolValue + 1; + uint tableSize = (uint)(1 << (int)tableLog); + ushort* symbolNext = (ushort*)wksp; + byte* spread = (byte*)(symbolNext + 52 + 1); + uint highThreshold = tableSize - 1; + assert(maxSymbolValue <= 52); + assert(tableLog <= 9); + assert(wkspSize >= sizeof(short) * (52 + 1) + (1U << 9) + sizeof(ulong)); + { + ZSTD_seqSymbol_header DTableH; + DTableH.tableLog = tableLog; + DTableH.fastMode = 1; { - nuint tableMask = tableSize - 1; - nuint step = (tableSize >> 1) + (tableSize >> 3) + 3; + short largeLimit = (short)(1 << (int)(tableLog - 1)); + uint s; + for (s = 0; s < maxSV1; s++) { - const ulong add = 0x0101010101010101UL; - nuint pos = 0; - ulong sv = 0; - uint s; - for (s = 0; s < maxSV1; ++s, sv += add) + if (normalizedCounter[s] == -1) { - int i; - int n = normalizedCounter[s]; - MEM_write64(spread + pos, sv); - for (i = 8; i < n; i += 8) - { - MEM_write64(spread + pos + i, sv); - } - - assert(n >= 0); - pos += (nuint)n; + tableDecode[highThreshold--].baseValue = s; + symbolNext[s] = 1; } - } - - { - nuint position = 0; - nuint s; - const nuint unroll = 2; - assert(tableSize % unroll == 0); - for (s = 0; s < tableSize; s += unroll) + else { - nuint u; - for (u = 0; u < unroll; ++u) - { - nuint uPosition = position + u * step & tableMask; - tableDecode[uPosition].baseValue = spread[s + u]; - } - - position = position + unroll * step & tableMask; + if (normalizedCounter[s] >= largeLimit) + DTableH.fastMode = 0; + assert(normalizedCounter[s] >= 0); + symbolNext[s] = (ushort)normalizedCounter[s]; } - - assert(position == 0); } } - else + + memcpy(dt, &DTableH, (uint)sizeof(ZSTD_seqSymbol_header)); + } + + assert(tableSize <= 512); + if (highThreshold == tableSize - 1) + { + nuint tableMask = tableSize - 1; + nuint step = (tableSize >> 1) + (tableSize >> 3) + 3; { - uint tableMask = tableSize - 1; - uint step = (tableSize >> 1) + (tableSize >> 3) + 3; - uint s, - position = 0; - for (s = 0; s < maxSV1; s++) + const ulong 
add = 0x0101010101010101UL; + nuint pos = 0; + ulong sv = 0; + uint s; + for (s = 0; s < maxSV1; ++s, sv += add) { int i; int n = normalizedCounter[s]; - for (i = 0; i < n; i++) + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) { - tableDecode[position].baseValue = s; - position = position + step & tableMask; - while (position > highThreshold) - position = position + step & tableMask; + MEM_write64(spread + pos + i, sv); } - } - assert(position == 0); + assert(n >= 0); + pos += (nuint)n; + } } { - uint u; - for (u = 0; u < tableSize; u++) + nuint position = 0; + nuint s; + const nuint unroll = 2; + assert(tableSize % unroll == 0); + for (s = 0; s < tableSize; s += unroll) { - uint symbol = tableDecode[u].baseValue; - uint nextState = symbolNext[symbol]++; - tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); - tableDecode[u].nextState = (ushort)( - (nextState << tableDecode[u].nbBits) - tableSize - ); - assert(nbAdditionalBits[symbol] < 255); - tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol]; - tableDecode[u].baseValue = baseValue[symbol]; + nuint u; + for (u = 0; u < unroll; ++u) + { + nuint uPosition = position + u * step & tableMask; + tableDecode[uPosition].baseValue = spread[s + u]; + } + + position = position + unroll * step & tableMask; } + + assert(position == 0); } } - - /* Avoids the FORCE_INLINE of the _body() function. 
*/ - private static void ZSTD_buildFSETable_body_default( - ZSTD_seqSymbol* dt, - short* normalizedCounter, - uint maxSymbolValue, - uint* baseValue, - byte* nbAdditionalBits, - uint tableLog, - void* wksp, - nuint wkspSize - ) + else { - ZSTD_buildFSETable_body( - dt, - normalizedCounter, - maxSymbolValue, - baseValue, - nbAdditionalBits, - tableLog, - wksp, - wkspSize - ); + uint tableMask = tableSize - 1; + uint step = (tableSize >> 1) + (tableSize >> 3) + 3; + uint s, + position = 0; + for (s = 0; s < maxSV1; s++) + { + int i; + int n = normalizedCounter[s]; + for (i = 0; i < n; i++) + { + tableDecode[position].baseValue = s; + position = position + step & tableMask; + while (position > highThreshold) + position = position + step & tableMask; + } + } + + assert(position == 0); } - /* ZSTD_buildFSETable() : - * generate FSE decoding table for one symbol (ll, ml or off) - * this function must be called with valid parameters only - * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) - * in which case it cannot fail. - * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is - * defined in zstd_decompress_internal.h. - * Internal use only. 
- */ - private static void ZSTD_buildFSETable( - ZSTD_seqSymbol* dt, - short* normalizedCounter, - uint maxSymbolValue, - uint* baseValue, - byte* nbAdditionalBits, - uint tableLog, - void* wksp, - nuint wkspSize, - int bmi2 - ) { - ZSTD_buildFSETable_body_default( - dt, - normalizedCounter, - maxSymbolValue, - baseValue, - nbAdditionalBits, - tableLog, - wksp, - wkspSize - ); + uint u; + for (u = 0; u < tableSize; u++) + { + uint symbol = tableDecode[u].baseValue; + uint nextState = symbolNext[symbol]++; + tableDecode[u].nbBits = (byte)(tableLog - ZSTD_highbit32(nextState)); + tableDecode[u].nextState = (ushort)( + (nextState << tableDecode[u].nbBits) - tableSize + ); + assert(nbAdditionalBits[symbol] < 255); + tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol]; + tableDecode[u].baseValue = baseValue[symbol]; + } } + } - /*! ZSTD_buildSeqTable() : - * @return : nb bytes read from src, - * or an error code if it fails */ - private static nuint ZSTD_buildSeqTable( - ZSTD_seqSymbol* DTableSpace, - ZSTD_seqSymbol** DTablePtr, - SymbolEncodingType_e type, - uint max, - uint maxLog, - void* src, - nuint srcSize, - uint* baseValue, - byte* nbAdditionalBits, - ZSTD_seqSymbol* defaultTable, - uint flagRepeatTable, - int ddictIsCold, - int nbSeq, - uint* wksp, - nuint wkspSize, - int bmi2 - ) + /* Avoids the FORCE_INLINE of the _body() function. 
*/ + private static void ZSTD_buildFSETable_body_default( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize + ) + { + ZSTD_buildFSETable_body( + dt, + normalizedCounter, + maxSymbolValue, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize + ); + } + + /* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) + * this function must be called with valid parameters only + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) + * in which case it cannot fail. + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is + * defined in zstd_decompress_internal.h. + * Internal use only. + */ + private static void ZSTD_buildFSETable( + ZSTD_seqSymbol* dt, + short* normalizedCounter, + uint maxSymbolValue, + uint* baseValue, + byte* nbAdditionalBits, + uint tableLog, + void* wksp, + nuint wkspSize, + int bmi2 + ) + { + ZSTD_buildFSETable_body_default( + dt, + normalizedCounter, + maxSymbolValue, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize + ); + } + + /*! 
ZSTD_buildSeqTable() : + * @return : nb bytes read from src, + * or an error code if it fails */ + private static nuint ZSTD_buildSeqTable( + ZSTD_seqSymbol* DTableSpace, + ZSTD_seqSymbol** DTablePtr, + SymbolEncodingType_e type, + uint max, + uint maxLog, + void* src, + nuint srcSize, + uint* baseValue, + byte* nbAdditionalBits, + ZSTD_seqSymbol* defaultTable, + uint flagRepeatTable, + int ddictIsCold, + int nbSeq, + uint* wksp, + nuint wkspSize, + int bmi2 + ) + { + switch (type) { - switch (type) - { - case SymbolEncodingType_e.set_rle: - if (srcSize == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + case SymbolEncodingType_e.set_rle: + if (srcSize == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - if (*(byte*)src > max) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + if (*(byte*)src > max) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - { - uint symbol = *(byte*)src; - uint baseline = baseValue[symbol]; - byte nbBits = nbAdditionalBits[symbol]; - ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); - } + { + uint symbol = *(byte*)src; + uint baseline = baseValue[symbol]; + byte nbBits = nbAdditionalBits[symbol]; + ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); + } - *DTablePtr = DTableSpace; - return 1; - case SymbolEncodingType_e.set_basic: - *DTablePtr = defaultTable; - return 0; - case SymbolEncodingType_e.set_repeat: - if (flagRepeatTable == 0) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + *DTablePtr = DTableSpace; + return 1; + case SymbolEncodingType_e.set_basic: + *DTablePtr = defaultTable; + return 0; + case SymbolEncodingType_e.set_repeat: + if (flagRepeatTable == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - if (ddictIsCold != 0 && nbSeq > 24) + if 
(ddictIsCold != 0 && nbSeq > 24) + { + void* pStart = *DTablePtr; + nuint pSize = (nuint)(sizeof(ZSTD_seqSymbol) * (1 + (1 << (int)maxLog))); { - void* pStart = *DTablePtr; - nuint pSize = (nuint)(sizeof(ZSTD_seqSymbol) * (1 + (1 << (int)maxLog))); + sbyte* _ptr = (sbyte*)pStart; + nuint _size = pSize; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) { - sbyte* _ptr = (sbyte*)pStart; - nuint _size = pSize; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) - { #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); } #endif - } } } + } - return 0; - case SymbolEncodingType_e.set_compressed: + return 0; + case SymbolEncodingType_e.set_compressed: + { + uint tableLog; + short* norm = stackalloc short[53]; + nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (ERR_isError(headerSize)) { - uint tableLog; - short* norm = stackalloc short[53]; - nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); - if (ERR_isError(headerSize)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - if (tableLog > maxLog) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } - ZSTD_buildFSETable( - DTableSpace, - norm, - max, - baseValue, - nbAdditionalBits, - tableLog, - wksp, - wkspSize, - bmi2 + if (tableLog > maxLog) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - *DTablePtr = DTableSpace; - return headerSize; } - default: - assert(0 != 0); - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); + ZSTD_buildFSETable( + DTableSpace, + norm, + max, + baseValue, + nbAdditionalBits, + tableLog, + wksp, + wkspSize, + bmi2 + ); + *DTablePtr = DTableSpace; + return headerSize; } + + default: + assert(0 != 0); + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)); } + } - /*! ZSTD_decodeSeqHeaders() : - * decode sequence header from src */ - /* Used by: zstd_decompress_block, fullbench */ - private static nuint ZSTD_decodeSeqHeaders( - ZSTD_DCtx_s* dctx, - int* nbSeqPtr, - void* src, - nuint srcSize - ) + /*! ZSTD_decodeSeqHeaders() : + * decode sequence header from src */ + /* Used by: zstd_decompress_block, fullbench */ + private static nuint ZSTD_decodeSeqHeaders( + ZSTD_DCtx_s* dctx, + int* nbSeqPtr, + void* src, + nuint srcSize + ) + { + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + byte* ip = istart; + int nbSeq; + if (srcSize < 1) { - byte* istart = (byte*)src; - byte* iend = istart + srcSize; - byte* ip = istart; - int nbSeq; - if (srcSize < 1) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } - nbSeq = *ip++; - if (nbSeq > 0x7F) + nbSeq = *ip++; + if (nbSeq > 0x7F) + { + if (nbSeq == 0xFF) { - if (nbSeq == 0xFF) + if (ip + 2 > iend) { - if (ip + 2 > iend) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - - nbSeq = MEM_readLE16(ip) + 0x7F00; - ip += 2; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - else - { - if (ip >= iend) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); - } - nbSeq = (nbSeq - 0x80 << 8) + *ip++; - } + nbSeq = MEM_readLE16(ip) + 0x7F00; + ip += 2; } - - *nbSeqPtr = nbSeq; - if (nbSeq == 0) + else { - if (ip != iend) + if (ip >= iend) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - return (nuint)(ip - istart); - } - - if (ip + 1 > iend) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + nbSeq = (nbSeq - 0x80 << 8) + *ip++; } + } - if ((*ip & 3) != 0) + *nbSeqPtr = 
nbSeq; + if (nbSeq == 0) + { + if (ip != iend) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - { - SymbolEncodingType_e LLtype = (SymbolEncodingType_e)(*ip >> 6); - SymbolEncodingType_e OFtype = (SymbolEncodingType_e)(*ip >> 4 & 3); - SymbolEncodingType_e MLtype = (SymbolEncodingType_e)(*ip >> 2 & 3); - ip++; + return (nuint)(ip - istart); + } + + if (ip + 1 > iend) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + } + + if ((*ip & 3) != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + { + SymbolEncodingType_e LLtype = (SymbolEncodingType_e)(*ip >> 6); + SymbolEncodingType_e OFtype = (SymbolEncodingType_e)(*ip >> 4 & 3); + SymbolEncodingType_e MLtype = (SymbolEncodingType_e)(*ip >> 2 & 3); + ip++; + { + nuint llhSize = ZSTD_buildSeqTable( + &dctx->entropy.LLTable.e0, + &dctx->LLTptr, + LLtype, + 35, + 9, + ip, + (nuint)(iend - ip), + LL_base, + LL_bits, + LL_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + nbSeq, + dctx->workspace, + sizeof(uint) * 640, + ZSTD_DCtx_get_bmi2(dctx) + ); + if (ERR_isError(llhSize)) { - nuint llhSize = ZSTD_buildSeqTable( - &dctx->entropy.LLTable.e0, - &dctx->LLTptr, - LLtype, - 35, - 9, - ip, - (nuint)(iend - ip), - LL_base, - LL_bits, - LL_defaultDTable, - dctx->fseEntropy, - dctx->ddictIsCold, - nbSeq, - dctx->workspace, - sizeof(uint) * 640, - ZSTD_DCtx_get_bmi2(dctx) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - if (ERR_isError(llhSize)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - ip += llhSize; } + ip += llhSize; + } + + { + nuint ofhSize = ZSTD_buildSeqTable( + &dctx->entropy.OFTable.e0, + &dctx->OFTptr, + OFtype, + 31, + 8, + ip, + (nuint)(iend - ip), + OF_base, + OF_bits, + OF_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + nbSeq, + dctx->workspace, + sizeof(uint) * 640, + 
ZSTD_DCtx_get_bmi2(dctx) + ); + if (ERR_isError(ofhSize)) { - nuint ofhSize = ZSTD_buildSeqTable( - &dctx->entropy.OFTable.e0, - &dctx->OFTptr, - OFtype, - 31, - 8, - ip, - (nuint)(iend - ip), - OF_base, - OF_bits, - OF_defaultDTable, - dctx->fseEntropy, - dctx->ddictIsCold, - nbSeq, - dctx->workspace, - sizeof(uint) * 640, - ZSTD_DCtx_get_bmi2(dctx) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - if (ERR_isError(ofhSize)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - ip += ofhSize; } + ip += ofhSize; + } + + { + nuint mlhSize = ZSTD_buildSeqTable( + &dctx->entropy.MLTable.e0, + &dctx->MLTptr, + MLtype, + 52, + 9, + ip, + (nuint)(iend - ip), + ML_base, + ML_bits, + ML_defaultDTable, + dctx->fseEntropy, + dctx->ddictIsCold, + nbSeq, + dctx->workspace, + sizeof(uint) * 640, + ZSTD_DCtx_get_bmi2(dctx) + ); + if (ERR_isError(mlhSize)) { - nuint mlhSize = ZSTD_buildSeqTable( - &dctx->entropy.MLTable.e0, - &dctx->MLTptr, - MLtype, - 52, - 9, - ip, - (nuint)(iend - ip), - ML_base, - ML_bits, - ML_defaultDTable, - dctx->fseEntropy, - dctx->ddictIsCold, - nbSeq, - dctx->workspace, - sizeof(uint) * 640, - ZSTD_DCtx_get_bmi2(dctx) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); - if (ERR_isError(mlhSize)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } - - ip += mlhSize; } - } - return (nuint)(ip - istart); + ip += mlhSize; + } } + return (nuint)(ip - istart); + } + #if NET7_0_OR_GREATER private static ReadOnlySpan Span_dec32table => new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 }; private static uint* dec32table => @@ -1294,9 +1294,9 @@ ref MemoryMarshal.GetReference(Span_dec32table) ); #else - private static readonly uint* dec32table = GetArrayPointer( - new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 } - ); + private static readonly uint* dec32table = GetArrayPointer( + new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 } + ); 
#endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_dec64table => new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 }; @@ -1307,784 +1307,756 @@ ref MemoryMarshal.GetReference(Span_dec64table) ); #else - private static readonly int* dec64table = GetArrayPointer( - new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 } - ); + private static readonly int* dec64table = GetArrayPointer( + new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 } + ); #endif - /*! ZSTD_overlapCopy8() : - * Copies 8 bytes from ip to op and updates op and ip where ip <= op. - * If the offset is < 8 then the offset is spread to at least 8 bytes. - * - * Precondition: *ip <= *op - * Postcondition: *op - *op >= 8 - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_overlapCopy8(byte** op, byte** ip, nuint offset) - { - assert(*ip <= *op); - if (offset < 8) - { - int sub2 = dec64table[offset]; - (*op)[0] = (*ip)[0]; - (*op)[1] = (*ip)[1]; - (*op)[2] = (*ip)[2]; - (*op)[3] = (*ip)[3]; - *ip += dec32table[offset]; - ZSTD_copy4(*op + 4, *ip); - *ip -= sub2; - } - else - { - ZSTD_copy8(*op, *ip); - } - - *ip += 8; - *op += 8; - assert(*op - *ip >= 8); - } - - /*! ZSTD_safecopy() : - * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer - * and write up to 16 bytes past oend_w (op >= oend_w is allowed). - * This function is only called in the uncommon case where the sequence is near the end of the block. It - * should be fast for a single long sequence, but can be slow for several short sequences. - * - * @param ovtype controls the overlap detection - * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. - * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. - * The src buffer must be before the dst buffer. - */ - private static void ZSTD_safecopy( - byte* op, - byte* oend_w, - byte* ip, - nint length, - ZSTD_overlap_e ovtype - ) + /*! 
ZSTD_overlapCopy8() : + * Copies 8 bytes from ip to op and updates op and ip where ip <= op. + * If the offset is < 8 then the offset is spread to at least 8 bytes. + * + * Precondition: *ip <= *op + * Postcondition: *op - *op >= 8 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_overlapCopy8(byte** op, byte** ip, nuint offset) + { + assert(*ip <= *op); + if (offset < 8) { - nint diff = (nint)(op - ip); - byte* oend = op + length; - assert( - ovtype == ZSTD_overlap_e.ZSTD_no_overlap - && (diff <= -8 || diff >= 8 || op >= oend_w) - || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0 - ); - if (length < 8) - { - while (op < oend) - *op++ = *ip++; - return; - } - - if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst) - { - assert(length >= 8); - ZSTD_overlapCopy8(&op, &ip, (nuint)diff); - length -= 8; - assert(op - ip >= 8); - assert(op <= oend); - } - - if (oend <= oend_w) - { - ZSTD_wildcopy(op, ip, length, ovtype); - return; - } + int sub2 = dec64table[offset]; + (*op)[0] = (*ip)[0]; + (*op)[1] = (*ip)[1]; + (*op)[2] = (*ip)[2]; + (*op)[3] = (*ip)[3]; + *ip += dec32table[offset]; + ZSTD_copy4(*op + 4, *ip); + *ip -= sub2; + } + else + { + ZSTD_copy8(*op, *ip); + } - if (op <= oend_w) - { - assert(oend > oend_w); - ZSTD_wildcopy(op, ip, (nint)(oend_w - op), ovtype); - ip += oend_w - op; - op += oend_w - op; - } + *ip += 8; + *op += 8; + assert(*op - *ip >= 8); + } + /*! ZSTD_safecopy() : + * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer + * and write up to 16 bytes past oend_w (op >= oend_w is allowed). + * This function is only called in the uncommon case where the sequence is near the end of the block. It + * should be fast for a single long sequence, but can be slow for several short sequences. 
+ * + * @param ovtype controls the overlap detection + * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. + * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. + * The src buffer must be before the dst buffer. + */ + private static void ZSTD_safecopy( + byte* op, + byte* oend_w, + byte* ip, + nint length, + ZSTD_overlap_e ovtype + ) + { + nint diff = (nint)(op - ip); + byte* oend = op + length; + assert( + ovtype == ZSTD_overlap_e.ZSTD_no_overlap + && (diff <= -8 || diff >= 8 || op >= oend_w) + || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0 + ); + if (length < 8) + { while (op < oend) *op++ = *ip++; + return; } - /* ZSTD_safecopyDstBeforeSrc(): - * This version allows overlap with dst before src, or handles the non-overlap case with dst after src - * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ - private static void ZSTD_safecopyDstBeforeSrc(byte* op, byte* ip, nint length) + if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst) { - nint diff = (nint)(op - ip); - byte* oend = op + length; - if (length < 8 || diff > -8) - { - while (op < oend) - *op++ = *ip++; - return; - } + assert(length >= 8); + ZSTD_overlapCopy8(&op, &ip, (nuint)diff); + length -= 8; + assert(op - ip >= 8); + assert(op <= oend); + } - if (op <= oend - 32 && diff < -16) - { - ZSTD_wildcopy(op, ip, (nint)(oend - 32 - op), ZSTD_overlap_e.ZSTD_no_overlap); - ip += oend - 32 - op; - op += oend - 32 - op; - } + if (oend <= oend_w) + { + ZSTD_wildcopy(op, ip, length, ovtype); + return; + } + + if (op <= oend_w) + { + assert(oend > oend_w); + ZSTD_wildcopy(op, ip, (nint)(oend_w - op), ovtype); + ip += oend_w - op; + op += oend_w - op; + } + while (op < oend) + *op++ = *ip++; + } + + /* ZSTD_safecopyDstBeforeSrc(): + * This version allows overlap with dst before src, or handles the non-overlap case with dst after src + * 
Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */ + private static void ZSTD_safecopyDstBeforeSrc(byte* op, byte* ip, nint length) + { + nint diff = (nint)(op - ip); + byte* oend = op + length; + if (length < 8 || diff > -8) + { while (op < oend) *op++ = *ip++; + return; } - /* ZSTD_execSequenceEnd(): - * This version handles cases that are near the end of the output buffer. It requires - * more careful checks to make sure there is no overflow. By separating out these hard - * and unlikely cases, we can speed up the common cases. - * - * NOTE: This function needs to be fast for a single long sequence, but doesn't need - * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). - */ - private static nuint ZSTD_execSequenceEnd( - byte* op, - byte* oend, - seq_t sequence, - byte** litPtr, - byte* litLimit, - byte* prefixStart, - byte* virtualStart, - byte* dictEnd - ) + if (op <= oend - 32 && diff < -16) + { + ZSTD_wildcopy(op, ip, (nint)(oend - 32 - op), ZSTD_overlap_e.ZSTD_no_overlap); + ip += oend - 32 - op; + op += oend - 32 - op; + } + + while (op < oend) + *op++ = *ip++; + } + + /* ZSTD_execSequenceEnd(): + * This version handles cases that are near the end of the output buffer. It requires + * more careful checks to make sure there is no overflow. By separating out these hard + * and unlikely cases, we can speed up the common cases. + * + * NOTE: This function needs to be fast for a single long sequence, but doesn't need + * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). 
+ */ + private static nuint ZSTD_execSequenceEnd( + byte* op, + byte* oend, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) + { + byte* oLitEnd = op + sequence.litLength; + nuint sequenceLength = sequence.litLength + sequence.matchLength; + byte* iLitEnd = *litPtr + sequence.litLength; + byte* match = oLitEnd - sequence.offset; + byte* oend_w = oend - 32; + if (sequenceLength > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (sequence.litLength > (nuint)(litLimit - *litPtr)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + ZSTD_safecopy( + op, + oend_w, + *litPtr, + (nint)sequence.litLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence.offset > (nuint)(oLitEnd - prefixStart)) { - byte* oLitEnd = op + sequence.litLength; - nuint sequenceLength = sequence.litLength + sequence.matchLength; - byte* iLitEnd = *litPtr + sequence.litLength; - byte* match = oLitEnd - sequence.offset; - byte* oend_w = oend - 32; - if (sequenceLength > (nuint)(oend - op)) + if (sequence.offset > (nuint)(oLitEnd - virtualStart)) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - if (sequence.litLength > (nuint)(litLimit - *litPtr)) + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; } - assert(op < op + sequenceLength); - assert(oLitEnd < op + sequenceLength); - ZSTD_safecopy( - op, - oend_w, - *litPtr, - (nint)sequence.litLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); - op = oLitEnd; - 
*litPtr = iLitEnd; - if (sequence.offset > (nuint)(oLitEnd - prefixStart)) { - if (sequence.offset > (nuint)(oLitEnd - virtualStart)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } - match = dictEnd - (prefixStart - match); - if (match + sequence.matchLength <= dictEnd) - { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } + ZSTD_safecopy( + op, + oend_w, + match, + (nint)sequence.matchLength, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); + return sequenceLength; + } - { - nuint length1 = (nuint)(dictEnd - match); - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } - } + /* ZSTD_execSequenceEndSplitLitBuffer(): + * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. 
+ */ + private static nuint ZSTD_execSequenceEndSplitLitBuffer( + byte* op, + byte* oend, + byte* oend_w, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) + { + byte* oLitEnd = op + sequence.litLength; + nuint sequenceLength = sequence.litLength + sequence.matchLength; + byte* iLitEnd = *litPtr + sequence.litLength; + byte* match = oLitEnd - sequence.offset; + if (sequenceLength > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - ZSTD_safecopy( - op, - oend_w, - match, - (nint)sequence.matchLength, - ZSTD_overlap_e.ZSTD_overlap_src_before_dst - ); - return sequenceLength; + if (sequence.litLength > (nuint)(litLimit - *litPtr)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - /* ZSTD_execSequenceEndSplitLitBuffer(): - * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. 
- */ - private static nuint ZSTD_execSequenceEndSplitLitBuffer( - byte* op, - byte* oend, - byte* oend_w, - seq_t sequence, - byte** litPtr, - byte* litLimit, - byte* prefixStart, - byte* virtualStart, - byte* dictEnd - ) + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + if (op > *litPtr && op < *litPtr + sequence.litLength) { - byte* oLitEnd = op + sequence.litLength; - nuint sequenceLength = sequence.litLength + sequence.matchLength; - byte* iLitEnd = *litPtr + sequence.litLength; - byte* match = oLitEnd - sequence.offset; - if (sequenceLength > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - if (sequence.litLength > (nuint)(litLimit - *litPtr)) + ZSTD_safecopyDstBeforeSrc(op, *litPtr, (nint)sequence.litLength); + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence.offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence.offset > (nuint)(oLitEnd - virtualStart)) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - assert(op < op + sequenceLength); - assert(oLitEnd < op + sequenceLength); - if (op > *litPtr && op < *litPtr + sequence.litLength) + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; } - ZSTD_safecopyDstBeforeSrc(op, *litPtr, (nint)sequence.litLength); - op = oLitEnd; - *litPtr = iLitEnd; - if (sequence.offset > (nuint)(oLitEnd - prefixStart)) { - if (sequence.offset > (nuint)(oLitEnd - virtualStart)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - match = dictEnd - (prefixStart - match); - if (match + sequence.matchLength <= dictEnd) - { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } 
- - { - nuint length1 = (nuint)(dictEnd - match); - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; } + } + + ZSTD_safecopy( + op, + oend_w, + match, + (nint)sequence.matchLength, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); + return sequenceLength; + } - ZSTD_safecopy( + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_execSequence( + byte* op, + byte* oend, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) + { + var sequence_litLength = sequence.litLength; + var sequence_matchLength = sequence.matchLength; + var sequence_offset = sequence.offset; + byte* oLitEnd = op + sequence_litLength; + nuint sequenceLength = sequence_litLength + sequence_matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + sequenceLength; + /* risk : address space underflow on oend=NULL */ + byte* oend_w = oend - 32; + byte* iLitEnd = *litPtr + sequence_litLength; + byte* match = oLitEnd - sequence_offset; + assert(op != null); + assert(oend_w < oend); + if ( + iLitEnd > litLimit + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 + ) + return ZSTD_execSequenceEnd( op, - oend_w, - match, - (nint)sequence.matchLength, - ZSTD_overlap_e.ZSTD_overlap_src_before_dst + oend, + new seq_t + { + litLength = sequence_litLength, + matchLength = sequence_matchLength, + offset = sequence_offset, + }, + litPtr, + litLimit, + prefixStart, + virtualStart, + dictEnd + ); + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litLimit); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, *litPtr); + if 
(sequence_litLength > 16) + { + ZSTD_wildcopy( + op + 16, + *litPtr + 16, + (nint)(sequence_litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap ); - return sequenceLength; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_execSequence( - byte* op, - byte* oend, - seq_t sequence, - byte** litPtr, - byte* litLimit, - byte* prefixStart, - byte* virtualStart, - byte* dictEnd - ) + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence_offset > (nuint)(oLitEnd - prefixStart)) { - var sequence_litLength = sequence.litLength; - var sequence_matchLength = sequence.matchLength; - var sequence_offset = sequence.offset; - byte* oLitEnd = op + sequence_litLength; - nuint sequenceLength = sequence_litLength + sequence_matchLength; - /* risk : address space overflow (32-bits) */ - byte* oMatchEnd = op + sequenceLength; - /* risk : address space underflow on oend=NULL */ - byte* oend_w = oend - 32; - byte* iLitEnd = *litPtr + sequence_litLength; - byte* match = oLitEnd - sequence_offset; - assert(op != null); - assert(oend_w < oend); - if ( - iLitEnd > litLimit - || oMatchEnd > oend_w - || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 - ) - return ZSTD_execSequenceEnd( - op, - oend, - new seq_t - { - litLength = sequence_litLength, - matchLength = sequence_matchLength, - offset = sequence_offset, - }, - litPtr, - litLimit, - prefixStart, - virtualStart, - dictEnd - ); - assert(op <= oLitEnd); - assert(oLitEnd < oMatchEnd); - assert(oMatchEnd <= oend); - assert(iLitEnd <= litLimit); - assert(oLitEnd <= oend_w); - assert(oMatchEnd <= oend_w); - assert(32 >= 16); - ZSTD_copy16(op, *litPtr); - if (sequence_litLength > 16) - { - ZSTD_wildcopy( - op + 16, - *litPtr + 16, - (nint)(sequence_litLength - 16), - ZSTD_overlap_e.ZSTD_no_overlap - ); - } - - op = oLitEnd; - *litPtr = iLitEnd; - if (sequence_offset > (nuint)(oLitEnd - prefixStart)) + if (sequence_offset > (nuint)(oLitEnd - virtualStart)) { - if (sequence_offset > (nuint)(oLitEnd - 
virtualStart)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - match = dictEnd + (match - prefixStart); - if (match + sequence_matchLength <= dictEnd) - { - memmove(oLitEnd, match, sequence_matchLength); - return sequenceLength; - } - - { - nuint length1 = (nuint)(dictEnd - match); - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence_matchLength -= length1; - match = prefixStart; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - assert(op <= oMatchEnd); - assert(oMatchEnd <= oend_w); - assert(match >= prefixStart); - assert(sequence_matchLength >= 1); - if (sequence_offset >= 16) + match = dictEnd + (match - prefixStart); + if (match + sequence_matchLength <= dictEnd) { - ZSTD_wildcopy( - op, - match, - (nint)sequence_matchLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); + memmove(oLitEnd, match, sequence_matchLength); return sequenceLength; } - assert(sequence_offset < 16); - ZSTD_overlapCopy8(ref op, ref match, sequence_offset); - if (sequence_matchLength > 8) { - assert(op < oMatchEnd); - ZSTD_wildcopy( - op, - match, - (nint)sequence_matchLength - 8, - ZSTD_overlap_e.ZSTD_overlap_src_before_dst - ); + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence_matchLength -= length1; + match = prefixStart; } + } + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence_matchLength >= 1); + if (sequence_offset >= 16) + { + ZSTD_wildcopy( + op, + match, + (nint)sequence_matchLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); return sequenceLength; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_execSequenceSplitLitBuffer( - byte* op, - byte* oend, - byte* oend_w, - seq_t sequence, - byte** litPtr, - byte* litLimit, - byte* prefixStart, - byte* virtualStart, - byte* dictEnd - ) + assert(sequence_offset < 16); + 
ZSTD_overlapCopy8(ref op, ref match, sequence_offset); + if (sequence_matchLength > 8) { - byte* oLitEnd = op + sequence.litLength; - nuint sequenceLength = sequence.litLength + sequence.matchLength; - /* risk : address space overflow (32-bits) */ - byte* oMatchEnd = op + sequenceLength; - byte* iLitEnd = *litPtr + sequence.litLength; - byte* match = oLitEnd - sequence.offset; - assert(op != null); - assert(oend_w < oend); - if ( - iLitEnd > litLimit - || oMatchEnd > oend_w - || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 - ) - return ZSTD_execSequenceEndSplitLitBuffer( - op, - oend, - oend_w, - sequence, - litPtr, - litLimit, - prefixStart, - virtualStart, - dictEnd - ); - assert(op <= oLitEnd); - assert(oLitEnd < oMatchEnd); - assert(oMatchEnd <= oend); - assert(iLitEnd <= litLimit); - assert(oLitEnd <= oend_w); - assert(oMatchEnd <= oend_w); - assert(32 >= 16); - ZSTD_copy16(op, *litPtr); - if (sequence.litLength > 16) - { - ZSTD_wildcopy( - op + 16, - *litPtr + 16, - (nint)(sequence.litLength - 16), - ZSTD_overlap_e.ZSTD_no_overlap - ); - } + assert(op < oMatchEnd); + ZSTD_wildcopy( + op, + match, + (nint)sequence_matchLength - 8, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); + } - op = oLitEnd; - *litPtr = iLitEnd; - if (sequence.offset > (nuint)(oLitEnd - prefixStart)) - { - if (sequence.offset > (nuint)(oLitEnd - virtualStart)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + return sequenceLength; + } - match = dictEnd + (match - prefixStart); - if (match + sequence.matchLength <= dictEnd) - { - memmove(oLitEnd, match, sequence.matchLength); - return sequenceLength; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_execSequenceSplitLitBuffer( + byte* op, + byte* oend, + byte* oend_w, + seq_t sequence, + byte** litPtr, + byte* litLimit, + byte* prefixStart, + byte* virtualStart, + byte* dictEnd + ) + { + byte* oLitEnd = op + sequence.litLength; + nuint sequenceLength 
= sequence.litLength + sequence.matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + sequenceLength; + byte* iLitEnd = *litPtr + sequence.litLength; + byte* match = oLitEnd - sequence.offset; + assert(op != null); + assert(oend_w < oend); + if ( + iLitEnd > litLimit + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < sequenceLength + 32 + ) + return ZSTD_execSequenceEndSplitLitBuffer( + op, + oend, + oend_w, + sequence, + litPtr, + litLimit, + prefixStart, + virtualStart, + dictEnd + ); + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litLimit); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, *litPtr); + if (sequence.litLength > 16) + { + ZSTD_wildcopy( + op + 16, + *litPtr + 16, + (nint)(sequence.litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap + ); + } - { - nuint length1 = (nuint)(dictEnd - match); - memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } + op = oLitEnd; + *litPtr = iLitEnd; + if (sequence.offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence.offset > (nuint)(oLitEnd - virtualStart)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - assert(op <= oMatchEnd); - assert(oMatchEnd <= oend_w); - assert(match >= prefixStart); - assert(sequence.matchLength >= 1); - if (sequence.offset >= 16) + match = dictEnd + (match - prefixStart); + if (match + sequence.matchLength <= dictEnd) { - ZSTD_wildcopy( - op, - match, - (nint)sequence.matchLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); + memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } - assert(sequence.offset < 16); - ZSTD_overlapCopy8(&op, &match, sequence.offset); - if (sequence.matchLength > 8) { - assert(op < oMatchEnd); - ZSTD_wildcopy( - op, - match, - (nint)sequence.matchLength - 8, - 
ZSTD_overlap_e.ZSTD_overlap_src_before_dst - ); + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; } + } + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + if (sequence.offset >= 16) + { + ZSTD_wildcopy( + op, + match, + (nint)sequence.matchLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); return sequenceLength; } - private static void ZSTD_initFseState( - ZSTD_fseState* DStatePtr, - BIT_DStream_t* bitD, - ZSTD_seqSymbol* dt - ) + assert(sequence.offset < 16); + ZSTD_overlapCopy8(&op, &match, sequence.offset); + if (sequence.matchLength > 8) { - void* ptr = dt; - ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; - DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); - BIT_reloadDStream(bitD); - DStatePtr->table = dt + 1; + assert(op < oMatchEnd); + ZSTD_wildcopy( + op, + match, + (nint)sequence.matchLength - 8, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateFseStateWithDInfo( - ZSTD_fseState* DStatePtr, - BIT_DStream_t* bitD, - ushort nextState, - uint nbBits - ) - { - nuint lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = nextState + lowBits; - } - - /** - * ZSTD_decodeSequence(): - * @p longOffsets : tells the decoder to reload more bit while decoding large offsets - * only used in 32-bit mode - * @return : Sequence (litL + matchL + offset) - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static seq_t ZSTD_decodeSequence( - seqState_t* seqState, - ZSTD_longOffset_e longOffsets, - int isLastSeq - ) + return sequenceLength; + } + + private static void ZSTD_initFseState( + ZSTD_fseState* DStatePtr, + BIT_DStream_t* bitD, + ZSTD_seqSymbol* dt + ) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + DStatePtr->state 
= BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateFseStateWithDInfo( + ZSTD_fseState* DStatePtr, + BIT_DStream_t* bitD, + ushort nextState, + uint nbBits + ) + { + nuint lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = nextState + lowBits; + } + + /** + * ZSTD_decodeSequence(): + * @p longOffsets : tells the decoder to reload more bit while decoding large offsets + * only used in 32-bit mode + * @return : Sequence (litL + matchL + offset) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static seq_t ZSTD_decodeSequence( + seqState_t* seqState, + ZSTD_longOffset_e longOffsets, + int isLastSeq + ) + { + seq_t seq; + ZSTD_seqSymbol* llDInfo = seqState->stateLL.table + seqState->stateLL.state; + ZSTD_seqSymbol* mlDInfo = seqState->stateML.table + seqState->stateML.state; + ZSTD_seqSymbol* ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = llDInfo->baseValue; { - seq_t seq; - ZSTD_seqSymbol* llDInfo = seqState->stateLL.table + seqState->stateLL.state; - ZSTD_seqSymbol* mlDInfo = seqState->stateML.table + seqState->stateML.state; - ZSTD_seqSymbol* ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; - seq.matchLength = mlDInfo->baseValue; - seq.litLength = llDInfo->baseValue; - { - uint ofBase = ofDInfo->baseValue; - byte llBits = llDInfo->nbAdditionalBits; - byte mlBits = mlDInfo->nbAdditionalBits; - byte ofBits = ofDInfo->nbAdditionalBits; - byte totalBits = (byte)(llBits + mlBits + ofBits); - ushort llNext = llDInfo->nextState; - ushort mlNext = mlDInfo->nextState; - ushort ofNext = ofDInfo->nextState; - uint llnbBits = llDInfo->nbBits; - uint mlnbBits = mlDInfo->nbBits; - uint ofnbBits = ofDInfo->nbBits; - assert(llBits <= 16); - assert(mlBits <= 16); - assert(ofBits <= 31); - { - nuint offset; - if (ofBits > 1) + uint 
ofBase = ofDInfo->baseValue; + byte llBits = llDInfo->nbAdditionalBits; + byte mlBits = mlDInfo->nbAdditionalBits; + byte ofBits = ofDInfo->nbAdditionalBits; + byte totalBits = (byte)(llBits + mlBits + ofBits); + ushort llNext = llDInfo->nextState; + ushort mlNext = mlDInfo->nextState; + ushort ofNext = ofDInfo->nextState; + uint llnbBits = llDInfo->nbBits; + uint mlnbBits = mlDInfo->nbBits; + uint ofnbBits = ofDInfo->nbBits; + assert(llBits <= 16); + assert(mlBits <= 16); + assert(ofBits <= 31); + { + nuint offset; + if (ofBits > 1) + { + if (MEM_32bits && longOffsets != default && ofBits >= 25) { - if (MEM_32bits && longOffsets != default && ofBits >= 25) - { - /* Always read extra bits, this keeps the logic simple, - * avoids branches, and avoids accidentally reading 0 bits. - */ - const uint extraBits = 30 - 25; - offset = - ofBase - + ( - BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) - << (int)extraBits - ); + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + const uint extraBits = 30 - 25; + offset = + ofBase + + ( + BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) + << (int)extraBits + ); + BIT_reloadDStream(&seqState->DStream); + offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } + else + { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); + if (MEM_32bits) BIT_reloadDStream(&seqState->DStream); - offset += BIT_readBitsFast(&seqState->DStream, extraBits); - } - else - { - offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); - if (MEM_32bits) - BIT_reloadDStream(&seqState->DStream); - } + } - seqState->prevOffset.e2 = seqState->prevOffset.e1; - seqState->prevOffset.e1 = seqState->prevOffset.e0; + seqState->prevOffset.e2 = seqState->prevOffset.e1; + seqState->prevOffset.e1 = seqState->prevOffset.e0; + seqState->prevOffset.e0 = offset; + } + else + { + uint ll0 = llDInfo->baseValue == 0 ? 
1U : 0U; + if (ofBits == 0) + { + offset = (&seqState->prevOffset.e0)[ll0]; + seqState->prevOffset.e1 = (&seqState->prevOffset.e0)[ll0 == 0 ? 1 : 0]; seqState->prevOffset.e0 = offset; } else { - uint ll0 = llDInfo->baseValue == 0 ? 1U : 0U; - if (ofBits == 0) - { - offset = (&seqState->prevOffset.e0)[ll0]; - seqState->prevOffset.e1 = (&seqState->prevOffset.e0)[ll0 == 0 ? 1 : 0]; - seqState->prevOffset.e0 = offset; - } - else + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); { - offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); - { - nuint temp = - offset == 3 - ? seqState->prevOffset.e0 - 1 - : (&seqState->prevOffset.e0)[offset]; - temp -= temp == 0 ? 1U : 0U; - if (offset != 1) - seqState->prevOffset.e2 = seqState->prevOffset.e1; - seqState->prevOffset.e1 = seqState->prevOffset.e0; - seqState->prevOffset.e0 = offset = temp; - } + nuint temp = + offset == 3 + ? seqState->prevOffset.e0 - 1 + : (&seqState->prevOffset.e0)[offset]; + temp -= temp == 0 ? 1U : 0U; + if (offset != 1) + seqState->prevOffset.e2 = seqState->prevOffset.e1; + seqState->prevOffset.e1 = seqState->prevOffset.e0; + seqState->prevOffset.e0 = offset = temp; } } - - seq.offset = offset; } - if (mlBits > 0) - seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits); - if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) - BIT_reloadDStream(&seqState->DStream); - if (llBits > 0) - seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits); + seq.offset = offset; + } + + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits); + if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) + BIT_reloadDStream(&seqState->DStream); + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits); + if (MEM_32bits) + 
BIT_reloadDStream(&seqState->DStream); + if (isLastSeq == 0) + { + ZSTD_updateFseStateWithDInfo( + &seqState->stateLL, + &seqState->DStream, + llNext, + llnbBits + ); + ZSTD_updateFseStateWithDInfo( + &seqState->stateML, + &seqState->DStream, + mlNext, + mlnbBits + ); if (MEM_32bits) BIT_reloadDStream(&seqState->DStream); - if (isLastSeq == 0) - { - ZSTD_updateFseStateWithDInfo( - &seqState->stateLL, - &seqState->DStream, - llNext, - llnbBits - ); - ZSTD_updateFseStateWithDInfo( - &seqState->stateML, - &seqState->DStream, - mlNext, - mlnbBits - ); - if (MEM_32bits) - BIT_reloadDStream(&seqState->DStream); - ZSTD_updateFseStateWithDInfo( - &seqState->stateOffb, - &seqState->DStream, - ofNext, - ofnbBits - ); - BIT_reloadDStream(&seqState->DStream); - } + ZSTD_updateFseStateWithDInfo( + &seqState->stateOffb, + &seqState->DStream, + ofNext, + ofnbBits + ); + BIT_reloadDStream(&seqState->DStream); } - - return seq; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequences_bodySplitLitBuffer( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + return seq; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequences_bodySplitLitBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litBufferEnd = dctx->litBufferEnd; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* vBase = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) { - byte* ip = (byte*)seqStart; - byte* iend = ip + seqSize; - byte* ostart = (byte*)dst; - byte* oend = ZSTD_maybeNullPtrAdd(ostart, 
(nint)maxDstSize); - byte* op = ostart; - byte* litPtr = dctx->litPtr; - byte* litBufferEnd = dctx->litBufferEnd; - byte* prefixStart = (byte*)dctx->prefixStart; - byte* vBase = (byte*)dctx->virtualStart; - byte* dictEnd = (byte*)dctx->dictEnd; - if (nbSeq != 0) + seqState_t seqState; + dctx->fseEntropy = 1; { - seqState_t seqState; - dctx->fseEntropy = 1; - { - uint i; - for (i = 0; i < 3; i++) - (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; - } + uint i; + for (i = 0; i < 3; i++) + (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; + } - if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } - ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); - ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); - ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); - assert(dst != null); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != null); + { + /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ + seq_t sequence = new seq_t { - /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ - seq_t sequence = new seq_t - { - litLength = 0, - matchLength = 0, - offset = 0, - }; - for (; nbSeq != 0; nbSeq--) + litLength = 0, + matchLength = 0, + offset = 0, + }; + for (; nbSeq != 0; nbSeq--) + { + sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq == 1 ? 
1 : 0); + if (litPtr + sequence.litLength > dctx->litBufferEnd) + break; { - sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq == 1 ? 1 : 0); - if (litPtr + sequence.litLength > dctx->litBufferEnd) - break; - { - nuint oneSeqSize = ZSTD_execSequenceSplitLitBuffer( - op, - oend, - litPtr + sequence.litLength - 32, - sequence, - &litPtr, - litBufferEnd, - prefixStart, - vBase, - dictEnd - ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } + nuint oneSeqSize = ZSTD_execSequenceSplitLitBuffer( + op, + oend, + litPtr + sequence.litLength - 32, + sequence, + &litPtr, + litBufferEnd, + prefixStart, + vBase, + dictEnd + ); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; } + } - if (nbSeq > 0) + if (nbSeq > 0) + { + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) { - nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); - if (leftoverLit != 0) + if (leftoverLit > (nuint)(oend - op)) { - if (leftoverLit > (nuint)(oend - op)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } - - ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); - sequence.litLength -= leftoverLit; - op += leftoverLit; - } - - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + (1 << 16); - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; - { - nuint oneSeqSize = ZSTD_execSequence( - op, - oend, - sequence, - &litPtr, - litBufferEnd, - prefixStart, - vBase, - dictEnd + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; } - nbSeq--; + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequence.litLength -= leftoverLit; + op += leftoverLit; } - } - if (nbSeq > 0) - { - for (; nbSeq != 0; nbSeq--) + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = 
ZSTD_litLocation_e.ZSTD_not_in_dst; { - seq_t sequence = ZSTD_decodeSequence( - &seqState, - isLongOffset, - nbSeq == 1 ? 1 : 0 - ); nuint oneSeqSize = ZSTD_execSequence( op, oend, @@ -2099,160 +2071,211 @@ ZSTD_longOffset_e isLongOffset return oneSeqSize; op += oneSeqSize; } - } - if (nbSeq != 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + nbSeq--; } + } - if (BIT_endOfDStream(&seqState.DStream) == 0) + if (nbSeq > 0) + { + for (; nbSeq != 0; nbSeq--) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + nbSeq == 1 ? 1 : 0 + ); + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + sequence, + &litPtr, + litBufferEnd, + prefixStart, + vBase, + dictEnd + ); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; } + } - { - uint i; - for (i = 0; i < 3; i++) - dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; - } + if (nbSeq != 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + if (BIT_endOfDStream(&seqState.DStream) == 0) { - /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ - nuint lastLLSize = (nuint)(litBufferEnd - litPtr); - if (lastLLSize > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } - if (op != null) - { - memmove(op, litPtr, lastLLSize); - op += lastLLSize; - } + { + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; + } + } - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + (1 << 16); - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + /* split hasn't been reached yet, 
first get dst then copy litExtraBuffer */ + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + if (op != null) { - nuint lastLLSize = (nuint)(litBufferEnd - litPtr); - if (lastLLSize > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } - if (op != null) - { - memcpy(op, litPtr, (uint)lastLLSize); - op += lastLLSize; - } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + } + + { + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - return (nuint)(op - ostart); + if (op != null) + { + memcpy(op, litPtr, (uint)lastLLSize); + op += lastLLSize; + } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequences_body( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + return (nuint)(op - ostart); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequences_body( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + // HACK, force nbSeq to stack (better register usage) + System.Threading.Thread.VolatileRead(ref nbSeq); + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_not_in_dst + ? 
ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize) + : dctx->litBuffer; + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litEnd = litPtr + dctx->litSize; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* vBase = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) { - // HACK, force nbSeq to stack (better register usage) - System.Threading.Thread.VolatileRead(ref nbSeq); - byte* ip = (byte*)seqStart; - byte* iend = ip + seqSize; - byte* ostart = (byte*)dst; - byte* oend = - dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_not_in_dst - ? ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize) - : dctx->litBuffer; - byte* op = ostart; - byte* litPtr = dctx->litPtr; - byte* litEnd = litPtr + dctx->litSize; - byte* prefixStart = (byte*)dctx->prefixStart; - byte* vBase = (byte*)dctx->virtualStart; - byte* dictEnd = (byte*)dctx->dictEnd; - if (nbSeq != 0) + seqState_t seqState; + System.Runtime.CompilerServices.Unsafe.SkipInit(out seqState); + dctx->fseEntropy = 1; { - seqState_t seqState; - System.Runtime.CompilerServices.Unsafe.SkipInit(out seqState); - dctx->fseEntropy = 1; - { - uint i; - for (i = 0; i < 3; i++) - System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - (int)i - ) = dctx->entropy.rep[i]; - } + uint i; + for (i = 0; i < 3; i++) + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)i + ) = dctx->entropy.rep[i]; + } - if (ERR_isError(BIT_initDStream(ref seqState.DStream, ip, (nuint)(iend - ip)))) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + if (ERR_isError(BIT_initDStream(ref seqState.DStream, ip, (nuint)(iend - ip)))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } - ZSTD_initFseState(ref seqState.stateLL, ref seqState.DStream, dctx->LLTptr); - ZSTD_initFseState(ref seqState.stateOffb, ref seqState.DStream, dctx->OFTptr); - ZSTD_initFseState(ref seqState.stateML, ref 
seqState.DStream, dctx->MLTptr); - assert(dst != null); - nuint seqState_DStream_bitContainer = seqState.DStream.bitContainer; - uint seqState_DStream_bitsConsumed = seqState.DStream.bitsConsumed; - sbyte* seqState_DStream_ptr = seqState.DStream.ptr; - sbyte* seqState_DStream_start = seqState.DStream.start; - sbyte* seqState_DStream_limitPtr = seqState.DStream.limitPtr; - for (; nbSeq != 0; nbSeq--) - { - nuint sequence_litLength; - nuint sequence_matchLength; - nuint sequence_offset; - ZSTD_seqSymbol* llDInfo = seqState.stateLL.table + seqState.stateLL.state; - ZSTD_seqSymbol* mlDInfo = seqState.stateML.table + seqState.stateML.state; - ZSTD_seqSymbol* ofDInfo = seqState.stateOffb.table + seqState.stateOffb.state; - sequence_matchLength = mlDInfo->baseValue; - sequence_litLength = llDInfo->baseValue; + ZSTD_initFseState(ref seqState.stateLL, ref seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(ref seqState.stateOffb, ref seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(ref seqState.stateML, ref seqState.DStream, dctx->MLTptr); + assert(dst != null); + nuint seqState_DStream_bitContainer = seqState.DStream.bitContainer; + uint seqState_DStream_bitsConsumed = seqState.DStream.bitsConsumed; + sbyte* seqState_DStream_ptr = seqState.DStream.ptr; + sbyte* seqState_DStream_start = seqState.DStream.start; + sbyte* seqState_DStream_limitPtr = seqState.DStream.limitPtr; + for (; nbSeq != 0; nbSeq--) + { + nuint sequence_litLength; + nuint sequence_matchLength; + nuint sequence_offset; + ZSTD_seqSymbol* llDInfo = seqState.stateLL.table + seqState.stateLL.state; + ZSTD_seqSymbol* mlDInfo = seqState.stateML.table + seqState.stateML.state; + ZSTD_seqSymbol* ofDInfo = seqState.stateOffb.table + seqState.stateOffb.state; + sequence_matchLength = mlDInfo->baseValue; + sequence_litLength = llDInfo->baseValue; + { + uint ofBase = ofDInfo->baseValue; + byte llBits = llDInfo->nbAdditionalBits; + byte mlBits = mlDInfo->nbAdditionalBits; + byte ofBits = 
ofDInfo->nbAdditionalBits; + byte totalBits = (byte)(llBits + mlBits + ofBits); + ushort llNext = llDInfo->nextState; + ushort mlNext = mlDInfo->nextState; + ushort ofNext = ofDInfo->nextState; + uint llnbBits = llDInfo->nbBits; + uint mlnbBits = mlDInfo->nbBits; + uint ofnbBits = ofDInfo->nbBits; + assert(llBits <= 16); + assert(mlBits <= 16); + assert(ofBits <= 31); { - uint ofBase = ofDInfo->baseValue; - byte llBits = llDInfo->nbAdditionalBits; - byte mlBits = mlDInfo->nbAdditionalBits; - byte ofBits = ofDInfo->nbAdditionalBits; - byte totalBits = (byte)(llBits + mlBits + ofBits); - ushort llNext = llDInfo->nextState; - ushort mlNext = mlDInfo->nextState; - ushort ofNext = ofDInfo->nextState; - uint llnbBits = llDInfo->nbBits; - uint mlnbBits = mlDInfo->nbBits; - uint ofnbBits = ofDInfo->nbBits; - assert(llBits <= 16); - assert(mlBits <= 16); - assert(ofBits <= 31); + nuint offset; + if (ofBits > 1) { - nuint offset; - if (ofBits > 1) + if (MEM_32bits && isLongOffset != default && ofBits >= 25) { - if (MEM_32bits && isLongOffset != default && ofBits >= 25) - { - /* Always read extra bits, this keeps the logic simple, - * avoids branches, and avoids accidentally reading 0 bits. - */ - const uint extraBits = 30 - 25; - offset = - ofBase - + ( - BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ofBits - extraBits - ) << (int)extraBits - ); + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. 
+ */ + const uint extraBits = 30 - 25; + offset = + ofBase + + ( + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofBits - extraBits + ) << (int)extraBits + ); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + offset += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + extraBits + ); + } + else + { + offset = + ofBase + + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofBits + ); + if (MEM_32bits) BIT_reloadDStream( ref seqState_DStream_bitContainer, ref seqState_DStream_bitsConsumed, @@ -2260,109 +2283,110 @@ ZSTD_longOffset_e isLongOffset seqState_DStream_start, seqState_DStream_limitPtr ); - offset += BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - extraBits - ); - } - else - { - offset = - ofBase - + BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ofBits - ); - if (MEM_32bits) - BIT_reloadDStream( - ref seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ref seqState_DStream_ptr, - seqState_DStream_start, - seqState_DStream_limitPtr - ); - } + } - seqState.prevOffset.e2 = seqState.prevOffset.e1; - seqState.prevOffset.e1 = seqState.prevOffset.e0; + seqState.prevOffset.e2 = seqState.prevOffset.e1; + seqState.prevOffset.e1 = seqState.prevOffset.e0; + seqState.prevOffset.e0 = offset; + } + else + { + uint ll0 = llDInfo->baseValue == 0 ? 1U : 0U; + if (ofBits == 0) + { + offset = System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)ll0 + ); + seqState.prevOffset.e1 = + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + ll0 == 0 ? 1 : 0 + ); seqState.prevOffset.e0 = offset; } else { - uint ll0 = llDInfo->baseValue == 0 ? 
1U : 0U; - if (ofBits == 0) - { - offset = System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - (int)ll0 + offset = + ofBase + + ll0 + + BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + 1 ); - seqState.prevOffset.e1 = - System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - ll0 == 0 ? 1 : 0 - ); - seqState.prevOffset.e0 = offset; - } - else { - offset = - ofBase - + ll0 - + BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - 1 - ); - { - nuint temp = - offset == 3 - ? seqState.prevOffset.e0 - 1 - : System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - (int)offset - ); - temp -= temp == 0 ? 1U : 0U; - if (offset != 1) - seqState.prevOffset.e2 = seqState.prevOffset.e1; - seqState.prevOffset.e1 = seqState.prevOffset.e0; - seqState.prevOffset.e0 = offset = temp; - } + nuint temp = + offset == 3 + ? seqState.prevOffset.e0 - 1 + : System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)offset + ); + temp -= temp == 0 ? 
1U : 0U; + if (offset != 1) + seqState.prevOffset.e2 = seqState.prevOffset.e1; + seqState.prevOffset.e1 = seqState.prevOffset.e0; + seqState.prevOffset.e0 = offset = temp; } } - - sequence_offset = offset; } - if (mlBits > 0) - sequence_matchLength += BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - mlBits - ); - if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) - BIT_reloadDStream( - ref seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ref seqState_DStream_ptr, - seqState_DStream_start, - seqState_DStream_limitPtr - ); - if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) - BIT_reloadDStream( - ref seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ref seqState_DStream_ptr, - seqState_DStream_start, - seqState_DStream_limitPtr - ); - if (llBits > 0) - sequence_litLength += BIT_readBitsFast( - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - llBits - ); + sequence_offset = offset; + } + + if (mlBits > 0) + sequence_matchLength += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + mlBits + ); + if (MEM_32bits && mlBits + llBits >= 25 - (30 - 25)) + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + if (MEM_64bits && totalBits >= 57 - (9 + 9 + 8)) + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + if (llBits > 0) + sequence_litLength += BIT_readBitsFast( + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + llBits + ); + if (MEM_32bits) + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); + if ((nbSeq == 1 ? 
1 : 0) == 0) + { + ZSTD_updateFseStateWithDInfo( + ref seqState.stateLL, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + llNext, + llnbBits + ); + ZSTD_updateFseStateWithDInfo( + ref seqState.stateML, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + mlNext, + mlnbBits + ); if (MEM_32bits) BIT_reloadDStream( ref seqState_DStream_bitContainer, @@ -2371,265 +2395,241 @@ ZSTD_longOffset_e isLongOffset seqState_DStream_start, seqState_DStream_limitPtr ); - if ((nbSeq == 1 ? 1 : 0) == 0) - { - ZSTD_updateFseStateWithDInfo( - ref seqState.stateLL, - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - llNext, - llnbBits - ); - ZSTD_updateFseStateWithDInfo( - ref seqState.stateML, - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - mlNext, - mlnbBits - ); - if (MEM_32bits) - BIT_reloadDStream( - ref seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ref seqState_DStream_ptr, - seqState_DStream_start, - seqState_DStream_limitPtr - ); - ZSTD_updateFseStateWithDInfo( - ref seqState.stateOffb, - seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ofNext, - ofnbBits - ); - BIT_reloadDStream( - ref seqState_DStream_bitContainer, - ref seqState_DStream_bitsConsumed, - ref seqState_DStream_ptr, - seqState_DStream_start, - seqState_DStream_limitPtr - ); - } + ZSTD_updateFseStateWithDInfo( + ref seqState.stateOffb, + seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ofNext, + ofnbBits + ); + BIT_reloadDStream( + ref seqState_DStream_bitContainer, + ref seqState_DStream_bitsConsumed, + ref seqState_DStream_ptr, + seqState_DStream_start, + seqState_DStream_limitPtr + ); } + } - nuint oneSeqSize; + nuint oneSeqSize; + { + byte* oLitEnd = op + sequence_litLength; + oneSeqSize = sequence_litLength + sequence_matchLength; + /* risk : address space overflow (32-bits) */ + byte* oMatchEnd = op + oneSeqSize; + /* risk : address space underflow on 
oend=NULL */ + byte* oend_w = oend - 32; + byte* iLitEnd = litPtr + sequence_litLength; + byte* match = oLitEnd - sequence_offset; + assert(op != null); + assert(oend_w < oend); + if ( + iLitEnd > litEnd + || oMatchEnd > oend_w + || MEM_32bits && (nuint)(oend - op) < oneSeqSize + 32 + ) { - byte* oLitEnd = op + sequence_litLength; - oneSeqSize = sequence_litLength + sequence_matchLength; - /* risk : address space overflow (32-bits) */ - byte* oMatchEnd = op + oneSeqSize; - /* risk : address space underflow on oend=NULL */ - byte* oend_w = oend - 32; - byte* iLitEnd = litPtr + sequence_litLength; - byte* match = oLitEnd - sequence_offset; - assert(op != null); - assert(oend_w < oend); - if ( - iLitEnd > litEnd - || oMatchEnd > oend_w - || MEM_32bits && (nuint)(oend - op) < oneSeqSize + 32 - ) - { - oneSeqSize = ZSTD_execSequenceEnd( - op, - oend, - new seq_t - { - litLength = sequence_litLength, - matchLength = sequence_matchLength, - offset = sequence_offset, - }, - &litPtr, - litEnd, - prefixStart, - vBase, - dictEnd - ); - goto returnOneSeqSize; - } - - assert(op <= oLitEnd); - assert(oLitEnd < oMatchEnd); - assert(oMatchEnd <= oend); - assert(iLitEnd <= litEnd); - assert(oLitEnd <= oend_w); - assert(oMatchEnd <= oend_w); - assert(32 >= 16); - ZSTD_copy16(op, litPtr); - if (sequence_litLength > 16) - { - ZSTD_wildcopy( - op + 16, - litPtr + 16, - (nint)(sequence_litLength - 16), - ZSTD_overlap_e.ZSTD_no_overlap - ); - } - - byte* opInner = oLitEnd; - litPtr = iLitEnd; - if (sequence_offset > (nuint)(oLitEnd - prefixStart)) - { - if (sequence_offset > (nuint)(oLitEnd - vBase)) + oneSeqSize = ZSTD_execSequenceEnd( + op, + oend, + new seq_t { - oneSeqSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - goto returnOneSeqSize; - } + litLength = sequence_litLength, + matchLength = sequence_matchLength, + offset = sequence_offset, + }, + &litPtr, + litEnd, + prefixStart, + vBase, + dictEnd + ); + goto returnOneSeqSize; + } - match = 
dictEnd + (match - prefixStart); - if (match + sequence_matchLength <= dictEnd) - { - memmove(oLitEnd, match, sequence_matchLength); - goto returnOneSeqSize; - } + assert(op <= oLitEnd); + assert(oLitEnd < oMatchEnd); + assert(oMatchEnd <= oend); + assert(iLitEnd <= litEnd); + assert(oLitEnd <= oend_w); + assert(oMatchEnd <= oend_w); + assert(32 >= 16); + ZSTD_copy16(op, litPtr); + if (sequence_litLength > 16) + { + ZSTD_wildcopy( + op + 16, + litPtr + 16, + (nint)(sequence_litLength - 16), + ZSTD_overlap_e.ZSTD_no_overlap + ); + } - { - nuint length1 = (nuint)(dictEnd - match); - memmove(oLitEnd, match, length1); - opInner = oLitEnd + length1; - sequence_matchLength -= length1; - match = prefixStart; - } + byte* opInner = oLitEnd; + litPtr = iLitEnd; + if (sequence_offset > (nuint)(oLitEnd - prefixStart)) + { + if (sequence_offset > (nuint)(oLitEnd - vBase)) + { + oneSeqSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + goto returnOneSeqSize; } - assert(opInner <= oMatchEnd); - assert(oMatchEnd <= oend_w); - assert(match >= prefixStart); - assert(sequence_matchLength >= 1); - if (sequence_offset >= 16) + match = dictEnd + (match - prefixStart); + if (match + sequence_matchLength <= dictEnd) { - ZSTD_wildcopy( - opInner, - match, - (nint)sequence_matchLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); + memmove(oLitEnd, match, sequence_matchLength); goto returnOneSeqSize; } - assert(sequence_offset < 16); - ZSTD_overlapCopy8(ref opInner, ref match, sequence_offset); - if (sequence_matchLength > 8) { - assert(opInner < oMatchEnd); - ZSTD_wildcopy( - opInner, - match, - (nint)sequence_matchLength - 8, - ZSTD_overlap_e.ZSTD_overlap_src_before_dst - ); + nuint length1 = (nuint)(dictEnd - match); + memmove(oLitEnd, match, length1); + opInner = oLitEnd + length1; + sequence_matchLength -= length1; + match = prefixStart; } + } - returnOneSeqSize: - ; + assert(opInner <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= 
prefixStart); + assert(sequence_matchLength >= 1); + if (sequence_offset >= 16) + { + ZSTD_wildcopy( + opInner, + match, + (nint)sequence_matchLength, + ZSTD_overlap_e.ZSTD_no_overlap + ); + goto returnOneSeqSize; } - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } + assert(sequence_offset < 16); + ZSTD_overlapCopy8(ref opInner, ref match, sequence_offset); + if (sequence_matchLength > 8) + { + assert(opInner < oMatchEnd); + ZSTD_wildcopy( + opInner, + match, + (nint)sequence_matchLength - 8, + ZSTD_overlap_e.ZSTD_overlap_src_before_dst + ); + } - assert(nbSeq == 0); - if ( - BIT_endOfDStream( - seqState_DStream_bitsConsumed, - seqState_DStream_ptr, - seqState_DStream_start - ) == 0 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + returnOneSeqSize: + ; } - { - uint i; - for (i = 0; i < 3; i++) - dctx->entropy.rep[i] = (uint) - System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - (int)i - ); - } + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; } + assert(nbSeq == 0); + if ( + BIT_endOfDStream( + seqState_DStream_bitsConsumed, + seqState_DStream_ptr, + seqState_DStream_start + ) == 0 + ) { - nuint lastLLSize = (nuint)(litEnd - litPtr); - if (lastLLSize > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - if (op != null) - { - memcpy(op, litPtr, (uint)lastLLSize); - op += lastLLSize; - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - return (nuint)(op - ostart); + { + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint) + System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + (int)i + ); + } } - private static nuint ZSTD_decompressSequences_default( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) { - return 
ZSTD_decompressSequences_body( - dctx, - dst, - maxDstSize, - seqStart, - seqSize, - nbSeq, - isLongOffset - ); - } + nuint lastLLSize = (nuint)(litEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - private static nuint ZSTD_decompressSequencesSplitLitBuffer_default( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) - { - return ZSTD_decompressSequences_bodySplitLitBuffer( - dctx, - dst, - maxDstSize, - seqStart, - seqSize, - nbSeq, - isLongOffset - ); + if (op != null) + { + memcpy(op, litPtr, (uint)lastLLSize); + op += lastLLSize; + } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_prefetchMatch( - nuint prefetchPos, - seq_t sequence, - byte* prefixStart, - byte* dictEnd - ) + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressSequences_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequences_body( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + private static nuint ZSTD_decompressSequencesSplitLitBuffer_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequences_bodySplitLitBuffer( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_prefetchMatch( + nuint prefetchPos, + seq_t sequence, + byte* prefixStart, + byte* dictEnd + ) + { + prefetchPos += sequence.litLength; { - prefetchPos += sequence.litLength; - { - byte* matchBase = sequence.offset > prefetchPos ? 
dictEnd : prefixStart; - /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. - * No consequence though : memory address is only used for prefetching, not for dereferencing */ - byte* match = ZSTD_wrappedPtrSub( - ZSTD_wrappedPtrAdd(matchBase, (nint)prefetchPos), - (nint)sequence.offset - ); + byte* matchBase = sequence.offset > prefetchPos ? dictEnd : prefixStart; + /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. + * No consequence though : memory address is only used for prefetching, not for dereferencing */ + byte* match = ZSTD_wrappedPtrSub( + ZSTD_wrappedPtrAdd(matchBase, (nint)prefetchPos), + (nint)sequence.offset + ); #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { @@ -2637,202 +2637,232 @@ private static nuint ZSTD_prefetchMatch( System.Runtime.Intrinsics.X86.Sse.Prefetch0(match + 64); } #endif - } - - return prefetchPos + sequence.matchLength; } - /* This decoding function employs prefetching - * to reduce latency impact of cache misses. - * It's generally employed when block contains a significant portion of long-distance matches - * or when coupled with a "cold" dictionary */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_decompressSequencesLong_body( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + return prefetchPos + sequence.matchLength; + } + + /* This decoding function employs prefetching + * to reduce latency impact of cache misses. 
+ * It's generally employed when block contains a significant portion of long-distance matches + * or when coupled with a "cold" dictionary */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_decompressSequencesLong_body( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + byte* ip = (byte*)seqStart; + byte* iend = ip + seqSize; + byte* ostart = (byte*)dst; + byte* oend = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_in_dst + ? dctx->litBuffer + : ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); + byte* op = ostart; + byte* litPtr = dctx->litPtr; + byte* litBufferEnd = dctx->litBufferEnd; + byte* prefixStart = (byte*)dctx->prefixStart; + byte* dictStart = (byte*)dctx->virtualStart; + byte* dictEnd = (byte*)dctx->dictEnd; + if (nbSeq != 0) { - byte* ip = (byte*)seqStart; - byte* iend = ip + seqSize; - byte* ostart = (byte*)dst; - byte* oend = - dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_in_dst - ? dctx->litBuffer - : ZSTD_maybeNullPtrAdd(ostart, (nint)maxDstSize); - byte* op = ostart; - byte* litPtr = dctx->litPtr; - byte* litBufferEnd = dctx->litBufferEnd; - byte* prefixStart = (byte*)dctx->prefixStart; - byte* dictStart = (byte*)dctx->virtualStart; - byte* dictEnd = (byte*)dctx->dictEnd; - if (nbSeq != 0) + seq_t* sequences = stackalloc seq_t[8]; + int seqAdvance = nbSeq < 8 ? nbSeq : 8; + seqState_t seqState; + int seqNb; + /* track position relative to prefixStart */ + nuint prefetchPos = (nuint)(op - prefixStart); + dctx->fseEntropy = 1; { - seq_t* sequences = stackalloc seq_t[8]; - int seqAdvance = nbSeq < 8 ? 
nbSeq : 8; - seqState_t seqState; - int seqNb; - /* track position relative to prefixStart */ - nuint prefetchPos = (nuint)(op - prefixStart); - dctx->fseEntropy = 1; - { - int i; - for (i = 0; i < 3; i++) - (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; - } + int i; + for (i = 0; i < 3; i++) + (&seqState.prevOffset.e0)[i] = dctx->entropy.rep[i]; + } - assert(dst != null); - assert(iend >= ip); - if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } + assert(dst != null); + assert(iend >= ip); + if (ERR_isError(BIT_initDStream(&seqState.DStream, ip, (nuint)(iend - ip)))) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } - ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); - ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); - ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); - for (seqNb = 0; seqNb < seqAdvance; seqNb++) - { - seq_t sequence = ZSTD_decodeSequence( - &seqState, - isLongOffset, - seqNb == nbSeq - 1 ? 1 : 0 - ); - prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); - sequences[seqNb] = sequence; - } + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + for (seqNb = 0; seqNb < seqAdvance; seqNb++) + { + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + seqNb == nbSeq - 1 ? 1 : 0 + ); + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb] = sequence; + } - for (; seqNb < nbSeq; seqNb++) + for (; seqNb < nbSeq; seqNb++) + { + seq_t sequence = ZSTD_decodeSequence( + &seqState, + isLongOffset, + seqNb == nbSeq - 1 ? 
1 : 0 + ); + if ( + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + && litPtr + sequences[seqNb - 8 & 8 - 1].litLength > dctx->litBufferEnd + ) { - seq_t sequence = ZSTD_decodeSequence( - &seqState, - isLongOffset, - seqNb == nbSeq - 1 ? 1 : 0 - ); - if ( - dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split - && litPtr + sequences[seqNb - 8 & 8 - 1].litLength > dctx->litBufferEnd - ) + /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) { - /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ - nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); - if (leftoverLit != 0) + if (leftoverLit > (nuint)(oend - op)) { - if (leftoverLit > (nuint)(oend - op)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } - - ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); - sequences[seqNb - 8 & 8 - 1].litLength -= leftoverLit; - op += leftoverLit; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + (1 << 16); - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; - { - nuint oneSeqSize = ZSTD_execSequence( + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequences[seqNb - 8 & 8 - 1].litLength -= leftoverLit; + op += leftoverLit; + } + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; + { + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + sequences[seqNb - 8 & 8 - 1], + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ); + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + prefetchPos = ZSTD_prefetchMatch( + prefetchPos, + sequence, + prefixStart, + dictEnd + ); + sequences[seqNb & 8 - 1] 
= sequence; + op += oneSeqSize; + } + } + else + { + /* lit buffer is either wholly contained in first or second split, or not split at all*/ + nuint oneSeqSize = + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + ? ZSTD_execSequenceSplitLitBuffer( op, oend, + litPtr + sequences[seqNb - 8 & 8 - 1].litLength - 32, sequences[seqNb - 8 & 8 - 1], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd - ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch( - prefetchPos, - sequence, + ) + : ZSTD_execSequence( + op, + oend, + sequences[seqNb - 8 & 8 - 1], + &litPtr, + litBufferEnd, prefixStart, + dictStart, dictEnd ); - sequences[seqNb & 8 - 1] = sequence; - op += oneSeqSize; + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + prefetchPos = ZSTD_prefetchMatch( + prefetchPos, + sequence, + prefixStart, + dictEnd + ); + sequences[seqNb & 8 - 1] = sequence; + op += oneSeqSize; + } + } + + if (BIT_endOfDStream(&seqState.DStream) == 0) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); + } + + seqNb -= seqAdvance; + for (; seqNb < nbSeq; seqNb++) + { + seq_t* sequence = &sequences[seqNb & 8 - 1]; + if ( + dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split + && litPtr + sequence->litLength > dctx->litBufferEnd + ) + { + nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); + if (leftoverLit != 0) + { + if (leftoverLit > (nuint)(oend - op)) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); } + + ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); + sequence->litLength -= leftoverLit; + op += leftoverLit; } - else + + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); + dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; { - /* lit buffer is either wholly contained in first or second split, or not split at all*/ - nuint oneSeqSize = - dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split - ? 
ZSTD_execSequenceSplitLitBuffer( - op, - oend, - litPtr + sequences[seqNb - 8 & 8 - 1].litLength - 32, - sequences[seqNb - 8 & 8 - 1], - &litPtr, - litBufferEnd, - prefixStart, - dictStart, - dictEnd - ) - : ZSTD_execSequence( - op, - oend, - sequences[seqNb - 8 & 8 - 1], - &litPtr, - litBufferEnd, - prefixStart, - dictStart, - dictEnd - ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch( - prefetchPos, - sequence, + nuint oneSeqSize = ZSTD_execSequence( + op, + oend, + *sequence, + &litPtr, + litBufferEnd, prefixStart, + dictStart, dictEnd ); - sequences[seqNb & 8 - 1] = sequence; + if (ERR_isError(oneSeqSize)) + return oneSeqSize; op += oneSeqSize; } } - - if (BIT_endOfDStream(&seqState.DStream) == 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); - } - - seqNb -= seqAdvance; - for (; seqNb < nbSeq; seqNb++) + else { - seq_t* sequence = &sequences[seqNb & 8 - 1]; - if ( + nuint oneSeqSize = dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split - && litPtr + sequence->litLength > dctx->litBufferEnd - ) - { - nuint leftoverLit = (nuint)(dctx->litBufferEnd - litPtr); - if (leftoverLit != 0) - { - if (leftoverLit > (nuint)(oend - op)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } - - ZSTD_safecopyDstBeforeSrc(op, litPtr, (nint)leftoverLit); - sequence->litLength -= leftoverLit; - op += leftoverLit; - } - - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + (1 << 16); - dctx->litBufferLocation = ZSTD_litLocation_e.ZSTD_not_in_dst; - { - nuint oneSeqSize = ZSTD_execSequence( + ? 
ZSTD_execSequenceSplitLitBuffer( + op, + oend, + litPtr + sequence->litLength - 32, + *sequence, + &litPtr, + litBufferEnd, + prefixStart, + dictStart, + dictEnd + ) + : ZSTD_execSequence( op, oend, *sequence, @@ -2842,512 +2872,481 @@ ZSTD_longOffset_e isLongOffset dictStart, dictEnd ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } - } - else - { - nuint oneSeqSize = - dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split - ? ZSTD_execSequenceSplitLitBuffer( - op, - oend, - litPtr + sequence->litLength - 32, - *sequence, - &litPtr, - litBufferEnd, - prefixStart, - dictStart, - dictEnd - ) - : ZSTD_execSequence( - op, - oend, - *sequence, - &litPtr, - litBufferEnd, - prefixStart, - dictStart, - dictEnd - ); - if (ERR_isError(oneSeqSize)) - return oneSeqSize; - op += oneSeqSize; - } - } - - { - uint i; - for (i = 0; i < 3; i++) - dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; + if (ERR_isError(oneSeqSize)) + return oneSeqSize; + op += oneSeqSize; } } - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { - nuint lastLLSize = (nuint)(litBufferEnd - litPtr); - if (lastLLSize > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - if (op != null) - { - memmove(op, litPtr, lastLLSize); - op += lastLLSize; - } - - litPtr = dctx->litExtraBuffer; - litBufferEnd = dctx->litExtraBuffer + (1 << 16); + uint i; + for (i = 0; i < 3; i++) + dctx->entropy.rep[i] = (uint)(&seqState.prevOffset.e0)[i]; } + } + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + { + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) { - nuint lastLLSize = (nuint)(litBufferEnd - litPtr); - if (lastLLSize > (nuint)(oend - op)) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } - if (op != null) - { - memmove(op, litPtr, 
lastLLSize); - op += lastLLSize; - } + if (op != null) + { + memmove(op, litPtr, lastLLSize); + op += lastLLSize; } - return (nuint)(op - ostart); + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + (1 << 16); } - private static nuint ZSTD_decompressSequencesLong_default( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) { - return ZSTD_decompressSequencesLong_body( - dctx, - dst, - maxDstSize, - seqStart, - seqSize, - nbSeq, - isLongOffset - ); + nuint lastLLSize = (nuint)(litBufferEnd - litPtr); + if (lastLLSize > (nuint)(oend - op)) + { + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); + } + + if (op != null) + { + memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } } - private static nuint ZSTD_decompressSequences( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + return (nuint)(op - ostart); + } + + private static nuint ZSTD_decompressSequencesLong_default( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequencesLong_body( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + private static nuint ZSTD_decompressSequences( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequences_default( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + private static nuint ZSTD_decompressSequencesSplitLitBuffer( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequencesSplitLitBuffer_default( + dctx, + dst, + maxDstSize, + 
seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + /* ZSTD_decompressSequencesLong() : + * decompression function triggered when a minimum share of offsets is considered "long", + * aka out of cache. + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". + * This function will try to mitigate main memory latency through the use of prefetching */ + private static nuint ZSTD_decompressSequencesLong( + ZSTD_DCtx_s* dctx, + void* dst, + nuint maxDstSize, + void* seqStart, + nuint seqSize, + int nbSeq, + ZSTD_longOffset_e isLongOffset + ) + { + return ZSTD_decompressSequencesLong_default( + dctx, + dst, + maxDstSize, + seqStart, + seqSize, + nbSeq, + isLongOffset + ); + } + + /** + * @returns The total size of the history referenceable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ + private static nuint ZSTD_totalHistorySize(byte* op, byte* virtualStart) + { + return (nuint)(op - virtualStart); + } + + /* ZSTD_getOffsetInfo() : + * condition : offTable must be valid + * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) + * compared to maximum possible of (1< table[u].nbAdditionalBits + ? info.maxNbAdditionalBits + : table[u].nbAdditionalBits; + if (table[u].nbAdditionalBits > 22) + info.longOffsetShare += 1; + } + + assert(tableLog <= 8); + info.longOffsetShare <<= (int)(8 - tableLog); } - private static nuint ZSTD_decompressSequencesSplitLitBuffer( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + return info; + } + + /** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. 
+ */ + private static nuint ZSTD_maxShortOffset() + { + if (MEM_64bits) { - return ZSTD_decompressSequencesSplitLitBuffer_default( - dctx, - dst, - maxDstSize, - seqStart, - seqSize, - nbSeq, - isLongOffset - ); + return unchecked((nuint)(-1)); } - - /* ZSTD_decompressSequencesLong() : - * decompression function triggered when a minimum share of offsets is considered "long", - * aka out of cache. - * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". - * This function will try to mitigate main memory latency through the use of prefetching */ - private static nuint ZSTD_decompressSequencesLong( - ZSTD_DCtx_s* dctx, - void* dst, - nuint maxDstSize, - void* seqStart, - nuint seqSize, - int nbSeq, - ZSTD_longOffset_e isLongOffset - ) + else { - return ZSTD_decompressSequencesLong_default( - dctx, - dst, - maxDstSize, - seqStart, - seqSize, - nbSeq, - isLongOffset - ); + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. + * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. + */ + nuint maxOffbase = ((nuint)1 << (int)((uint)(MEM_32bits ? 25 : 57) + 1)) - 1; + nuint maxOffset = maxOffbase - 3; + assert(ZSTD_highbit32((uint)maxOffbase) == (uint)(MEM_32bits ? 25 : 57)); + return maxOffset; } + } - /** - * @returns The total size of the history referenceable by zstd, including - * both the prefix and the extDict. At @p op any offset larger than this - * is invalid. - */ - private static nuint ZSTD_totalHistorySize(byte* op, byte* virtualStart) + /* ZSTD_decompressBlock_internal() : + * decompress block, starting at `src`, + * into destination buffer `dst`. 
+ * @return : decompressed block size, + * or an error code (which can be tested using ZSTD_isError()) + */ + private static nuint ZSTD_decompressBlock_internal( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize, + streaming_operation streaming + ) + { + byte* ip = (byte*)src; + if (srcSize > ZSTD_blockSizeMax(dctx)) { - return (nuint)(op - virtualStart); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); } - /* ZSTD_getOffsetInfo() : - * condition : offTable must be valid - * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) - * compared to maximum possible of (1< table[u].nbAdditionalBits - ? info.maxNbAdditionalBits - : table[u].nbAdditionalBits; - if (table[u].nbAdditionalBits > 22) - info.longOffsetShare += 1; - } - - assert(tableLog <= 8); - info.longOffsetShare <<= (int)(8 - tableLog); - } - - return info; + nuint litCSize = ZSTD_decodeLiteralsBlock( + dctx, + src, + srcSize, + dst, + dstCapacity, + streaming + ); + if (ERR_isError(litCSize)) + return litCSize; + ip += litCSize; + srcSize -= litCSize; } - /** - * @returns The maximum offset we can decode in one read of our bitstream, without - * reloading more bits in the middle of the offset bits read. Any offsets larger - * than this must use the long offset decoder. - */ - private static nuint ZSTD_maxShortOffset() - { - if (MEM_64bits) - { - return unchecked((nuint)(-1)); - } - else - { - /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. - * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. - * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. - */ - nuint maxOffbase = ((nuint)1 << (int)((uint)(MEM_32bits ? 25 : 57) + 1)) - 1; - nuint maxOffset = maxOffbase - 3; - assert(ZSTD_highbit32((uint)maxOffbase) == (uint)(MEM_32bits ? 
25 : 57)); - return maxOffset; - } - } - - /* ZSTD_decompressBlock_internal() : - * decompress block, starting at `src`, - * into destination buffer `dst`. - * @return : decompressed block size, - * or an error code (which can be tested using ZSTD_isError()) - */ - private static nuint ZSTD_decompressBlock_internal( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize, - streaming_operation streaming - ) { - byte* ip = (byte*)src; - if (srcSize > ZSTD_blockSizeMax(dctx)) + /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + nuint blockSizeMax = + dstCapacity < ZSTD_blockSizeMax(dctx) ? dstCapacity : ZSTD_blockSizeMax(dctx); + nuint totalHistorySize = ZSTD_totalHistorySize( + ZSTD_maybeNullPtrAdd((byte*)dst, (nint)blockSizeMax), + (byte*)dctx->virtualStart + ); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possible have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. + * + * If isLongOffsets is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)( + MEM_32bits && totalHistorySize > ZSTD_maxShortOffset() ? 
1 : 0 + ); + int usePrefetchDecoder = dctx->ddictIsCold; + int nbSeq; + nuint seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); + if (ERR_isError(seqHSize)) + return seqHSize; + ip += seqHSize; + srcSize -= seqHSize; + if ((dst == null || dstCapacity == 0) && nbSeq > 0) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + if ( + MEM_64bits + && sizeof(nuint) == sizeof(void*) + && unchecked((nuint)(-1)) - (nuint)dst < 1 << 20 + ) { - nuint litCSize = ZSTD_decodeLiteralsBlock( - dctx, - src, - srcSize, - dst, - dstCapacity, - streaming - ); - if (ERR_isError(litCSize)) - return litCSize; - ip += litCSize; - srcSize -= litCSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } + if ( + isLongOffset != default + || usePrefetchDecoder == 0 && totalHistorySize > 1U << 24 && nbSeq > 8 + ) { - /* Compute the maximum block size, which must also work when !frame and fParams are unset. - * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. - */ - nuint blockSizeMax = - dstCapacity < ZSTD_blockSizeMax(dctx) ? dstCapacity : ZSTD_blockSizeMax(dctx); - nuint totalHistorySize = ZSTD_totalHistorySize( - ZSTD_maybeNullPtrAdd((byte*)dst, (nint)blockSizeMax), - (byte*)dctx->virtualStart - ); - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than ZSTD_maxShortOffset(). - * We don't expect that to be the case in 64-bit mode. - * - * We check here to see if our history is large enough to allow long offsets. - * If it isn't, then we can't possible have (valid) long offsets. If the offset - * is invalid, then it is okay to read it incorrectly. - * - * If isLongOffsets is true, then we will later check our decoding table to see - * if it is even possible to generate long offsets. 
- */ - ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)( - MEM_32bits && totalHistorySize > ZSTD_maxShortOffset() ? 1 : 0 - ); - int usePrefetchDecoder = dctx->ddictIsCold; - int nbSeq; - nuint seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); - if (ERR_isError(seqHSize)) - return seqHSize; - ip += seqHSize; - srcSize -= seqHSize; - if ((dst == null || dstCapacity == 0) && nbSeq > 0) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - - if ( - MEM_64bits - && sizeof(nuint) == sizeof(void*) - && unchecked((nuint)(-1)) - (nuint)dst < 1 << 20 - ) - { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); - } - + ZSTD_OffsetInfo info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); if ( isLongOffset != default - || usePrefetchDecoder == 0 && totalHistorySize > 1U << 24 && nbSeq > 8 + && info.maxNbAdditionalBits <= (uint)(MEM_32bits ? 25 : 57) ) { - ZSTD_OffsetInfo info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); - if ( - isLongOffset != default - && info.maxNbAdditionalBits <= (uint)(MEM_32bits ? 25 : 57) - ) - { - isLongOffset = ZSTD_longOffset_e.ZSTD_lo_isRegularOffset; - } - - if (usePrefetchDecoder == 0) - { - /* heuristic values, correspond to 2.73% and 7.81% */ - uint minShare = (uint)(MEM_64bits ? 7 : 20); - usePrefetchDecoder = info.longOffsetShare >= minShare ? 1 : 0; - } + isLongOffset = ZSTD_longOffset_e.ZSTD_lo_isRegularOffset; } - dctx->ddictIsCold = 0; - if (usePrefetchDecoder != 0) + if (usePrefetchDecoder == 0) { - return ZSTD_decompressSequencesLong( - dctx, - dst, - dstCapacity, - ip, - srcSize, - nbSeq, - isLongOffset - ); + /* heuristic values, correspond to 2.73% and 7.81% */ + uint minShare = (uint)(MEM_64bits ? 7 : 20); + usePrefetchDecoder = info.longOffsetShare >= minShare ? 
1 : 0; } + } - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - return ZSTD_decompressSequencesSplitLitBuffer( - dctx, - dst, - dstCapacity, - ip, - srcSize, - nbSeq, - isLongOffset - ); - else - return ZSTD_decompressSequences( - dctx, - dst, - dstCapacity, - ip, - srcSize, - nbSeq, - isLongOffset - ); + dctx->ddictIsCold = 0; + if (usePrefetchDecoder != 0) + { + return ZSTD_decompressSequencesLong( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); } + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) + return ZSTD_decompressSequencesSplitLitBuffer( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); + else + return ZSTD_decompressSequences( + dctx, + dst, + dstCapacity, + ip, + srcSize, + nbSeq, + isLongOffset + ); } + } - /*! ZSTD_checkContinuity() : - * check if next `dst` follows previous position, where decompression ended. - * If yes, do nothing (continue on current segment). - * If not, classify previous segment as "external dictionary", and start a new segment. - * This function cannot fail. */ - private static void ZSTD_checkContinuity(ZSTD_DCtx_s* dctx, void* dst, nuint dstSize) + /*! ZSTD_checkContinuity() : + * check if next `dst` follows previous position, where decompression ended. + * If yes, do nothing (continue on current segment). + * If not, classify previous segment as "external dictionary", and start a new segment. + * This function cannot fail. 
*/ + private static void ZSTD_checkContinuity(ZSTD_DCtx_s* dctx, void* dst, nuint dstSize) + { + if (dst != dctx->previousDstEnd && dstSize > 0) { - if (dst != dctx->previousDstEnd && dstSize > 0) - { - dctx->dictEnd = dctx->previousDstEnd; - dctx->virtualStart = - (sbyte*)dst - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); - dctx->prefixStart = dst; - dctx->previousDstEnd = dst; - } + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = + (sbyte*)dst - ((sbyte*)dctx->previousDstEnd - (sbyte*)dctx->prefixStart); + dctx->prefixStart = dst; + dctx->previousDstEnd = dst; } + } - /* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */ - private static nuint ZSTD_decompressBlock_deprecated( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + /* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */ + private static nuint ZSTD_decompressBlock_deprecated( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + nuint dSize; + dctx->isFrameDecompression = 0; + ZSTD_checkContinuity(dctx, dst, dstCapacity); + dSize = ZSTD_decompressBlock_internal( + dctx, + dst, + dstCapacity, + src, + srcSize, + streaming_operation.not_streaming + ); { - nuint dSize; - dctx->isFrameDecompression = 0; - ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal( - dctx, - dst, - dstCapacity, - src, - srcSize, - streaming_operation.not_streaming - ); + nuint err_code = dSize; + if (ERR_isError(err_code)) { - nuint err_code = dSize; - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } - - dctx->previousDstEnd = (sbyte*)dst + dSize; - return dSize; } - /* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ - public static nuint ZSTD_decompressBlock( - ZSTD_DCtx_s* dctx, - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) - { - return ZSTD_decompressBlock_deprecated(dctx, dst, 
dstCapacity, src, srcSize); - } + dctx->previousDstEnd = (sbyte*)dst + dSize; + return dSize; + } - private static void ZSTD_initFseState( - ref ZSTD_fseState DStatePtr, - ref BIT_DStream_t bitD, - ZSTD_seqSymbol* dt - ) - { - void* ptr = dt; - ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; - DStatePtr.state = BIT_readBits( - bitD.bitContainer, - ref bitD.bitsConsumed, - DTableH->tableLog - ); - BIT_reloadDStream( - ref bitD.bitContainer, - ref bitD.bitsConsumed, - ref bitD.ptr, - bitD.start, - bitD.limitPtr - ); - DStatePtr.table = dt + 1; - } + /* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ + public static nuint ZSTD_decompressBlock( + ZSTD_DCtx_s* dctx, + void* dst, + nuint dstCapacity, + void* src, + nuint srcSize + ) + { + return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateFseStateWithDInfo( - ref ZSTD_fseState DStatePtr, - nuint bitD_bitContainer, - ref uint bitD_bitsConsumed, - ushort nextState, - uint nbBits - ) + private static void ZSTD_initFseState( + ref ZSTD_fseState DStatePtr, + ref BIT_DStream_t bitD, + ZSTD_seqSymbol* dt + ) + { + void* ptr = dt; + ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; + DStatePtr.state = BIT_readBits( + bitD.bitContainer, + ref bitD.bitsConsumed, + DTableH->tableLog + ); + BIT_reloadDStream( + ref bitD.bitContainer, + ref bitD.bitsConsumed, + ref bitD.ptr, + bitD.start, + bitD.limitPtr + ); + DStatePtr.table = dt + 1; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateFseStateWithDInfo( + ref ZSTD_fseState DStatePtr, + nuint bitD_bitContainer, + ref uint bitD_bitsConsumed, + ushort nextState, + uint nbBits + ) + { + nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); + DStatePtr.state = nextState + lowBits; + } + + /*! 
ZSTD_overlapCopy8() : + * Copies 8 bytes from ip to op and updates op and ip where ip <= op. + * If the offset is < 8 then the offset is spread to at least 8 bytes. + * + * Precondition: *ip <= *op + * Postcondition: *op - *op >= 8 + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_overlapCopy8(ref byte* op, ref byte* ip, nuint offset) + { + assert(ip <= op); + if (offset < 8) { - nuint lowBits = BIT_readBits(bitD_bitContainer, ref bitD_bitsConsumed, nbBits); - DStatePtr.state = nextState + lowBits; + int sub2 = dec64table[offset]; + op[0] = ip[0]; + op[1] = ip[1]; + op[2] = ip[2]; + op[3] = ip[3]; + ip += dec32table[offset]; + ZSTD_copy4(op + 4, ip); + ip -= sub2; } - - /*! ZSTD_overlapCopy8() : - * Copies 8 bytes from ip to op and updates op and ip where ip <= op. - * If the offset is < 8 then the offset is spread to at least 8 bytes. - * - * Precondition: *ip <= *op - * Postcondition: *op - *op >= 8 - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_overlapCopy8(ref byte* op, ref byte* ip, nuint offset) + else { - assert(ip <= op); - if (offset < 8) - { - int sub2 = dec64table[offset]; - op[0] = ip[0]; - op[1] = ip[1]; - op[2] = ip[2]; - op[3] = ip[3]; - ip += dec32table[offset]; - ZSTD_copy4(op + 4, ip); - ip -= sub2; - } - else - { - ZSTD_copy8(op, ip); - } - - ip += 8; - op += 8; - assert(op - ip >= 8); + ZSTD_copy8(op, ip); } + + ip += 8; + op += 8; + assert(op - ip >= 8); } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs index c0fb99b45..96b38245e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs @@ -1,12 +1,12 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static 
ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { #if NET7_0_OR_GREATER private static ReadOnlySpan Span_LL_base => new uint[36] @@ -55,47 +55,47 @@ ref MemoryMarshal.GetReference(Span_LL_base) ); #else - private static readonly uint* LL_base = GetArrayPointer( - new uint[36] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 18, - 20, - 22, - 24, - 28, - 32, - 40, - 48, - 64, - 0x80, - 0x100, - 0x200, - 0x400, - 0x800, - 0x1000, - 0x2000, - 0x4000, - 0x8000, - 0x10000, - } - ); + private static readonly uint* LL_base = GetArrayPointer( + new uint[36] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 18, + 20, + 22, + 24, + 28, + 32, + 40, + 48, + 64, + 0x80, + 0x100, + 0x200, + 0x400, + 0x800, + 0x1000, + 0x2000, + 0x4000, + 0x8000, + 0x10000, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_OF_base => @@ -141,43 +141,43 @@ ref MemoryMarshal.GetReference(Span_OF_base) ); #else - private static readonly uint* OF_base = GetArrayPointer( - new uint[32] - { - 0, - 1, - 1, - 5, - 0xD, - 0x1D, - 0x3D, - 0x7D, - 0xFD, - 0x1FD, - 0x3FD, - 0x7FD, - 0xFFD, - 0x1FFD, - 0x3FFD, - 0x7FFD, - 0xFFFD, - 0x1FFFD, - 0x3FFFD, - 0x7FFFD, - 0xFFFFD, - 0x1FFFFD, - 0x3FFFFD, - 0x7FFFFD, - 0xFFFFFD, - 0x1FFFFFD, - 0x3FFFFFD, - 0x7FFFFFD, - 0xFFFFFFD, - 0x1FFFFFFD, - 0x3FFFFFFD, - 0x7FFFFFFD, - } - ); + private static readonly uint* OF_base = GetArrayPointer( + new uint[32] + { + 0, + 1, + 1, + 5, + 0xD, + 0x1D, + 0x3D, + 0x7D, + 0xFD, + 0x1FD, + 0x3FD, + 0x7FD, + 0xFFD, + 0x1FFD, + 0x3FFD, + 0x7FFD, + 0xFFFD, + 0x1FFFD, + 0x3FFFD, + 0x7FFFD, + 0xFFFFD, + 0x1FFFFD, + 0x3FFFFD, + 0x7FFFFD, + 0xFFFFFD, + 0x1FFFFFD, + 0x3FFFFFD, + 0x7FFFFFD, + 0xFFFFFFD, + 
0x1FFFFFFD, + 0x3FFFFFFD, + 0x7FFFFFFD, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_OF_bits => @@ -223,43 +223,43 @@ ref MemoryMarshal.GetReference(Span_OF_bits) ); #else - private static readonly byte* OF_bits = GetArrayPointer( - new byte[32] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - } - ); + private static readonly byte* OF_bits = GetArrayPointer( + new byte[32] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_ML_base => @@ -326,70 +326,69 @@ ref MemoryMarshal.GetReference(Span_ML_base) ); #else - private static readonly uint* ML_base = GetArrayPointer( - new uint[53] - { - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 37, - 39, - 41, - 43, - 47, - 51, - 59, - 67, - 83, - 99, - 0x83, - 0x103, - 0x203, - 0x403, - 0x803, - 0x1003, - 0x2003, - 0x4003, - 0x8003, - 0x10003, - } - ); -#endif - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_DCtx_get_bmi2(ZSTD_DCtx_s* dctx) + private static readonly uint* ML_base = GetArrayPointer( + new uint[53] { - return 0; + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 37, + 39, + 41, + 43, + 47, + 51, + 59, + 67, + 83, + 99, + 0x83, + 0x103, + 0x203, + 0x403, + 0x803, + 0x1003, + 0x2003, + 0x4003, + 0x8003, + 0x10003, } + ); +#endif + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_DCtx_get_bmi2(ZSTD_DCtx_s* 
dctx) + { + return 0; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs index f7b916f83..bbc06b18d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs @@ -1,102 +1,102 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + private static void ZSTD_fillDoubleHashTableForCDict( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { - private static void ZSTD_fillDoubleHashTableForCDict( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm - ) + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLarge = ms->hashTable; + uint hBitsL = cParams->hashLog + 8; + uint mls = cParams->minMatch; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog + 8; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashLarge = ms->hashTable; - uint hBitsL = cParams->hashLog + 8; - uint mls = cParams->minMatch; - uint* hashSmall = ms->chainTable; - uint hBitsS = cParams->chainLog + 8; - byte* @base = ms->window.@base; - byte* ip = @base + ms->nextToUpdate; - byte* iend = (byte*)end - 8; - const uint fastHashFillStep = 3; - for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) + uint curr = (uint)(ip - @base); + uint i; + for (i = 0; i < fastHashFillStep; ++i) { - uint curr = (uint)(ip - 
@base); - uint i; - for (i = 0; i < fastHashFillStep; ++i) + nuint smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); + nuint lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) { - nuint smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); - nuint lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); - if (i == 0) - { - ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); - } - - if (i == 0 || hashLarge[lgHashAndTag >> 8] == 0) - { - ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); - } + ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); + } - if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) - break; + if (i == 0 || hashLarge[lgHashAndTag >> 8] == 0) + { + ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); } + + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + break; } } + } - private static void ZSTD_fillDoubleHashTableForCCtx( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm - ) + private static void ZSTD_fillDoubleHashTableForCCtx( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLarge = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint mls = cParams->minMatch; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashLarge = ms->hashTable; - uint hBitsL = cParams->hashLog; - uint mls = cParams->minMatch; - uint* hashSmall = ms->chainTable; - uint hBitsS = cParams->chainLog; - byte* @base = ms->window.@base; - byte* ip = @base + ms->nextToUpdate; - byte* iend = (byte*)end - 8; - const uint fastHashFillStep = 3; - for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) + uint curr = (uint)(ip - @base); + 
uint i; + for (i = 0; i < fastHashFillStep; ++i) { - uint curr = (uint)(ip - @base); - uint i; - for (i = 0; i < fastHashFillStep; ++i) - { - nuint smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); - nuint lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); - if (i == 0) - hashSmall[smHash] = curr + i; - if (i == 0 || hashLarge[lgHash] == 0) - hashLarge[lgHash] = curr + i; - if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) - break; - } + nuint smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); + nuint lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) + hashSmall[smHash] = curr + i; + if (i == 0 || hashLarge[lgHash] == 0) + hashLarge[lgHash] = curr + i; + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + break; } } + } - private static void ZSTD_fillDoubleHashTable( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp - ) + private static void ZSTD_fillDoubleHashTable( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) + { + if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) { - if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) - { - ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); - } - else - { - ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); - } + ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); + } + else + { + ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); } + } #if NET7_0_OR_GREATER private static ReadOnlySpan Span_dummy => @@ -108,149 +108,149 @@ ref MemoryMarshal.GetReference(Span_dummy) ); #else - private static readonly byte* dummy = GetArrayPointer( - new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 } - ); + private static readonly byte* dummy = GetArrayPointer( + new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 } + ); #endif - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_doubleFast_noDict_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - 
uint* rep, - void* src, - nuint srcSize, - uint mls - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_doubleFast_noDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixLowest = @base + prefixLowestIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; + nuint mLength; + uint offset; + uint curr; + /* how many positions to search before increasing step size */ + const nuint kStepIncr = 1 << 8; + /* the position at which to increment the step size if no match is found */ + byte* nextStep; + /* the current step size */ + nuint step; + /* the long hash at ip */ + nuint hl0; + /* the long hash at ip1 */ + nuint hl1; + /* the long match index for ip */ + uint idxl0; + /* the long match index for ip1 */ + uint idxl1; + /* the long match for ip */ + byte* matchl0; + /* the short match for ip */ + byte* matchs0; + /* the long match for ip1 */ + byte* matchl1; + /* matchs0 or safe address */ + byte* matchs0_safe; + /* the current position */ + byte* ip = istart; + /* the next position */ + byte* ip1; + ip += ip - prefixLowest == 0 ? 
1 : 0; { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashLong = ms->hashTable; - uint hBitsL = cParams->hashLog; - uint* hashSmall = ms->chainTable; - uint hBitsS = cParams->chainLog; - byte* @base = ms->window.@base; - byte* istart = (byte*)src; - byte* anchor = istart; - uint endIndex = (uint)((nuint)(istart - @base) + srcSize); - /* presumes that, if there is a dictionary, it must be using Attach mode */ - uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - byte* prefixLowest = @base + prefixLowestIndex; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - uint offset_1 = rep[0], - offset_2 = rep[1]; - uint offsetSaved1 = 0, - offsetSaved2 = 0; - nuint mLength; - uint offset; - uint curr; - /* how many positions to search before increasing step size */ - const nuint kStepIncr = 1 << 8; - /* the position at which to increment the step size if no match is found */ - byte* nextStep; - /* the current step size */ - nuint step; - /* the long hash at ip */ - nuint hl0; - /* the long hash at ip1 */ - nuint hl1; - /* the long match index for ip */ - uint idxl0; - /* the long match index for ip1 */ - uint idxl1; - /* the long match for ip */ - byte* matchl0; - /* the short match for ip */ - byte* matchs0; - /* the long match for ip1 */ - byte* matchl1; - /* matchs0 or safe address */ - byte* matchs0_safe; - /* the current position */ - byte* ip = istart; - /* the next position */ - byte* ip1; - ip += ip - prefixLowest == 0 ? 
1 : 0; + uint current = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + uint maxRep = current - windowLow; + if (offset_2 > maxRep) { - uint current = (uint)(ip - @base); - uint windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); - uint maxRep = current - windowLow; - if (offset_2 > maxRep) - { - offsetSaved2 = offset_2; - offset_2 = 0; - } + offsetSaved2 = offset_2; + offset_2 = 0; + } - if (offset_1 > maxRep) - { - offsetSaved1 = offset_1; - offset_1 = 0; - } + if (offset_1 > maxRep) + { + offsetSaved1 = offset_1; + offset_1 = 0; } + } - while (true) + while (true) + { + step = 1; + nextStep = ip + kStepIncr; + ip1 = ip + step; + if (ip1 > ilimit) { - step = 1; - nextStep = ip + kStepIncr; - ip1 = ip + step; - if (ip1 > ilimit) + goto _cleanup; + } + + hl0 = ZSTD_hashPtr(ip, hBitsL, 8); + idxl0 = hashLong[hl0]; + matchl0 = @base + idxl0; + do + { + nuint hs0 = ZSTD_hashPtr(ip, hBitsS, mls); + uint idxs0 = hashSmall[hs0]; + curr = (uint)(ip - @base); + matchs0 = @base + idxs0; + hashLong[hl0] = hashSmall[hs0] = curr; + if (offset_1 > 0 && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1)) { - goto _cleanup; + mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + goto _match_stored; } - hl0 = ZSTD_hashPtr(ip, hBitsL, 8); - idxl0 = hashLong[hl0]; - matchl0 = @base + idxl0; - do + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); { - nuint hs0 = ZSTD_hashPtr(ip, hBitsS, mls); - uint idxs0 = hashSmall[hs0]; - curr = (uint)(ip - @base); - matchs0 = @base + idxs0; - hashLong[hl0] = hashSmall[hs0] = curr; - if (offset_1 > 0 && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1)) - { - mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; - ip++; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); - goto 
_match_stored; - } - - hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + byte* matchl0_safe = ZSTD_selectAddr( + idxl0, + prefixLowestIndex, + matchl0, + &dummy[0] + ); + if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { - byte* matchl0_safe = ZSTD_selectAddr( - idxl0, - prefixLowestIndex, - matchl0, - &dummy[0] - ); - if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) + mLength = ZSTD_count(ip + 8, matchl0 + 8, iend) + 8; + offset = (uint)(ip - matchl0); + while (ip > anchor && matchl0 > prefixLowest && ip[-1] == matchl0[-1]) { - mLength = ZSTD_count(ip + 8, matchl0 + 8, iend) + 8; - offset = (uint)(ip - matchl0); - while (ip > anchor && matchl0 > prefixLowest && ip[-1] == matchl0[-1]) - { - ip--; - matchl0--; - mLength++; - } - - goto _match_found; + ip--; + matchl0--; + mLength++; } - } - idxl1 = hashLong[hl1]; - matchl1 = @base + idxl1; - matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); - if (MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) - { - goto _search_next_long; + goto _match_found; } + } - if (ip1 >= nextStep) - { + idxl1 = hashLong[hl1]; + matchl1 = @base + idxl1; + matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); + if (MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) + { + goto _search_next_long; + } + + if (ip1 >= nextStep) + { #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { @@ -259,935 +259,934 @@ uint mls } #endif - step++; - nextStep += kStepIncr; - } - - ip = ip1; - ip1 += step; - hl0 = hl1; - idxl0 = idxl1; - matchl0 = matchl1; - } while (ip1 <= ilimit); - _cleanup: - offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; - rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; - rep[1] = offset_2 != 0 ? 
offset_2 : offsetSaved2; - return (nuint)(iend - anchor); - _search_next_long: - mLength = ZSTD_count(ip + 4, matchs0 + 4, iend) + 4; - offset = (uint)(ip - matchs0); - if (idxl1 > prefixLowestIndex && MEM_read64(matchl1) == MEM_read64(ip1)) - { - nuint l1len = ZSTD_count(ip1 + 8, matchl1 + 8, iend) + 8; - if (l1len > mLength) - { - ip = ip1; - mLength = l1len; - offset = (uint)(ip - matchl1); - matchs0 = matchl1; - } + step++; + nextStep += kStepIncr; } - while (ip > anchor && matchs0 > prefixLowest && ip[-1] == matchs0[-1]) + ip = ip1; + ip1 += step; + hl0 = hl1; + idxl0 = idxl1; + matchl0 = matchl1; + } while (ip1 <= ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + _search_next_long: + mLength = ZSTD_count(ip + 4, matchs0 + 4, iend) + 4; + offset = (uint)(ip - matchs0); + if (idxl1 > prefixLowestIndex && MEM_read64(matchl1) == MEM_read64(ip1)) + { + nuint l1len = ZSTD_count(ip1 + 8, matchl1 + 8, iend) + 8; + if (l1len > mLength) { - ip--; - matchs0--; - mLength++; + ip = ip1; + mLength = l1len; + offset = (uint)(ip - matchl1); + matchs0 = matchl1; } + } + + while (ip > anchor && matchs0 > prefixLowest && ip[-1] == matchs0[-1]) + { + ip--; + matchs0--; + mLength++; + } - _match_found: - offset_2 = offset_1; - offset_1 = offset; - if (step < 4) + _match_found: + offset_2 = offset_1; + offset_1 = offset; + if (step < 4) + { + hashLong[hl1] = (uint)(ip1 - @base); + } + + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + _match_stored: + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { { - hashLong[hl1] = (uint)(ip1 - @base); + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + 
hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); } - assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); - _match_stored: - ip += mLength; - anchor = ip; - if (ip <= ilimit) + while ( + ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) + ) { - { - uint indexToInsert = curr + 2; - hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); - hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); - } - - while ( - ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) - ) - { - /* store sequence */ - nuint rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; - /* swap offset_2 <=> offset_1 */ - uint tmpOff = offset_2; - offset_2 = offset_1; - offset_1 = tmpOff; - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (uint)(ip - @base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (uint)(ip - @base); - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); - ip += rLength; - anchor = ip; - continue; - } + /* store sequence */ + nuint rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOff = offset_2; + offset_2 = offset_1; + offset_1 = tmpOff; + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (uint)(ip - @base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (uint)(ip - @base); + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); + ip += rLength; + anchor = ip; + continue; } } } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - 
nuint srcSize, - uint mls - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixLowest = @base + prefixLowestIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], + offset_2 = rep[1]; + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dictCParams = &dms->cParams; + uint* dictHashLong = dms->hashTable; + uint* dictHashSmall = dms->chainTable; + uint dictStartIndex = dms->window.dictLimit; + byte* dictBase = dms->window.@base; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dms->window.nextSrc; + uint dictIndexDelta = prefixLowestIndex - (uint)(dictEnd - dictBase); + uint dictHBitsL = dictCParams->hashLog + 8; + uint dictHBitsS = dictCParams->chainLog + 8; + uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictStart)); + assert(ms->window.dictLimit + (1U << (int)cParams->windowLog) >= endIndex); + if (ms->prefetchCDictTables != 0) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashLong = ms->hashTable; - uint hBitsL = cParams->hashLog; - uint* hashSmall = ms->chainTable; - uint hBitsS = cParams->chainLog; - byte* @base = ms->window.@base; - byte* istart = (byte*)src; - byte* ip = istart; - byte* anchor = istart; - uint endIndex = 
(uint)((nuint)(istart - @base) + srcSize); - /* presumes that, if there is a dictionary, it must be using Attach mode */ - uint prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - byte* prefixLowest = @base + prefixLowestIndex; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - uint offset_1 = rep[0], - offset_2 = rep[1]; - ZSTD_MatchState_t* dms = ms->dictMatchState; - ZSTD_compressionParameters* dictCParams = &dms->cParams; - uint* dictHashLong = dms->hashTable; - uint* dictHashSmall = dms->chainTable; - uint dictStartIndex = dms->window.dictLimit; - byte* dictBase = dms->window.@base; - byte* dictStart = dictBase + dictStartIndex; - byte* dictEnd = dms->window.nextSrc; - uint dictIndexDelta = prefixLowestIndex - (uint)(dictEnd - dictBase); - uint dictHBitsL = dictCParams->hashLog + 8; - uint dictHBitsS = dictCParams->chainLog + 8; - uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictStart)); - assert(ms->window.dictLimit + (1U << (int)cParams->windowLog) >= endIndex); - if (ms->prefetchCDictTables != 0) + nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); + nuint chainTableBytes = ((nuint)1 << (int)dictCParams->chainLog) * sizeof(uint); { - nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); - nuint chainTableBytes = ((nuint)1 << (int)dictCParams->chainLog) * sizeof(uint); + sbyte* _ptr = (sbyte*)dictHashLong; + nuint _size = hashTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) { - sbyte* _ptr = (sbyte*)dictHashLong; - nuint _size = hashTableBytes; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) - { #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); } #endif - } } + } + { + sbyte* _ptr = (sbyte*)dictHashSmall; + nuint _size = chainTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) { - sbyte* _ptr = 
(sbyte*)dictHashSmall; - nuint _size = chainTableBytes; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) - { #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); } #endif - } } } + } + + ip += dictAndPrefixLength == 0 ? 1 : 0; + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + while (ip < ilimit) + { + nuint mLength; + uint offset; + nuint h2 = ZSTD_hashPtr(ip, hBitsL, 8); + nuint h = ZSTD_hashPtr(ip, hBitsS, mls); + nuint dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); + nuint dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); + uint dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> 8]; + uint dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> 8]; + int dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); + int dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); + uint curr = (uint)(ip - @base); + uint matchIndexL = hashLong[h2]; + uint matchIndexS = hashSmall[h]; + byte* matchLong = @base + matchIndexL; + byte* match = @base + matchIndexS; + uint repIndex = curr + 1 - offset_1; + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + hashLong[h2] = hashSmall[h] = curr; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) + ) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; + mLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + goto _match_stored; + } - ip += dictAndPrefixLength == 0 ? 
1 : 0; - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - while (ip < ilimit) + if (matchIndexL >= prefixLowestIndex && MEM_read64(matchLong) == MEM_read64(ip)) { - nuint mLength; - uint offset; - nuint h2 = ZSTD_hashPtr(ip, hBitsL, 8); - nuint h = ZSTD_hashPtr(ip, hBitsS, mls); - nuint dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); - nuint dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); - uint dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> 8]; - uint dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> 8]; - int dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); - int dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); - uint curr = (uint)(ip - @base); - uint matchIndexL = hashLong[h2]; - uint matchIndexS = hashSmall[h]; - byte* matchLong = @base + matchIndexL; - byte* match = @base + matchIndexS; - uint repIndex = curr + 1 - offset_1; - byte* repMatch = - repIndex < prefixLowestIndex - ? dictBase + (repIndex - dictIndexDelta) - : @base + repIndex; - hashLong[h2] = hashSmall[h] = curr; - if ( - ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 - && MEM_read32(repMatch) == MEM_read32(ip + 1) - ) + mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; + offset = (uint)(ip - matchLong); + while (ip > anchor && matchLong > prefixLowest && ip[-1] == matchLong[-1]) + { + ip--; + matchLong--; + mLength++; + } + + goto _match_found; + } + else if (dictTagsMatchL != 0) + { + /* check dictMatchState long match */ + uint dictMatchIndexL = dictMatchIndexAndTagL >> 8; + byte* dictMatchL = dictBase + dictMatchIndexL; + assert(dictMatchL < dictEnd); + if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { - byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, + ip + 8, + dictMatchL + 8, iend, - repMatchEnd, + dictEnd, prefixLowest - ) + 4; - ip++; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); - goto _match_stored; + ) + 8; + offset = curr - dictMatchIndexL - dictIndexDelta; + while (ip > anchor && dictMatchL > dictStart && ip[-1] == dictMatchL[-1]) + { + ip--; + dictMatchL--; + mLength++; + } + + goto _match_found; } + } - if (matchIndexL >= prefixLowestIndex && MEM_read64(matchLong) == MEM_read64(ip)) + if (matchIndexS > prefixLowestIndex) + { + if (MEM_read32(match) == MEM_read32(ip)) { - mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8; - offset = (uint)(ip - matchLong); - while (ip > anchor && matchLong > prefixLowest && ip[-1] == matchLong[-1]) + goto _search_next_long; + } + } + else if (dictTagsMatchS != 0) + { + /* check dictMatchState short match */ + uint dictMatchIndexS = dictMatchIndexAndTagS >> 8; + match = dictBase + dictMatchIndexS; + matchIndexS = dictMatchIndexS + dictIndexDelta; + if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) + { + goto _search_next_long; + } + } + + ip += (ip - anchor >> 8) + 1; + continue; + _search_next_long: + { + nuint hl3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); + nuint dictHashAndTagL3 = ZSTD_hashPtr(ip + 1, dictHBitsL, 8); + uint matchIndexL3 = hashLong[hl3]; + uint dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> 8]; + int dictTagsMatchL3 = ZSTD_comparePackedTags( + dictMatchIndexAndTagL3, + dictHashAndTagL3 + ); + byte* matchL3 = @base + matchIndexL3; + hashLong[hl3] = curr + 1; + if ( + matchIndexL3 >= prefixLowestIndex + && MEM_read64(matchL3) == MEM_read64(ip + 1) + ) + { + mLength = ZSTD_count(ip + 9, matchL3 + 8, iend) + 8; + ip++; + offset = (uint)(ip - matchL3); + while (ip > anchor && matchL3 > prefixLowest && ip[-1] == matchL3[-1]) { ip--; - matchLong--; + matchL3--; mLength++; } goto _match_found; } - 
else if (dictTagsMatchL != 0) + else if (dictTagsMatchL3 != 0) { - /* check dictMatchState long match */ - uint dictMatchIndexL = dictMatchIndexAndTagL >> 8; - byte* dictMatchL = dictBase + dictMatchIndexL; - assert(dictMatchL < dictEnd); - if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) + /* check dict long +1 match */ + uint dictMatchIndexL3 = dictMatchIndexAndTagL3 >> 8; + byte* dictMatchL3 = dictBase + dictMatchIndexL3; + assert(dictMatchL3 < dictEnd); + if ( + dictMatchL3 > dictStart + && MEM_read64(dictMatchL3) == MEM_read64(ip + 1) + ) { mLength = ZSTD_count_2segments( - ip + 8, - dictMatchL + 8, + ip + 1 + 8, + dictMatchL3 + 8, iend, dictEnd, prefixLowest ) + 8; - offset = curr - dictMatchIndexL - dictIndexDelta; - while (ip > anchor && dictMatchL > dictStart && ip[-1] == dictMatchL[-1]) + ip++; + offset = curr + 1 - dictMatchIndexL3 - dictIndexDelta; + while ( + ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1] + ) { ip--; - dictMatchL--; + dictMatchL3--; mLength++; } goto _match_found; } } + } - if (matchIndexS > prefixLowestIndex) - { - if (MEM_read32(match) == MEM_read32(ip)) - { - goto _search_next_long; - } - } - else if (dictTagsMatchS != 0) + if (matchIndexS < prefixLowestIndex) + { + mLength = + ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; + offset = curr - matchIndexS; + while (ip > anchor && match > dictStart && ip[-1] == match[-1]) { - /* check dictMatchState short match */ - uint dictMatchIndexS = dictMatchIndexAndTagS >> 8; - match = dictBase + dictMatchIndexS; - matchIndexS = dictMatchIndexS + dictIndexDelta; - if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) - { - goto _search_next_long; - } + ip--; + match--; + mLength++; } - - ip += (ip - anchor >> 8) + 1; - continue; - _search_next_long: + } + else + { + mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; + offset = (uint)(ip - match); + while (ip > anchor && match > prefixLowest && ip[-1] == match[-1]) { - 
nuint hl3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); - nuint dictHashAndTagL3 = ZSTD_hashPtr(ip + 1, dictHBitsL, 8); - uint matchIndexL3 = hashLong[hl3]; - uint dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> 8]; - int dictTagsMatchL3 = ZSTD_comparePackedTags( - dictMatchIndexAndTagL3, - dictHashAndTagL3 - ); - byte* matchL3 = @base + matchIndexL3; - hashLong[hl3] = curr + 1; - if ( - matchIndexL3 >= prefixLowestIndex - && MEM_read64(matchL3) == MEM_read64(ip + 1) - ) - { - mLength = ZSTD_count(ip + 9, matchL3 + 8, iend) + 8; - ip++; - offset = (uint)(ip - matchL3); - while (ip > anchor && matchL3 > prefixLowest && ip[-1] == matchL3[-1]) - { - ip--; - matchL3--; - mLength++; - } - - goto _match_found; - } - else if (dictTagsMatchL3 != 0) - { - /* check dict long +1 match */ - uint dictMatchIndexL3 = dictMatchIndexAndTagL3 >> 8; - byte* dictMatchL3 = dictBase + dictMatchIndexL3; - assert(dictMatchL3 < dictEnd); - if ( - dictMatchL3 > dictStart - && MEM_read64(dictMatchL3) == MEM_read64(ip + 1) - ) - { - mLength = - ZSTD_count_2segments( - ip + 1 + 8, - dictMatchL3 + 8, - iend, - dictEnd, - prefixLowest - ) + 8; - ip++; - offset = curr + 1 - dictMatchIndexL3 - dictIndexDelta; - while ( - ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1] - ) - { - ip--; - dictMatchL3--; - mLength++; - } - - goto _match_found; - } - } + ip--; + match--; + mLength++; } + } - if (matchIndexS < prefixLowestIndex) - { - mLength = - ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; - offset = curr - matchIndexS; - while (ip > anchor && match > dictStart && ip[-1] == match[-1]) - { - ip--; - match--; - mLength++; - } - } - else + _match_found: + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); + _match_stored: + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { { - mLength = ZSTD_count(ip + 4, match + 4, iend) + 4; - offset = (uint)(ip - match); - 
while (ip > anchor && match > prefixLowest && ip[-1] == match[-1]) - { - ip--; - match--; - mLength++; - } + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); } - _match_found: - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, offset + 3, mLength); - _match_stored: - ip += mLength; - anchor = ip; - if (ip <= ilimit) + while (ip <= ilimit) { + uint current2 = (uint)(ip - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = + repIndex2 < prefixLowestIndex + ? dictBase + repIndex2 - dictIndexDelta + : @base + repIndex2; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex2) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip) + ) { - uint indexToInsert = curr + 2; - hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); - hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); + byte* repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; + nuint repLength2 = + ZSTD_count_2segments( + ip + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixLowest + ) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; } - while (ip <= ilimit) - { - uint current2 = (uint)(ip - @base); - uint repIndex2 = current2 - offset_2; - byte* repMatch2 = - repIndex2 < prefixLowestIndex - ? dictBase + repIndex2 - dictIndexDelta - : @base + repIndex2; - if ( - ZSTD_index_overlap_check(prefixLowestIndex, repIndex2) != 0 - && MEM_read32(repMatch2) == MEM_read32(ip) - ) - { - byte* repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; - nuint repLength2 = - ZSTD_count_2segments( - ip + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixLowest - ) + 4; - /* swap offset_2 <=> offset_1 */ - uint tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - - break; - } + break; } } - - rep[0] = offset_1; - rep[1] = offset_2; - return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_doubleFast_noDict_4( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4); - } + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_doubleFast_noDict_5( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return 
ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 5); - } + private static nuint ZSTD_compressBlock_doubleFast_noDict_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4); + } - private static nuint ZSTD_compressBlock_doubleFast_noDict_6( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 6); - } + private static nuint ZSTD_compressBlock_doubleFast_noDict_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 5); + } - private static nuint ZSTD_compressBlock_doubleFast_noDict_7( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 7); - } + private static nuint ZSTD_compressBlock_doubleFast_noDict_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 6); + } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_4( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 4 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_noDict_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 7); + } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_5( - 
ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 5 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4 + ); + } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_6( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 6 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 5 + ); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6 + ); + } + + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7 + ); + } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState_7( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_doubleFast( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mls = 
ms->cParams.minMatch; + switch (mls) { - return ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 7 - ); + default: + case 4: + return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); } + } - private static nuint ZSTD_compressBlock_doubleFast( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_doubleFast_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mls = ms->cParams.minMatch; + switch (mls) { - uint mls = ms->cParams.minMatch; - switch (mls) - { - default: - case 4: - return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); - case 5: - return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); - case 6: - return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); - case 7: - return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); - } + default: + case 4: + return ZSTD_compressBlock_doubleFast_dictMatchState_4( + ms, + seqStore, + rep, + src, + srcSize + ); + case 5: + return ZSTD_compressBlock_doubleFast_dictMatchState_5( + ms, + seqStore, + rep, + src, + srcSize + ); + case 6: + return ZSTD_compressBlock_doubleFast_dictMatchState_6( + ms, + seqStore, + rep, + src, + srcSize + ); + case 7: + return ZSTD_compressBlock_doubleFast_dictMatchState_7( + ms, + seqStore, + rep, + src, + srcSize + ); } + } - private static nuint ZSTD_compressBlock_doubleFast_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static 
nuint ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashLong = ms->hashTable; + uint hBitsL = cParams->hashLog; + uint* hashSmall = ms->chainTable; + uint hBitsS = cParams->chainLog; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* @base = ms->window.@base; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + uint dictStartIndex = lowLimit; + uint dictLimit = ms->window.dictLimit; + uint prefixStartIndex = dictLimit > lowLimit ? dictLimit : lowLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* dictBase = ms->window.dictBase; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dictBase + prefixStartIndex; + uint offset_1 = rep[0], + offset_2 = rep[1]; + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); + while (ip < ilimit) { - uint mls = ms->cParams.minMatch; - switch (mls) + nuint hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + uint matchIndex = hashSmall[hSmall]; + byte* matchBase = matchIndex < prefixStartIndex ? dictBase : @base; + byte* match = matchBase + matchIndex; + nuint hLong = ZSTD_hashPtr(ip, hBitsL, 8); + uint matchLongIndex = hashLong[hLong]; + byte* matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : @base; + byte* matchLong = matchLongBase + matchLongIndex; + uint curr = (uint)(ip - @base); + /* offset_1 expected <= curr +1 */ + uint repIndex = curr + 1 - offset_1; + byte* repBase = repIndex < prefixStartIndex ? 
dictBase : @base; + byte* repMatch = repBase + repIndex; + nuint mLength; + hashSmall[hSmall] = hashLong[hLong] = curr; + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex) + & (offset_1 <= curr + 1 - dictStartIndex ? 1 : 0) + ) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) + ) { - default: - case 4: - return ZSTD_compressBlock_doubleFast_dictMatchState_4( - ms, - seqStore, - rep, - src, - srcSize - ); - case 5: - return ZSTD_compressBlock_doubleFast_dictMatchState_5( - ms, - seqStore, - rep, - src, - srcSize - ); - case 6: - return ZSTD_compressBlock_doubleFast_dictMatchState_6( - ms, - seqStore, - rep, - src, - srcSize - ); - case 7: - return ZSTD_compressBlock_doubleFast_dictMatchState_7( - ms, - seqStore, - rep, - src, - srcSize - ); + byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixStart + ) + 4; + ip++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); } - } - - private static nuint ZSTD_compressBlock_doubleFast_extDict_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - uint mls - ) - { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashLong = ms->hashTable; - uint hBitsL = cParams->hashLog; - uint* hashSmall = ms->chainTable; - uint hBitsS = cParams->chainLog; - byte* istart = (byte*)src; - byte* ip = istart; - byte* anchor = istart; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - byte* @base = ms->window.@base; - uint endIndex = (uint)((nuint)(istart - @base) + srcSize); - uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); - uint dictStartIndex = lowLimit; - uint dictLimit = ms->window.dictLimit; - uint prefixStartIndex = dictLimit > lowLimit ? 
dictLimit : lowLimit; - byte* prefixStart = @base + prefixStartIndex; - byte* dictBase = ms->window.dictBase; - byte* dictStart = dictBase + dictStartIndex; - byte* dictEnd = dictBase + prefixStartIndex; - uint offset_1 = rep[0], - offset_2 = rep[1]; - if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); - while (ip < ilimit) + else { - nuint hSmall = ZSTD_hashPtr(ip, hBitsS, mls); - uint matchIndex = hashSmall[hSmall]; - byte* matchBase = matchIndex < prefixStartIndex ? dictBase : @base; - byte* match = matchBase + matchIndex; - nuint hLong = ZSTD_hashPtr(ip, hBitsL, 8); - uint matchLongIndex = hashLong[hLong]; - byte* matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : @base; - byte* matchLong = matchLongBase + matchLongIndex; - uint curr = (uint)(ip - @base); - /* offset_1 expected <= curr +1 */ - uint repIndex = curr + 1 - offset_1; - byte* repBase = repIndex < prefixStartIndex ? dictBase : @base; - byte* repMatch = repBase + repIndex; - nuint mLength; - hashSmall[hSmall] = hashLong[hLong] = curr; - if ( - ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex) - & (offset_1 <= curr + 1 - dictStartIndex ? 1 : 0) - ) != 0 - && MEM_read32(repMatch) == MEM_read32(ip + 1) - ) + if (matchLongIndex > dictStartIndex && MEM_read64(matchLong) == MEM_read64(ip)) { - byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + byte* matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; + byte* lowMatchPtr = + matchLongIndex < prefixStartIndex ? 
dictStart : prefixStart; + uint offset; mLength = - ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixStart - ) + 4; - ip++; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, (nuint)(ip - anchor), anchor, iend, 1, mLength); + ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) + + 8; + offset = curr - matchLongIndex; + while (ip > anchor && matchLong > lowMatchPtr && ip[-1] == matchLong[-1]) + { + ip--; + matchLong--; + mLength++; + } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq( + seqStore, + (nuint)(ip - anchor), + anchor, + iend, + offset + 3, + mLength + ); } - else + else if (matchIndex > dictStartIndex && MEM_read32(match) == MEM_read32(ip)) { - if (matchLongIndex > dictStartIndex && MEM_read64(matchLong) == MEM_read64(ip)) + nuint h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); + uint matchIndex3 = hashLong[h3]; + byte* match3Base = matchIndex3 < prefixStartIndex ? dictBase : @base; + byte* match3 = match3Base + matchIndex3; + uint offset; + hashLong[h3] = curr + 1; + if ( + matchIndex3 > dictStartIndex + && MEM_read64(match3) == MEM_read64(ip + 1) + ) { - byte* matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; + byte* matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; byte* lowMatchPtr = - matchLongIndex < prefixStartIndex ? dictStart : prefixStart; - uint offset; + matchIndex3 < prefixStartIndex ? 
dictStart : prefixStart; mLength = - ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) - + 8; - offset = curr - matchLongIndex; - while (ip > anchor && matchLong > lowMatchPtr && ip[-1] == matchLong[-1]) + ZSTD_count_2segments( + ip + 9, + match3 + 8, + iend, + matchEnd, + prefixStart + ) + 8; + ip++; + offset = curr + 1 - matchIndex3; + while (ip > anchor && match3 > lowMatchPtr && ip[-1] == match3[-1]) { ip--; - matchLong--; + match3--; mLength++; } - - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - ZSTD_storeSeq( - seqStore, - (nuint)(ip - anchor), - anchor, - iend, - offset + 3, - mLength - ); } - else if (matchIndex > dictStartIndex && MEM_read32(match) == MEM_read32(ip)) + else { - nuint h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8); - uint matchIndex3 = hashLong[h3]; - byte* match3Base = matchIndex3 < prefixStartIndex ? dictBase : @base; - byte* match3 = match3Base + matchIndex3; - uint offset; - hashLong[h3] = curr + 1; - if ( - matchIndex3 > dictStartIndex - && MEM_read64(match3) == MEM_read64(ip + 1) - ) - { - byte* matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = - matchIndex3 < prefixStartIndex ? dictStart : prefixStart; - mLength = - ZSTD_count_2segments( - ip + 9, - match3 + 8, - iend, - matchEnd, - prefixStart - ) + 8; - ip++; - offset = curr + 1 - matchIndex3; - while (ip > anchor && match3 > lowMatchPtr && ip[-1] == match3[-1]) - { - ip--; - match3--; - mLength++; - } - } - else + byte* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; + byte* lowMatchPtr = + matchIndex < prefixStartIndex ? dictStart : prefixStart; + mLength = + ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) + + 4; + offset = curr - matchIndex; + while (ip > anchor && match > lowMatchPtr && ip[-1] == match[-1]) { - byte* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = - matchIndex < prefixStartIndex ? 
dictStart : prefixStart; - mLength = - ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) - + 4; - offset = curr - matchIndex; - while (ip > anchor && match > lowMatchPtr && ip[-1] == match[-1]) - { - ip--; - match--; - mLength++; - } + ip--; + match--; + mLength++; } - - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - ZSTD_storeSeq( - seqStore, - (nuint)(ip - anchor), - anchor, - iend, - offset + 3, - mLength - ); - } - else - { - ip += (ip - anchor >> 8) + 1; - continue; } + + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq( + seqStore, + (nuint)(ip - anchor), + anchor, + iend, + offset + 3, + mLength + ); } + else + { + ip += (ip - anchor >> 8) + 1; + continue; + } + } - ip += mLength; - anchor = ip; - if (ip <= ilimit) + ip += mLength; + anchor = ip; + if (ip <= ilimit) + { { - { - uint indexToInsert = curr + 2; - hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; - hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); - hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; - hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); - } + uint indexToInsert = curr + 2; + hashLong[ZSTD_hashPtr(@base + indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (uint)(ip - 2 - @base); + hashSmall[ZSTD_hashPtr(@base + indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); + } - while (ip <= ilimit) + while (ip <= ilimit) + { + uint current2 = (uint)(ip - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = + repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) + & (offset_2 <= current2 - dictStartIndex ? 
1 : 0) + ) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip) + ) { - uint current2 = (uint)(ip - @base); - uint repIndex2 = current2 - offset_2; - byte* repMatch2 = - repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; - if ( - ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex2) - & (offset_2 <= current2 - dictStartIndex ? 1 : 0) - ) != 0 - && MEM_read32(repMatch2) == MEM_read32(ip) - ) - { - byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - nuint repLength2 = - ZSTD_count_2segments( - ip + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixStart - ) + 4; - /* swap offset_2 <=> offset_1 */ - uint tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - - break; + byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + nuint repLength2 = + ZSTD_count_2segments( + ip + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixStart + ) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; } + + break; } } - - rep[0] = offset_1; - rep[1] = offset_2; - return (nuint)(iend - anchor); } - private static nuint ZSTD_compressBlock_doubleFast_extDict_4( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 4 - ); - } + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_doubleFast_extDict_5( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 5 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_extDict_4( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4 + ); + } - private static nuint ZSTD_compressBlock_doubleFast_extDict_6( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 6 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_extDict_5( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + 
seqStore, + rep, + src, + srcSize, + 5 + ); + } - private static nuint ZSTD_compressBlock_doubleFast_extDict_7( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 7 - ); - } + private static nuint ZSTD_compressBlock_doubleFast_extDict_6( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6 + ); + } - private static nuint ZSTD_compressBlock_doubleFast_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_doubleFast_extDict_7( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_doubleFast_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7 + ); + } + + private static nuint ZSTD_compressBlock_doubleFast_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mls = ms->cParams.minMatch; + switch (mls) { - uint mls = ms->cParams.minMatch; - switch (mls) - { - default: - case 4: - return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); - case 5: - return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); - case 6: - return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); - case 7: - return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); - } + default: + case 4: + return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); + case 7: + return 
ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs index 49aa8a7d8..bfe73ae03 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs @@ -1,1248 +1,1247 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + private static void ZSTD_fillHashTableForCDict( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) { - private static void ZSTD_fillHashTableForCDict( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm - ) + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hBits = cParams->hashLog + 8; + uint mls = cParams->minMatch; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full); + for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hBits = cParams->hashLog + 8; - uint mls = cParams->minMatch; - byte* @base = ms->window.@base; - byte* ip = @base + ms->nextToUpdate; - byte* iend = (byte*)end - 8; - const uint fastHashFillStep = 3; - assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_full); - for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) + uint curr = (uint)(ip - @base); { - uint curr = (uint)(ip - @base); - { - nuint hashAndTag = ZSTD_hashPtr(ip, hBits, mls); - ZSTD_writeTaggedIndex(hashTable, hashAndTag, 
curr); - } + nuint hashAndTag = ZSTD_hashPtr(ip, hBits, mls); + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); + } - if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) - continue; + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + continue; + { + uint p; + for (p = 1; p < fastHashFillStep; ++p) { - uint p; - for (p = 1; p < fastHashFillStep; ++p) + nuint hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> 8] == 0) { - nuint hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); - if (hashTable[hashAndTag >> 8] == 0) - { - ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); - } + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); } } } } + } - private static void ZSTD_fillHashTableForCCtx( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm - ) + private static void ZSTD_fillHashTableForCCtx( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hBits = cParams->hashLog; + uint mls = cParams->minMatch; + byte* @base = ms->window.@base; + byte* ip = @base + ms->nextToUpdate; + byte* iend = (byte*)end - 8; + const uint fastHashFillStep = 3; + assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast); + for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hBits = cParams->hashLog; - uint mls = cParams->minMatch; - byte* @base = ms->window.@base; - byte* ip = @base + ms->nextToUpdate; - byte* iend = (byte*)end - 8; - const uint fastHashFillStep = 3; - assert(dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast); - for (; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) + uint curr = (uint)(ip - @base); + nuint hash0 = ZSTD_hashPtr(ip, hBits, mls); + hashTable[hash0] = curr; + if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) + continue; { - uint curr = (uint)(ip - 
@base); - nuint hash0 = ZSTD_hashPtr(ip, hBits, mls); - hashTable[hash0] = curr; - if (dtlm == ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast) - continue; + uint p; + for (p = 1; p < fastHashFillStep; ++p) { - uint p; - for (p = 1; p < fastHashFillStep; ++p) + nuint hash = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hash] == 0) { - nuint hash = ZSTD_hashPtr(ip + p, hBits, mls); - if (hashTable[hash] == 0) - { - hashTable[hash] = curr + p; - } + hashTable[hash] = curr + p; } } } } + } - private static void ZSTD_fillHashTable( - ZSTD_MatchState_t* ms, - void* end, - ZSTD_dictTableLoadMethod_e dtlm, - ZSTD_tableFillPurpose_e tfp - ) + private static void ZSTD_fillHashTable( + ZSTD_MatchState_t* ms, + void* end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp + ) + { + if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) { - if (tfp == ZSTD_tableFillPurpose_e.ZSTD_tfp_forCDict) - { - ZSTD_fillHashTableForCDict(ms, end, dtlm); - } - else - { - ZSTD_fillHashTableForCCtx(ms, end, dtlm); - } + ZSTD_fillHashTableForCDict(ms, end, dtlm); } + else + { + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_match4Found_cmov( + byte* currentPtr, + byte* matchAddress, + uint matchIdx, + uint idxLowLimit + ) + { + /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. + * However expression below compiles into conditional move. + */ + byte* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); + if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) + return 0; + return matchIdx >= idxLowLimit ? 
1 : 0; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_match4Found_cmov( - byte* currentPtr, - byte* matchAddress, - uint matchIdx, - uint idxLowLimit - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_match4Found_branch( + byte* currentPtr, + byte* matchAddress, + uint matchIdx, + uint idxLowLimit + ) + { + /* using a branch instead of a cmov, + * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, + * aka almost all candidates are within range */ + uint mval; + if (matchIdx >= idxLowLimit) + { + mval = MEM_read32(matchAddress); + } + else { - /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. - * However expression below compiles into conditional move. - */ - byte* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); - if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) - return 0; - return matchIdx >= idxLowLimit ? 1 : 0; + mval = MEM_read32(currentPtr) ^ 1; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_match4Found_branch( - byte* currentPtr, - byte* matchAddress, - uint matchIdx, - uint idxLowLimit - ) + return MEM_read32(currentPtr) == mval ? 1 : 0; + } + + /** + * If you squint hard enough (and ignore repcodes), the search operation at any + * given position is broken into 4 stages: + * + * 1. Hash (map position to hash value via input read) + * 2. Lookup (map hash val to index via hashtable read) + * 3. Load (map index to value at that position via input read) + * 4. Compare + * + * Each of these steps involves a memory read at an address which is computed + * from the previous step. This means these steps must be sequenced and their + * latencies are cumulative. 
+ * + * Rather than do 1->2->3->4 sequentially for a single position before moving + * onto the next, this implementation interleaves these operations across the + * next few positions: + * + * R = Repcode Read & Compare + * H = Hash + * T = Table Lookup + * M = Match Read & Compare + * + * Pos | Time --> + * ----+------------------- + * N | ... M + * N+1 | ... TM + * N+2 | R H T M + * N+3 | H TM + * N+4 | R H T M + * N+5 | H ... + * N+6 | R ... + * + * This is very much analogous to the pipelining of execution in a CPU. And just + * like a CPU, we have to dump the pipeline when we find a match (i.e., take a + * branch). + * + * When this happens, we throw away our current state, and do the following prep + * to re-enter the loop: + * + * Pos | Time --> + * ----+------------------- + * N | H T + * N+1 | H + * + * This is also the work we do at the beginning to enter the loop initially. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_fast_noDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls, + int useCmov + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* min 2 */ + nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 
1 : 0) + 1; + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + byte* prefixStart = @base + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* anchor = istart; + byte* ip0 = istart; + byte* ip1; + byte* ip2; + byte* ip3; + uint current0; + uint rep_offset1 = rep[0]; + uint rep_offset2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; + /* hash for ip0 */ + nuint hash0; + /* hash for ip1 */ + nuint hash1; + /* match idx for ip0 */ + uint matchIdx; + uint offcode; + byte* match0; + nuint mLength; + /* ip0 and ip1 are always adjacent. The targetLength skipping and + * uncompressibility acceleration is applied to every other position, + * matching the behavior of #1562. step therefore represents the gap + * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ + nuint step; + byte* nextStep; + const nuint kStepIncr = 1 << 8 - 1; + void* matchFound = + useCmov != 0 + ? (delegate* managed)(&ZSTD_match4Found_cmov) + : (delegate* managed)(&ZSTD_match4Found_branch); + ip0 += ip0 == prefixStart ? 1 : 0; { - /* using a branch instead of a cmov, - * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, - * aka almost all candidates are within range */ - uint mval; - if (matchIdx >= idxLowLimit) + uint curr = (uint)(ip0 - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); + uint maxRep = curr - windowLow; + if (rep_offset2 > maxRep) { - mval = MEM_read32(matchAddress); + offsetSaved2 = rep_offset2; + rep_offset2 = 0; } - else + + if (rep_offset1 > maxRep) { - mval = MEM_read32(currentPtr) ^ 1; + offsetSaved1 = rep_offset1; + rep_offset1 = 0; } + } - return MEM_read32(currentPtr) == mval ? 
1 : 0; + _start: + step = stepSize; + nextStep = ip0 + kStepIncr; + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + if (ip3 >= ilimit) + { + goto _cleanup; } - /** - * If you squint hard enough (and ignore repcodes), the search operation at any - * given position is broken into 4 stages: - * - * 1. Hash (map position to hash value via input read) - * 2. Lookup (map hash val to index via hashtable read) - * 3. Load (map index to value at that position via input read) - * 4. Compare - * - * Each of these steps involves a memory read at an address which is computed - * from the previous step. This means these steps must be sequenced and their - * latencies are cumulative. - * - * Rather than do 1->2->3->4 sequentially for a single position before moving - * onto the next, this implementation interleaves these operations across the - * next few positions: - * - * R = Repcode Read & Compare - * H = Hash - * T = Table Lookup - * M = Match Read & Compare - * - * Pos | Time --> - * ----+------------------- - * N | ... M - * N+1 | ... TM - * N+2 | R H T M - * N+3 | H TM - * N+4 | R H T M - * N+5 | H ... - * N+6 | R ... - * - * This is very much analogous to the pipelining of execution in a CPU. And just - * like a CPU, we have to dump the pipeline when we find a match (i.e., take a - * branch). - * - * When this happens, we throw away our current state, and do the following prep - * to re-enter the loop: - * - * Pos | Time --> - * ----+------------------- - * N | H T - * N+1 | H - * - * This is also the work we do at the beginning to enter the loop initially. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_fast_noDict_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - uint mls, - int useCmov - ) + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + matchIdx = hashTable[hash0]; + do { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hlog = cParams->hashLog; - /* min 2 */ - nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 1 : 0) + 1; - byte* @base = ms->window.@base; - byte* istart = (byte*)src; - uint endIndex = (uint)((nuint)(istart - @base) + srcSize); - uint prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); - byte* prefixStart = @base + prefixStartIndex; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - byte* anchor = istart; - byte* ip0 = istart; - byte* ip1; - byte* ip2; - byte* ip3; - uint current0; - uint rep_offset1 = rep[0]; - uint rep_offset2 = rep[1]; - uint offsetSaved1 = 0, - offsetSaved2 = 0; - /* hash for ip0 */ - nuint hash0; - /* hash for ip1 */ - nuint hash1; - /* match idx for ip0 */ - uint matchIdx; - uint offcode; - byte* match0; - nuint mLength; - /* ip0 and ip1 are always adjacent. The targetLength skipping and - * uncompressibility acceleration is applied to every other position, - * matching the behavior of #1562. step therefore represents the gap - * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ - nuint step; - byte* nextStep; - const nuint kStepIncr = 1 << 8 - 1; - void* matchFound = - useCmov != 0 - ? (delegate* managed)(&ZSTD_match4Found_cmov) - : (delegate* managed)(&ZSTD_match4Found_branch); - ip0 += ip0 == prefixStart ? 
1 : 0; + /* load repcode match for ip[2]*/ + uint rval = MEM_read32(ip2 - rep_offset1); + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if (MEM_read32(ip2) == rval && rep_offset1 > 0) { - uint curr = (uint)(ip0 - @base); - uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); - uint maxRep = curr - windowLow; - if (rep_offset2 > maxRep) - { - offsetSaved2 = rep_offset2; - rep_offset2 = 0; - } - - if (rep_offset1 > maxRep) - { - offsetSaved1 = rep_offset1; - rep_offset1 = 0; - } + ip0 = ip2; + match0 = ip0 - rep_offset1; + mLength = ip0[-1] == match0[-1] ? 1U : 0U; + ip0 -= mLength; + match0 -= mLength; + assert(1 >= 1); + assert(1 <= 3); + offcode = 1; + mLength += 4; + hashTable[hash1] = (uint)(ip1 - @base); + goto _match; } - _start: - step = stepSize; - nextStep = ip0 + kStepIncr; - ip1 = ip0 + 1; - ip2 = ip0 + step; - ip3 = ip2 + 1; - if (ip3 >= ilimit) + if ( + ((delegate* managed)matchFound)( + ip0, + @base + matchIdx, + matchIdx, + prefixStartIndex + ) != 0 + ) { - goto _cleanup; + hashTable[hash1] = (uint)(ip1 - @base); + goto _offset; } - hash0 = ZSTD_hashPtr(ip0, hlog, mls); - hash1 = ZSTD_hashPtr(ip1, hlog, mls); - matchIdx = hashTable[hash0]; - do + matchIdx = hashTable[hash1]; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if ( + ((delegate* managed)matchFound)( + ip0, + @base + matchIdx, + matchIdx, + prefixStartIndex + ) != 0 + ) { - /* load repcode match for ip[2]*/ - uint rval = MEM_read32(ip2 - rep_offset1); - current0 = (uint)(ip0 - @base); - hashTable[hash0] = current0; - if (MEM_read32(ip2) == rval && rep_offset1 > 0) + if (step <= 4) { - ip0 = ip2; - match0 = ip0 - rep_offset1; - mLength = ip0[-1] == match0[-1] ? 
1U : 0U; - ip0 -= mLength; - match0 -= mLength; - assert(1 >= 1); - assert(1 <= 3); - offcode = 1; - mLength += 4; hashTable[hash1] = (uint)(ip1 - @base); - goto _match; } - if ( - ((delegate* managed)matchFound)( - ip0, - @base + matchIdx, - matchIdx, - prefixStartIndex - ) != 0 - ) - { - hashTable[hash1] = (uint)(ip1 - @base); - goto _offset; - } + goto _offset; + } - matchIdx = hashTable[hash1]; - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - ip0 = ip1; - ip1 = ip2; - ip2 = ip3; - current0 = (uint)(ip0 - @base); - hashTable[hash0] = current0; - if ( - ((delegate* managed)matchFound)( - ip0, - @base + matchIdx, - matchIdx, - prefixStartIndex - ) != 0 - ) + matchIdx = hashTable[hash1]; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + if (ip2 >= nextStep) + { + step++; +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { - if (step <= 4) - { - hashTable[hash1] = (uint)(ip1 - @base); - } - - goto _offset; + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); } - - matchIdx = hashTable[hash1]; - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - ip0 = ip1; - ip1 = ip2; - ip2 = ip0 + step; - ip3 = ip1 + step; - if (ip2 >= nextStep) - { - step++; -#if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); - } #endif - nextStep += kStepIncr; - } - } while (ip3 < ilimit); - _cleanup: - offsetSaved2 = offsetSaved1 != 0 && rep_offset1 != 0 ? offsetSaved1 : offsetSaved2; - rep[0] = rep_offset1 != 0 ? rep_offset1 : offsetSaved1; - rep[1] = rep_offset2 != 0 ? 
rep_offset2 : offsetSaved2; - return (nuint)(iend - anchor); - _offset: - match0 = @base + matchIdx; - rep_offset2 = rep_offset1; - rep_offset1 = (uint)(ip0 - match0); - assert(rep_offset1 > 0); - offcode = rep_offset1 + 3; - mLength = 4; - while (ip0 > anchor && match0 > prefixStart && ip0[-1] == match0[-1]) - { - ip0--; - match0--; - mLength++; + nextStep += kStepIncr; } + } while (ip3 < ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && rep_offset1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = rep_offset1 != 0 ? rep_offset1 : offsetSaved1; + rep[1] = rep_offset2 != 0 ? rep_offset2 : offsetSaved2; + return (nuint)(iend - anchor); + _offset: + match0 = @base + matchIdx; + rep_offset2 = rep_offset1; + rep_offset1 = (uint)(ip0 - match0); + assert(rep_offset1 > 0); + offcode = rep_offset1 + 3; + mLength = 4; + while (ip0 > anchor && match0 > prefixStart && ip0[-1] == match0[-1]) + { + ip0--; + match0--; + mLength++; + } - _match: - mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); - ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); - ip0 += mLength; - anchor = ip0; - if (ip0 <= ilimit) + _match: + mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); + ip0 += mLength; + anchor = ip0; + if (ip0 <= ilimit) + { + assert(@base + current0 + 2 > istart); + hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + if (rep_offset2 > 0) { - assert(@base + current0 + 2 > istart); - hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 2; - hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); - if (rep_offset2 > 0) + while (ip0 <= ilimit && MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) { - while (ip0 <= ilimit && MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) + /* store sequence */ + nuint rLength = 
ZSTD_count(ip0 + 4, ip0 + 4 - rep_offset2, iend) + 4; { - /* store sequence */ - nuint rLength = ZSTD_count(ip0 + 4, ip0 + 4 - rep_offset2, iend) + 4; - { - /* swap rep_offset2 <=> rep_offset1 */ - uint tmpOff = rep_offset2; - rep_offset2 = rep_offset1; - rep_offset1 = tmpOff; - } - - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); - ip0 += rLength; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); - anchor = ip0; - continue; + /* swap rep_offset2 <=> rep_offset1 */ + uint tmpOff = rep_offset2; + rep_offset2 = rep_offset1; + rep_offset1 = tmpOff; } + + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); + ip0 += rLength; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, rLength); + anchor = ip0; + continue; } } - - goto _start; } - private static nuint ZSTD_compressBlock_fast_noDict_4_1( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1); - } + goto _start; + } - private static nuint ZSTD_compressBlock_fast_noDict_5_1( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 1); - } + private static nuint ZSTD_compressBlock_fast_noDict_4_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1); + } - private static nuint ZSTD_compressBlock_fast_noDict_6_1( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 1); - } + private static nuint ZSTD_compressBlock_fast_noDict_5_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + 
nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 1); + } - private static nuint ZSTD_compressBlock_fast_noDict_7_1( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 1); - } + private static nuint ZSTD_compressBlock_fast_noDict_6_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 1); + } - private static nuint ZSTD_compressBlock_fast_noDict_4_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); - } + private static nuint ZSTD_compressBlock_fast_noDict_7_1( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 1); + } - private static nuint ZSTD_compressBlock_fast_noDict_5_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); - } + private static nuint ZSTD_compressBlock_fast_noDict_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); + } - private static nuint ZSTD_compressBlock_fast_noDict_6_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); - } + private static nuint ZSTD_compressBlock_fast_noDict_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + 
nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); + } - private static nuint ZSTD_compressBlock_fast_noDict_7_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); - } + private static nuint ZSTD_compressBlock_fast_noDict_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); + } + + private static nuint ZSTD_compressBlock_fast_noDict_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); + } - private static nuint ZSTD_compressBlock_fast( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_fast( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mml = ms->cParams.minMatch; + /* use cmov when "candidate in range" branch is likely unpredictable */ + int useCmov = ms->cParams.windowLog < 19 ? 1 : 0; + assert(ms->dictMatchState == null); + if (useCmov != 0) { - uint mml = ms->cParams.minMatch; - /* use cmov when "candidate in range" branch is likely unpredictable */ - int useCmov = ms->cParams.windowLog < 19 ? 
1 : 0; - assert(ms->dictMatchState == null); - if (useCmov != 0) + switch (mml) { - switch (mml) - { - default: - case 4: - return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); - case 5: - return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); - case 6: - return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); - case 7: - return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); - } + default: + case 4: + return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); } - else + } + else + { + switch (mml) { - switch (mml) - { - default: - case 4: - return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); - case 5: - return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); - case 6: - return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); - case 7: - return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); - } + default: + case 4: + return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); } } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_fast_dictMatchState_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - uint mls, - uint hasStep - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_fast_dictMatchState_generic( 
+ ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + uint mls, + uint hasStep + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* support stepSize of 0 */ + uint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 1 : 0); + byte* @base = ms->window.@base; + byte* istart = (byte*)src; + byte* ip0 = istart; + /* we assert below that stepSize >= 1 */ + byte* ip1 = ip0 + stepSize; + byte* anchor = istart; + uint prefixStartIndex = ms->window.dictLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], + offset_2 = rep[1]; + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dictCParams = &dms->cParams; + uint* dictHashTable = dms->hashTable; + uint dictStartIndex = dms->window.dictLimit; + byte* dictBase = dms->window.@base; + byte* dictStart = dictBase + dictStartIndex; + byte* dictEnd = dms->window.nextSrc; + uint dictIndexDelta = prefixStartIndex - (uint)(dictEnd - dictBase); + uint dictAndPrefixLength = (uint)(istart - prefixStart + dictEnd - dictStart); + uint dictHBits = dictCParams->hashLog + 8; + /* if a dictionary is still attached, it necessarily means that + * it is within window size. So we just check it. */ + uint maxDistance = 1U << (int)cParams->windowLog; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + assert(endIndex - prefixStartIndex <= maxDistance); + assert(prefixStartIndex >= (uint)(dictEnd - dictBase)); + if (ms->prefetchCDictTables != 0) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hlog = cParams->hashLog; - /* support stepSize of 0 */ - uint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 
1 : 0); - byte* @base = ms->window.@base; - byte* istart = (byte*)src; - byte* ip0 = istart; - /* we assert below that stepSize >= 1 */ - byte* ip1 = ip0 + stepSize; - byte* anchor = istart; - uint prefixStartIndex = ms->window.dictLimit; - byte* prefixStart = @base + prefixStartIndex; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - uint offset_1 = rep[0], - offset_2 = rep[1]; - ZSTD_MatchState_t* dms = ms->dictMatchState; - ZSTD_compressionParameters* dictCParams = &dms->cParams; - uint* dictHashTable = dms->hashTable; - uint dictStartIndex = dms->window.dictLimit; - byte* dictBase = dms->window.@base; - byte* dictStart = dictBase + dictStartIndex; - byte* dictEnd = dms->window.nextSrc; - uint dictIndexDelta = prefixStartIndex - (uint)(dictEnd - dictBase); - uint dictAndPrefixLength = (uint)(istart - prefixStart + dictEnd - dictStart); - uint dictHBits = dictCParams->hashLog + 8; - /* if a dictionary is still attached, it necessarily means that - * it is within window size. So we just check it. 
*/ - uint maxDistance = 1U << (int)cParams->windowLog; - uint endIndex = (uint)((nuint)(istart - @base) + srcSize); - assert(endIndex - prefixStartIndex <= maxDistance); - assert(prefixStartIndex >= (uint)(dictEnd - dictBase)); - if (ms->prefetchCDictTables != 0) + nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); { - nuint hashTableBytes = ((nuint)1 << (int)dictCParams->hashLog) * sizeof(uint); + sbyte* _ptr = (sbyte*)dictHashTable; + nuint _size = hashTableBytes; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) { - sbyte* _ptr = (sbyte*)dictHashTable; - nuint _size = hashTableBytes; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) - { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); - } -#endif + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); } +#endif } } + } - ip0 += dictAndPrefixLength == 0 ? 1 : 0; - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - assert(stepSize >= 1); - while (ip1 <= ilimit) + ip0 += dictAndPrefixLength == 0 ? 
1 : 0; + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + assert(stepSize >= 1); + while (ip1 <= ilimit) + { + nuint mLength; + nuint hash0 = ZSTD_hashPtr(ip0, hlog, mls); + nuint dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); + uint dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> 8]; + int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); + uint matchIndex = hashTable[hash0]; + uint curr = (uint)(ip0 - @base); + nuint step = stepSize; + const nuint kStepIncr = 1 << 8; + byte* nextStep = ip0 + kStepIncr; + while (true) { - nuint mLength; - nuint hash0 = ZSTD_hashPtr(ip0, hlog, mls); - nuint dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); - uint dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> 8]; - int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); - uint matchIndex = hashTable[hash0]; - uint curr = (uint)(ip0 - @base); - nuint step = stepSize; - const nuint kStepIncr = 1 << 8; - byte* nextStep = ip0 + kStepIncr; - while (true) + byte* match = @base + matchIndex; + uint repIndex = curr + 1 - offset_1; + byte* repMatch = + repIndex < prefixStartIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + nuint hash1 = ZSTD_hashPtr(ip1, hlog, mls); + nuint dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; + if ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip0 + 1) + ) + { + byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = + ZSTD_count_2segments( + ip0 + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixStart + ) + 4; + ip0++; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, 1, mLength); + break; + } + + if (dictTagsMatch != 0) { - byte* match = @base + matchIndex; - uint repIndex = curr + 1 - offset_1; - byte* repMatch = - repIndex < prefixStartIndex - ? 
dictBase + (repIndex - dictIndexDelta) - : @base + repIndex; - nuint hash1 = ZSTD_hashPtr(ip1, hlog, mls); - nuint dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); - hashTable[hash0] = curr; + /* Found a possible dict match */ + uint dictMatchIndex = dictMatchIndexAndTag >> 8; + byte* dictMatch = dictBase + dictMatchIndex; if ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex) != 0 - && MEM_read32(repMatch) == MEM_read32(ip0 + 1) + dictMatchIndex > dictStartIndex + && MEM_read32(dictMatch) == MEM_read32(ip0) ) { - byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - mLength = - ZSTD_count_2segments( - ip0 + 1 + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixStart - ) + 4; - ip0++; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, 1, mLength); - break; - } - - if (dictTagsMatch != 0) - { - /* Found a possible dict match */ - uint dictMatchIndex = dictMatchIndexAndTag >> 8; - byte* dictMatch = dictBase + dictMatchIndex; - if ( - dictMatchIndex > dictStartIndex - && MEM_read32(dictMatch) == MEM_read32(ip0) - ) + if (matchIndex <= prefixStartIndex) { - if (matchIndex <= prefixStartIndex) - { - uint offset = curr - dictMatchIndex - dictIndexDelta; - mLength = - ZSTD_count_2segments( - ip0 + 4, - dictMatch + 4, - iend, - dictEnd, - prefixStart - ) + 4; - while ( - ip0 > anchor - && dictMatch > dictStart - && ip0[-1] == dictMatch[-1] - ) - { - ip0--; - dictMatch--; - mLength++; - } - - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - ZSTD_storeSeq( - seqStore, - (nuint)(ip0 - anchor), - anchor, + uint offset = curr - dictMatchIndex - dictIndexDelta; + mLength = + ZSTD_count_2segments( + ip0 + 4, + dictMatch + 4, iend, - offset + 3, - mLength - ); - break; + dictEnd, + prefixStart + ) + 4; + while ( + ip0 > anchor + && dictMatch > dictStart + && ip0[-1] == dictMatch[-1] + ) + { + ip0--; + dictMatch--; + mLength++; } - } - } - if (ZSTD_match4Found_cmov(ip0, match, matchIndex, 
prefixStartIndex) != 0) - { - /* found a regular match of size >= 4 */ - uint offset = (uint)(ip0 - match); - mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; - while (ip0 > anchor && match > prefixStart && ip0[-1] == match[-1]) - { - ip0--; - match--; - mLength++; + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq( + seqStore, + (nuint)(ip0 - anchor), + anchor, + iend, + offset + 3, + mLength + ); + break; } - - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - ZSTD_storeSeq( - seqStore, - (nuint)(ip0 - anchor), - anchor, - iend, - offset + 3, - mLength - ); - break; } + } - dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> 8]; - dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); - matchIndex = hashTable[hash1]; - if (ip1 >= nextStep) + if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex) != 0) + { + /* found a regular match of size >= 4 */ + uint offset = (uint)(ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (ip0 > anchor && match > prefixStart && ip0[-1] == match[-1]) { - step++; - nextStep += kStepIncr; + ip0--; + match--; + mLength++; } - ip0 = ip1; - ip1 = ip1 + step; - if (ip1 > ilimit) - goto _cleanup; - curr = (uint)(ip0 - @base); - hash0 = hash1; + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + ZSTD_storeSeq( + seqStore, + (nuint)(ip0 - anchor), + anchor, + iend, + offset + 3, + mLength + ); + break; } - assert(mLength != 0); - ip0 += mLength; - anchor = ip0; - if (ip0 <= ilimit) + dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> 8]; + dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); + matchIndex = hashTable[hash1]; + if (ip1 >= nextStep) { - assert(@base + curr + 2 > istart); - hashTable[ZSTD_hashPtr(@base + curr + 2, hlog, mls)] = curr + 2; - hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); - while (ip0 <= ilimit) - { - uint current2 = (uint)(ip0 - 
@base); - uint repIndex2 = current2 - offset_2; - byte* repMatch2 = - repIndex2 < prefixStartIndex - ? dictBase - dictIndexDelta + repIndex2 - : @base + repIndex2; - if ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex2) != 0 - && MEM_read32(repMatch2) == MEM_read32(ip0) - ) - { - byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - nuint repLength2 = - ZSTD_count_2segments( - ip0 + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixStart - ) + 4; - /* swap offset_2 <=> offset_1 */ - uint tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; - ip0 += repLength2; - anchor = ip0; - continue; - } + step++; + nextStep += kStepIncr; + } - break; + ip0 = ip1; + ip1 = ip1 + step; + if (ip1 > ilimit) + goto _cleanup; + curr = (uint)(ip0 - @base); + hash0 = hash1; + } + + assert(mLength != 0); + ip0 += mLength; + anchor = ip0; + if (ip0 <= ilimit) + { + assert(@base + curr + 2 > istart); + hashTable[ZSTD_hashPtr(@base + curr + 2, hlog, mls)] = curr + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + while (ip0 <= ilimit) + { + uint current2 = (uint)(ip0 - @base); + uint repIndex2 = current2 - offset_2; + byte* repMatch2 = + repIndex2 < prefixStartIndex + ? dictBase - dictIndexDelta + repIndex2 + : @base + repIndex2; + if ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip0) + ) + { + byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + nuint repLength2 = + ZSTD_count_2segments( + ip0 + 4, + repMatch2 + 4, + iend, + repEnd2, + prefixStart + ) + 4; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; + ip0 += repLength2; + anchor = ip0; + continue; } - } - assert(ip0 == anchor); - ip1 = ip0 + stepSize; + break; + } } - _cleanup: - rep[0] = offset_1; - rep[1] = offset_2; - return (nuint)(iend - anchor); + assert(ip0 == anchor); + ip1 = ip0 + stepSize; } - private static nuint ZSTD_compressBlock_fast_dictMatchState_4_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 4, - 0 - ); - } + _cleanup: + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_fast_dictMatchState_5_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 5, - 0 - ); - } + private static nuint ZSTD_compressBlock_fast_dictMatchState_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 4, + 0 + ); + } - private static nuint ZSTD_compressBlock_fast_dictMatchState_6_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 6, - 0 - ); - } + private static nuint ZSTD_compressBlock_fast_dictMatchState_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* 
rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 5, + 0 + ); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 6, + 0 + ); + } - private static nuint ZSTD_compressBlock_fast_dictMatchState_7_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_fast_dictMatchState_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_dictMatchState_generic( + ms, + seqStore, + rep, + src, + srcSize, + 7, + 0 + ); + } + + private static nuint ZSTD_compressBlock_fast_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mls = ms->cParams.minMatch; + assert(ms->dictMatchState != null); + switch (mls) { - return ZSTD_compressBlock_fast_dictMatchState_generic( - ms, - seqStore, - rep, - src, - srcSize, - 7, - 0 - ); + default: + case 4: + return ZSTD_compressBlock_fast_dictMatchState_4_0( + ms, + seqStore, + rep, + src, + srcSize + ); + case 5: + return ZSTD_compressBlock_fast_dictMatchState_5_0( + ms, + seqStore, + rep, + src, + srcSize + ); + case 6: + return ZSTD_compressBlock_fast_dictMatchState_6_0( + ms, + seqStore, + rep, + src, + srcSize + ); + case 7: + return ZSTD_compressBlock_fast_dictMatchState_7_0( + ms, + seqStore, + rep, + src, + srcSize + ); } + } - private static nuint ZSTD_compressBlock_fast_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_fast_extDict_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* 
rep, + void* src, + nuint srcSize, + uint mls, + uint hasStep + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hlog = cParams->hashLog; + /* support stepSize of 0 */ + nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 1 : 0) + 1; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + byte* istart = (byte*)src; + byte* anchor = istart; + uint endIndex = (uint)((nuint)(istart - @base) + srcSize); + uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + uint dictStartIndex = lowLimit; + byte* dictStart = dictBase + dictStartIndex; + uint dictLimit = ms->window.dictLimit; + uint prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit; + byte* prefixStart = @base + prefixStartIndex; + byte* dictEnd = dictBase + prefixStartIndex; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; + byte* ip0 = istart; + byte* ip1; + byte* ip2; + byte* ip3; + uint current0; + /* hash for ip0 */ + nuint hash0; + /* hash for ip1 */ + nuint hash1; + /* match idx for ip0 */ + uint idx; + /* base pointer for idx */ + byte* idxBase; + uint offcode; + byte* match0; + nuint mLength; + /* initialize to avoid warning, assert != 0 later */ + byte* matchEnd = null; + nuint step; + byte* nextStep; + const nuint kStepIncr = 1 << 8 - 1; + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); { - uint mls = ms->cParams.minMatch; - assert(ms->dictMatchState != null); - switch (mls) + uint curr = (uint)(ip0 - @base); + uint maxRep = curr - dictStartIndex; + if (offset_2 >= maxRep) { - default: - case 4: - return ZSTD_compressBlock_fast_dictMatchState_4_0( - ms, - seqStore, - rep, - src, - srcSize - ); - case 5: - return ZSTD_compressBlock_fast_dictMatchState_5_0( - ms, - seqStore, - rep, - src, - srcSize - ); - case 6: - 
return ZSTD_compressBlock_fast_dictMatchState_6_0( - ms, - seqStore, - rep, - src, - srcSize - ); - case 7: - return ZSTD_compressBlock_fast_dictMatchState_7_0( - ms, - seqStore, - rep, - src, - srcSize - ); + offsetSaved2 = offset_2; + offset_2 = 0; + } + + if (offset_1 >= maxRep) + { + offsetSaved1 = offset_1; + offset_1 = 0; } } - private static nuint ZSTD_compressBlock_fast_extDict_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - uint mls, - uint hasStep - ) + _start: + step = stepSize; + nextStep = ip0 + kStepIncr; + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + if (ip3 >= ilimit) + { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + idx = hashTable[hash0]; + idxBase = idx < prefixStartIndex ? dictBase : @base; + do { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hlog = cParams->hashLog; - /* support stepSize of 0 */ - nuint stepSize = cParams->targetLength + (uint)(cParams->targetLength == 0 ? 1 : 0) + 1; - byte* @base = ms->window.@base; - byte* dictBase = ms->window.dictBase; - byte* istart = (byte*)src; - byte* anchor = istart; - uint endIndex = (uint)((nuint)(istart - @base) + srcSize); - uint lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); - uint dictStartIndex = lowLimit; - byte* dictStart = dictBase + dictStartIndex; - uint dictLimit = ms->window.dictLimit; - uint prefixStartIndex = dictLimit < lowLimit ? 
lowLimit : dictLimit; - byte* prefixStart = @base + prefixStartIndex; - byte* dictEnd = dictBase + prefixStartIndex; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - uint offset_1 = rep[0], - offset_2 = rep[1]; - uint offsetSaved1 = 0, - offsetSaved2 = 0; - byte* ip0 = istart; - byte* ip1; - byte* ip2; - byte* ip3; - uint current0; - /* hash for ip0 */ - nuint hash0; - /* hash for ip1 */ - nuint hash1; - /* match idx for ip0 */ - uint idx; - /* base pointer for idx */ - byte* idxBase; - uint offcode; - byte* match0; - nuint mLength; - /* initialize to avoid warning, assert != 0 later */ - byte* matchEnd = null; - nuint step; - byte* nextStep; - const nuint kStepIncr = 1 << 8 - 1; - if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); { - uint curr = (uint)(ip0 - @base); - uint maxRep = curr - dictStartIndex; - if (offset_2 >= maxRep) + uint current2 = (uint)(ip2 - @base); + uint repIndex = current2 - offset_1; + byte* repBase = repIndex < prefixStartIndex ? dictBase : @base; + uint rval; + if (prefixStartIndex - repIndex >= 4 && offset_1 > 0) { - offsetSaved2 = offset_2; - offset_2 = 0; + rval = MEM_read32(repBase + repIndex); + } + else + { + rval = MEM_read32(ip2) ^ 1; } - if (offset_1 >= maxRep) + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; + if (MEM_read32(ip2) == rval) { - offsetSaved1 = offset_1; - offset_1 = 0; + ip0 = ip2; + match0 = repBase + repIndex; + matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + assert(match0 != prefixStart && match0 != dictStart); + mLength = ip0[-1] == match0[-1] ? 1U : 0U; + ip0 -= mLength; + match0 -= mLength; + assert(1 >= 1); + assert(1 <= 3); + offcode = 1; + mLength += 4; + goto _match; } } - _start: - step = stepSize; - nextStep = ip0 + kStepIncr; - ip1 = ip0 + 1; - ip2 = ip0 + step; - ip3 = ip2 + 1; - if (ip3 >= ilimit) { - goto _cleanup; + uint mval = + idx >= dictStartIndex ? 
MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + if (MEM_read32(ip0) == mval) + { + goto _offset; + } } - hash0 = ZSTD_hashPtr(ip0, hlog, mls); - hash1 = ZSTD_hashPtr(ip1, hlog, mls); - idx = hashTable[hash0]; + idx = hashTable[hash1]; idxBase = idx < prefixStartIndex ? dictBase : @base; - do + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + current0 = (uint)(ip0 - @base); + hashTable[hash0] = current0; { + uint mval = + idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + if (MEM_read32(ip0) == mval) { - uint current2 = (uint)(ip2 - @base); - uint repIndex = current2 - offset_1; - byte* repBase = repIndex < prefixStartIndex ? dictBase : @base; - uint rval; - if (prefixStartIndex - repIndex >= 4 && offset_1 > 0) - { - rval = MEM_read32(repBase + repIndex); - } - else - { - rval = MEM_read32(ip2) ^ 1; - } - - current0 = (uint)(ip0 - @base); - hashTable[hash0] = current0; - if (MEM_read32(ip2) == rval) - { - ip0 = ip2; - match0 = repBase + repIndex; - matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - assert(match0 != prefixStart && match0 != dictStart); - mLength = ip0[-1] == match0[-1] ? 1U : 0U; - ip0 -= mLength; - match0 -= mLength; - assert(1 >= 1); - assert(1 <= 3); - offcode = 1; - mLength += 4; - goto _match; - } - } - - { - uint mval = - idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; - if (MEM_read32(ip0) == mval) - { - goto _offset; - } + goto _offset; } + } - idx = hashTable[hash1]; - idxBase = idx < prefixStartIndex ? dictBase : @base; - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - ip0 = ip1; - ip1 = ip2; - ip2 = ip3; - current0 = (uint)(ip0 - @base); - hashTable[hash0] = current0; + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? 
dictBase : @base; + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + if (ip2 >= nextStep) + { + step++; +#if NETCOREAPP3_0_OR_GREATER + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { - uint mval = - idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; - if (MEM_read32(ip0) == mval) - { - goto _offset; - } + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); } - - idx = hashTable[hash1]; - idxBase = idx < prefixStartIndex ? dictBase : @base; - hash0 = hash1; - hash1 = ZSTD_hashPtr(ip2, hlog, mls); - ip0 = ip1; - ip1 = ip2; - ip2 = ip0 + step; - ip3 = ip1 + step; - if (ip2 >= nextStep) - { - step++; -#if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); - } #endif - nextStep += kStepIncr; - } - } while (ip3 < ilimit); - _cleanup: - offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; - rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; - rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; - return (nuint)(iend - anchor); - _offset: - { - uint offset = current0 - idx; - byte* lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; - matchEnd = idx < prefixStartIndex ? 
dictEnd : iend; - match0 = idxBase + idx; - offset_2 = offset_1; - offset_1 = offset; - assert(offset > 0); - offcode = offset + 3; - mLength = 4; - while (ip0 > anchor && match0 > lowMatchPtr && ip0[-1] == match0[-1]) - { - ip0--; - match0--; - mLength++; - } + nextStep += kStepIncr; } - - _match: - assert(matchEnd != null); - mLength += ZSTD_count_2segments( - ip0 + mLength, - match0 + mLength, - iend, - matchEnd, - prefixStart - ); - ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); - ip0 += mLength; - anchor = ip0; - if (ip1 < ip0) + } while (ip3 < ilimit); + _cleanup: + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + _offset: + { + uint offset = current0 - idx; + byte* lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; + matchEnd = idx < prefixStartIndex ? dictEnd : iend; + match0 = idxBase + idx; + offset_2 = offset_1; + offset_1 = offset; + assert(offset > 0); + offcode = offset + 3; + mLength = 4; + while (ip0 > anchor && match0 > lowMatchPtr && ip0[-1] == match0[-1]) { - hashTable[hash1] = (uint)(ip1 - @base); + ip0--; + match0--; + mLength++; } + } - if (ip0 <= ilimit) + _match: + assert(matchEnd != null); + mLength += ZSTD_count_2segments( + ip0 + mLength, + match0 + mLength, + iend, + matchEnd, + prefixStart + ); + ZSTD_storeSeq(seqStore, (nuint)(ip0 - anchor), anchor, iend, offcode, mLength); + ip0 += mLength; + anchor = ip0; + if (ip1 < ip0) + { + hashTable[hash1] = (uint)(ip1 - @base); + } + + if (ip0 <= ilimit) + { + assert(@base + current0 + 2 > istart); + hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 2; + hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); + while (ip0 <= ilimit) { - assert(@base + current0 + 2 > istart); - hashTable[ZSTD_hashPtr(@base + current0 + 2, hlog, mls)] = current0 + 
2; - hashTable[ZSTD_hashPtr(ip0 - 2, hlog, mls)] = (uint)(ip0 - 2 - @base); - while (ip0 <= ilimit) + uint repIndex2 = (uint)(ip0 - @base) - offset_2; + byte* repMatch2 = + repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; + if ( + ( + ZSTD_index_overlap_check(prefixStartIndex, repIndex2) + & (offset_2 > 0 ? 1 : 0) + ) != 0 + && MEM_read32(repMatch2) == MEM_read32(ip0) + ) { - uint repIndex2 = (uint)(ip0 - @base) - offset_2; - byte* repMatch2 = - repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; - if ( - ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex2) - & (offset_2 > 0 ? 1 : 0) - ) != 0 - && MEM_read32(repMatch2) == MEM_read32(ip0) - ) + byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + nuint repLength2 = + ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + + 4; { - byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - nuint repLength2 = - ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) - + 4; - { - /* swap offset_2 <=> offset_1 */ - uint tmpOffset = offset_2; - offset_2 = offset_1; - offset_1 = tmpOffset; - } - - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); - ip0 += repLength2; - anchor = ip0; - continue; + /* swap offset_2 <=> offset_1 */ + uint tmpOffset = offset_2; + offset_2 = offset_1; + offset_1 = tmpOffset; } - break; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (uint)(ip0 - @base); + ip0 += repLength2; + anchor = ip0; + continue; } - } - goto _start; + break; + } } - private static nuint ZSTD_compressBlock_fast_extDict_4_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); - } + goto 
_start; + } - private static nuint ZSTD_compressBlock_fast_extDict_5_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); - } + private static nuint ZSTD_compressBlock_fast_extDict_4_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4, 0); + } - private static nuint ZSTD_compressBlock_fast_extDict_6_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); - } + private static nuint ZSTD_compressBlock_fast_extDict_5_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5, 0); + } - private static nuint ZSTD_compressBlock_fast_extDict_7_0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); - } + private static nuint ZSTD_compressBlock_fast_extDict_6_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6, 0); + } - private static nuint ZSTD_compressBlock_fast_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + private static nuint ZSTD_compressBlock_fast_extDict_7_0( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7, 0); + } + + private static nuint ZSTD_compressBlock_fast_extDict( + 
ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint mls = ms->cParams.minMatch; + assert(ms->dictMatchState == null); + switch (mls) { - uint mls = ms->cParams.minMatch; - assert(ms->dictMatchState == null); - switch (mls) - { - default: - case 4: - return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); - case 5: - return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); - case 6: - return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); - case 7: - return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); - } + default: + case 4: + return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); + case 5: + return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); + case 6: + return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); + case 7: + return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs index e6ccfb0ca..16f4c5135 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs @@ -1,7 +1,7 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics.X86; #endif @@ -9,10 +9,10 @@ using System.Runtime.Intrinsics.Arm; #endif -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { #if NET7_0_OR_GREATER private static ReadOnlySpan Span_repStartValue => new uint[3] { 1, 4, 8 }; private 
static uint* repStartValue => @@ -22,15 +22,15 @@ ref MemoryMarshal.GetReference(Span_repStartValue) ); #else - private static readonly uint* repStartValue = GetArrayPointer(new uint[3] { 1, 4, 8 }); + private static readonly uint* repStartValue = GetArrayPointer(new uint[3] { 1, 4, 8 }); #endif - private static readonly nuint* ZSTD_fcs_fieldSize = GetArrayPointer( - new nuint[4] { 0, 2, 4, 8 } - ); - private static readonly nuint* ZSTD_did_fieldSize = GetArrayPointer( - new nuint[4] { 0, 1, 2, 4 } - ); - private const uint ZSTD_blockHeaderSize = 3; + private static readonly nuint* ZSTD_fcs_fieldSize = GetArrayPointer( + new nuint[4] { 0, 2, 4, 8 } + ); + private static readonly nuint* ZSTD_did_fieldSize = GetArrayPointer( + new nuint[4] { 0, 1, 2, 4 } + ); + private const uint ZSTD_blockHeaderSize = 3; #if NET7_0_OR_GREATER private static ReadOnlySpan Span_LL_bits => new byte[36] @@ -79,47 +79,47 @@ ref MemoryMarshal.GetReference(Span_LL_bits) ); #else - private static readonly byte* LL_bits = GetArrayPointer( - new byte[36] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - } - ); + private static readonly byte* LL_bits = GetArrayPointer( + new byte[36] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_LL_defaultNorm => @@ -169,49 +169,49 @@ ref MemoryMarshal.GetReference(Span_LL_defaultNorm) ); #else - private static readonly short* LL_defaultNorm = GetArrayPointer( - new short[36] - { - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - } - ); + 
private static readonly short* LL_defaultNorm = GetArrayPointer( + new short[36] + { + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + } + ); #endif - private const uint LL_defaultNormLog = 6; + private const uint LL_defaultNormLog = 6; #if NET7_0_OR_GREATER private static ReadOnlySpan Span_ML_bits => new byte[53] @@ -277,64 +277,64 @@ ref MemoryMarshal.GetReference(Span_ML_bits) ); #else - private static readonly byte* ML_bits = GetArrayPointer( - new byte[53] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - } - ); + private static readonly byte* ML_bits = GetArrayPointer( + new byte[53] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_ML_defaultNorm => @@ -401,66 +401,66 @@ ref MemoryMarshal.GetReference(Span_ML_defaultNorm) ); #else - private static readonly short* ML_defaultNorm = GetArrayPointer( - new short[53] - { - 1, - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - } - ); + private static readonly short* ML_defaultNorm = GetArrayPointer( + new short[53] + { + 1, + 4, + 3, + 2, + 2, + 2, + 2, + 
2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + } + ); #endif - private const uint ML_defaultNormLog = 6; + private const uint ML_defaultNormLog = 6; #if NET7_0_OR_GREATER private static ReadOnlySpan Span_OF_defaultNorm => new short[29] @@ -502,58 +502,58 @@ ref MemoryMarshal.GetReference(Span_OF_defaultNorm) ); #else - private static readonly short* OF_defaultNorm = GetArrayPointer( - new short[29] - { - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - (short)(-1), - } - ); -#endif - private const uint OF_defaultNormLog = 5; - - /*-******************************************* - * Shared functions to include for inlining - *********************************************/ - private static void ZSTD_copy8(void* dst, void* src) + private static readonly short* OF_defaultNorm = GetArrayPointer( + new short[29] { - memcpy(dst, src, 8); + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), + (short)(-1), } + ); +#endif + private const uint OF_defaultNormLog = 5; - /* Need to use memmove here since the literal buffer can now be located within - the dst buffer. In circumstances where the op "catches up" to where the - literal buffer is, there can be partial overlaps in this call on the final - copy if the literal is being shifted by less than 16 bytes. 
*/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_copy16(void* dst, void* src) - { + /*-******************************************* + * Shared functions to include for inlining + *********************************************/ + private static void ZSTD_copy8(void* dst, void* src) + { + memcpy(dst, src, 8); + } + + /* Need to use memmove here since the literal buffer can now be located within + the dst buffer. In circumstances where the op "catches up" to where the + literal buffer is, there can be partial overlaps in this call on the final + copy if the literal is being shifted by less than 16 bytes. */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_copy16(void* dst, void* src) + { #if NET5_0_OR_GREATER if (AdvSimd.IsSupported) { @@ -568,74 +568,73 @@ private static void ZSTD_copy16(void* dst, void* src) } else #endif - { - var v1 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src); - var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned( - (ulong*)src + 1 - ); - System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst, v1); - System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst + 1, v2); - } + { + var v1 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src); + var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned( + (ulong*)src + 1 + ); + System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst, v1); + System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst + 1, v2); } + } - /*! ZSTD_wildcopy() : - * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0) - * @param ovtype controls the overlap detection - * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. - * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. - * The src buffer must be before the dst buffer. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_wildcopy(void* dst, void* src, nint length, ZSTD_overlap_e ovtype) + /*! ZSTD_wildcopy() : + * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0) + * @param ovtype controls the overlap detection + * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. + * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. + * The src buffer must be before the dst buffer. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_wildcopy(void* dst, void* src, nint length, ZSTD_overlap_e ovtype) + { + nint diff = (nint)((byte*)dst - (byte*)src); + byte* ip = (byte*)src; + byte* op = (byte*)dst; + byte* oend = op + length; + if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff < 16) { - nint diff = (nint)((byte*)dst - (byte*)src); - byte* ip = (byte*)src; - byte* op = (byte*)dst; - byte* oend = op + length; - if (ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff < 16) + do { - do - { - ZSTD_copy8(op, ip); - op += 8; - ip += 8; - } while (op < oend); - } - else + ZSTD_copy8(op, ip); + op += 8; + ip += 8; + } while (op < oend); + } + else + { + assert(diff >= 16 || diff <= -16); + ZSTD_copy16(op, ip); + if (16 >= length) + return; + op += 16; + ip += 16; + do { - assert(diff >= 16 || diff <= -16); - ZSTD_copy16(op, ip); - if (16 >= length) - return; - op += 16; - ip += 16; - do { - { - ZSTD_copy16(op, ip); - op += 16; - ip += 16; - } + ZSTD_copy16(op, ip); + op += 16; + ip += 16; + } - { - ZSTD_copy16(op, ip); - op += 16; - ip += 16; - } - } while (op < oend); - } + { + ZSTD_copy16(op, ip); + op += 16; + ip += 16; + } + } while (op < oend); } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_limitCopy(void* dst, nuint dstCapacity, void* src, nuint srcSize) + 
[MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_limitCopy(void* dst, nuint dstCapacity, void* src, nuint srcSize) + { + nuint length = dstCapacity < srcSize ? dstCapacity : srcSize; + if (length > 0) { - nuint length = dstCapacity < srcSize ? dstCapacity : srcSize; - if (length > 0) - { - memcpy(dst, src, (uint)length); - } - - return length; + memcpy(dst, src, (uint)length); } + + return length; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs index b5a3d7779..e75c84765 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs @@ -1,7 +1,6 @@ using System; using System.Numerics; using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; @@ -9,80 +8,328 @@ #if NET5_0_OR_GREATER using System.Runtime.Intrinsics.Arm; #endif +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /*-************************************* + * Binary Tree search + ***************************************/ + private static void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint mls) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + assert(ip + 8 <= iend); + assert(idx >= ms->window.dictLimit); + for (; idx < target; idx++) + { + /* assumption : ip + 8 <= iend */ + nuint h = ZSTD_hashPtr(@base + 
idx, hashLog, mls); + uint matchIndex = hashTable[h]; + uint* nextCandidatePtr = bt + 2 * (idx & btMask); + uint* sortMarkPtr = nextCandidatePtr + 1; + hashTable[h] = idx; + *nextCandidatePtr = matchIndex; + *sortMarkPtr = 1; + } + + ms->nextToUpdate = target; + } + + /** ZSTD_insertDUBT1() : + * sort one already inserted but unsorted position + * assumption : curr >= btlow == (curr - btmask) + * doesn't fail */ + private static void ZSTD_insertDUBT1( + ZSTD_MatchState_t* ms, + uint curr, + byte* inputEnd, + uint nbCompares, + uint btLow, + ZSTD_dictMode_e dictMode + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* ip = curr >= dictLimit ? @base + curr : dictBase + curr; + byte* iend = curr >= dictLimit ? inputEnd : dictBase + dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + byte* match; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = smallerPtr + 1; + /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ + uint matchIndex = *smallerPtr; + /* to be nullified at the end */ + uint dummy32; + uint windowValid = ms->window.lowLimit; + uint maxDistance = 1U << (int)cParams->windowLog; + uint windowLow = curr - windowValid > maxDistance ? curr - maxDistance : windowValid; + assert(curr >= btLow); + assert(ip < iend); + for (; nbCompares != 0 && matchIndex > windowLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; + assert(matchIndex < curr); + if ( + dictMode != ZSTD_dictMode_e.ZSTD_extDict + || matchIndex + matchLength >= dictLimit + || curr < dictLimit + ) + { + byte* mBase = + dictMode != ZSTD_dictMode_e.ZSTD_extDict + || matchIndex + matchLength >= dictLimit + ? @base + : dictBase; + assert(matchIndex + matchLength >= dictLimit || curr < dictLimit); + match = mBase + matchIndex; + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); + } + else + { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; + } + + if (ip + matchLength == iend) + { + break; + } + + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } + } + + *smallerPtr = *largerPtr = 0; + } + + private static nuint ZSTD_DUBT_findBetterDictMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offsetPtr, + nuint bestLength, + uint nbCompares, + uint mls, + ZSTD_dictMode_e dictMode + ) { - /*-************************************* - * Binary Tree search - ***************************************/ - private static void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, byte* ip, byte* iend, uint mls) + ZSTD_MatchState_t* dms = ms->dictMatchState; + ZSTD_compressionParameters* dmsCParams = &dms->cParams; + uint* dictHashTable = dms->hashTable; + uint hashLog = dmsCParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint dictMatchIndex = dictHashTable[h]; + byte* @base = ms->window.@base; + 
byte* prefixStart = @base + ms->window.dictLimit; + uint curr = (uint)(ip - @base); + byte* dictBase = dms->window.@base; + byte* dictEnd = dms->window.nextSrc; + uint dictHighLimit = (uint)(dms->window.nextSrc - dms->window.@base); + uint dictLowLimit = dms->window.lowLimit; + uint dictIndexDelta = ms->window.lowLimit - dictHighLimit; + uint* dictBt = dms->chainTable; + uint btLog = dmsCParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint btLow = + btMask >= dictHighLimit - dictLowLimit ? dictLowLimit : dictHighLimit - btMask; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; + assert(dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState); + for (; nbCompares != 0 && dictMatchIndex > dictLowLimit; --nbCompares) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hashLog = cParams->hashLog; - uint* bt = ms->chainTable; - uint btLog = cParams->chainLog - 1; - uint btMask = (uint)((1 << (int)btLog) - 1); - byte* @base = ms->window.@base; - uint target = (uint)(ip - @base); - uint idx = ms->nextToUpdate; - assert(ip + 8 <= iend); - assert(idx >= ms->window.dictLimit); - for (; idx < target; idx++) + uint* nextPtr = dictBt + 2 * (dictMatchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; + byte* match = dictBase + dictMatchIndex; + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); + if (dictMatchIndex + matchLength >= dictHighLimit) + match = @base + dictMatchIndex + dictIndexDelta; + if (matchLength > bestLength) + { + uint matchIndex = dictMatchIndex + dictIndexDelta; + if ( + 4 * (int)(matchLength - bestLength) + > (int)( + ZSTD_highbit32(curr - matchIndex + 1) + - ZSTD_highbit32((uint)offsetPtr[0] + 1) + ) + ) + { + bestLength = matchLength; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + } + + if (ip + matchLength == iend) + { + break; + } + } + + if (match[matchLength] < ip[matchLength]) + { + if (dictMatchIndex <= btLow) + { + break; + } + + commonLengthSmaller = matchLength; + dictMatchIndex = nextPtr[1]; + } + else { - /* assumption : ip + 8 <= iend */ - nuint h = ZSTD_hashPtr(@base + idx, hashLog, mls); - uint matchIndex = hashTable[h]; - uint* nextCandidatePtr = bt + 2 * (idx & btMask); - uint* sortMarkPtr = nextCandidatePtr + 1; - hashTable[h] = idx; - *nextCandidatePtr = matchIndex; - *sortMarkPtr = 1; + if (dictMatchIndex <= btLow) + { + break; + } + + commonLengthLarger = matchLength; + dictMatchIndex = nextPtr[0]; } + } + + if (bestLength >= 3) + { + assert(*offsetPtr > 3); + uint mIndex = curr - (uint)(*offsetPtr - 3); + } + + return bestLength; + } + + private static nuint ZSTD_DUBT_findBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offBasePtr, + uint mls, + ZSTD_dictMode_e dictMode + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint matchIndex = hashTable[h]; + byte* @base = ms->window.@base; + uint curr = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); + uint* bt = ms->chainTable; + uint btLog = 
cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint btLow = btMask >= curr ? 0 : curr - btMask; + uint unsortLimit = btLow > windowLow ? btLow : windowLow; + uint* nextCandidate = bt + 2 * (matchIndex & btMask); + uint* unsortedMark = bt + 2 * (matchIndex & btMask) + 1; + uint nbCompares = 1U << (int)cParams->searchLog; + uint nbCandidates = nbCompares; + uint previousCandidate = 0; + assert(ip <= iend - 8); + assert(dictMode != ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); + while (matchIndex > unsortLimit && *unsortedMark == 1 && nbCandidates > 1) + { + *unsortedMark = previousCandidate; + previousCandidate = matchIndex; + matchIndex = *nextCandidate; + nextCandidate = bt + 2 * (matchIndex & btMask); + unsortedMark = bt + 2 * (matchIndex & btMask) + 1; + nbCandidates--; + } + + if (matchIndex > unsortLimit && *unsortedMark == 1) + { + *nextCandidate = *unsortedMark = 0; + } - ms->nextToUpdate = target; + matchIndex = previousCandidate; + while (matchIndex != 0) + { + uint* nextCandidateIdxPtr = bt + 2 * (matchIndex & btMask) + 1; + uint nextCandidateIdx = *nextCandidateIdxPtr; + ZSTD_insertDUBT1(ms, matchIndex, iend, nbCandidates, unsortLimit, dictMode); + matchIndex = nextCandidateIdx; + nbCandidates++; } - /** ZSTD_insertDUBT1() : - * sort one already inserted but unsorted position - * assumption : curr >= btlow == (curr - btmask) - * doesn't fail */ - private static void ZSTD_insertDUBT1( - ZSTD_MatchState_t* ms, - uint curr, - byte* inputEnd, - uint nbCompares, - uint btLow, - ZSTD_dictMode_e dictMode - ) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* bt = ms->chainTable; - uint btLog = cParams->chainLog - 1; - uint btMask = (uint)((1 << (int)btLog) - 1); nuint commonLengthSmaller = 0, - commonLengthLarger = 0; - byte* @base = ms->window.@base; + commonLengthLarger = 0; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; - byte* ip = curr >= dictLimit ? 
@base + curr : dictBase + curr; - byte* iend = curr >= dictLimit ? inputEnd : dictBase + dictLimit; byte* dictEnd = dictBase + dictLimit; byte* prefixStart = @base + dictLimit; - byte* match; uint* smallerPtr = bt + 2 * (curr & btMask); - uint* largerPtr = smallerPtr + 1; - /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ - uint matchIndex = *smallerPtr; + uint* largerPtr = bt + 2 * (curr & btMask) + 1; + uint matchEndIdx = curr + 8 + 1; /* to be nullified at the end */ uint dummy32; - uint windowValid = ms->window.lowLimit; - uint maxDistance = 1U << (int)cParams->windowLog; - uint windowLow = curr - windowValid > maxDistance ? curr - maxDistance : windowValid; - assert(curr >= btLow); - assert(ip < iend); + nuint bestLength = 0; + matchIndex = hashTable[h]; + hashTable[h] = curr; for (; nbCompares != 0 && matchIndex > windowLow; --nbCompares) { uint* nextPtr = bt + 2 * (matchIndex & btMask); @@ -91,20 +338,13 @@ ZSTD_dictMode_e dictMode commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; - assert(matchIndex < curr); + byte* match; if ( dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex + matchLength >= dictLimit - || curr < dictLimit ) { - byte* mBase = - dictMode != ZSTD_dictMode_e.ZSTD_extDict - || matchIndex + matchLength >= dictLimit - ? 
@base - : dictBase; - assert(matchIndex + matchLength >= dictLimit || curr < dictLimit); - match = mBase + matchIndex; + match = @base + matchIndex; matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); } else @@ -121,9 +361,32 @@ ZSTD_dictMode_e dictMode match = @base + matchIndex; } - if (ip + matchLength == iend) + if (matchLength > bestLength) { - break; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + if ( + 4 * (int)(matchLength - bestLength) + > (int)( + ZSTD_highbit32(curr - matchIndex + 1) + - ZSTD_highbit32((uint)*offBasePtr) + ) + ) + { + bestLength = matchLength; + assert(curr - matchIndex > 0); + *offBasePtr = curr - matchIndex + 3; + } + + if (ip + matchLength == iend) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + nbCompares = 0; + } + + break; + } } if (match[matchLength] < ip[matchLength]) @@ -155,504 +418,285 @@ ZSTD_dictMode_e dictMode } *smallerPtr = *largerPtr = 0; - } - - private static nuint ZSTD_DUBT_findBetterDictMatch( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iend, - nuint* offsetPtr, - nuint bestLength, - uint nbCompares, - uint mls, - ZSTD_dictMode_e dictMode - ) - { - ZSTD_MatchState_t* dms = ms->dictMatchState; - ZSTD_compressionParameters* dmsCParams = &dms->cParams; - uint* dictHashTable = dms->hashTable; - uint hashLog = dmsCParams->hashLog; - nuint h = ZSTD_hashPtr(ip, hashLog, mls); - uint dictMatchIndex = dictHashTable[h]; - byte* @base = ms->window.@base; - byte* prefixStart = @base + ms->window.dictLimit; - uint curr = (uint)(ip - @base); - byte* dictBase = dms->window.@base; - byte* dictEnd = dms->window.nextSrc; - uint dictHighLimit = (uint)(dms->window.nextSrc - dms->window.@base); - uint dictLowLimit = dms->window.lowLimit; - uint dictIndexDelta = ms->window.lowLimit - dictHighLimit; - uint* dictBt = dms->chainTable; - uint btLog = dmsCParams->chainLog - 1; - uint btMask = (uint)((1 << (int)btLog) - 1); - uint btLow = - btMask >= 
dictHighLimit - dictLowLimit ? dictLowLimit : dictHighLimit - btMask; - nuint commonLengthSmaller = 0, - commonLengthLarger = 0; - assert(dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState); - for (; nbCompares != 0 && dictMatchIndex > dictLowLimit; --nbCompares) + assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) { - uint* nextPtr = dictBt + 2 * (dictMatchIndex & btMask); - /* guaranteed minimum nb of common bytes */ - nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; - byte* match = dictBase + dictMatchIndex; - matchLength += ZSTD_count_2segments( - ip + matchLength, - match + matchLength, + bestLength = ZSTD_DUBT_findBetterDictMatch( + ms, + ip, iend, - dictEnd, - prefixStart + offBasePtr, + bestLength, + nbCompares, + mls, + dictMode ); - if (dictMatchIndex + matchLength >= dictHighLimit) - match = @base + dictMatchIndex + dictIndexDelta; - if (matchLength > bestLength) - { - uint matchIndex = dictMatchIndex + dictIndexDelta; - if ( - 4 * (int)(matchLength - bestLength) - > (int)( - ZSTD_highbit32(curr - matchIndex + 1) - - ZSTD_highbit32((uint)offsetPtr[0] + 1) - ) - ) - { - bestLength = matchLength; - assert(curr - matchIndex > 0); - *offsetPtr = curr - matchIndex + 3; - } - - if (ip + matchLength == iend) - { - break; - } - } - - if (match[matchLength] < ip[matchLength]) - { - if (dictMatchIndex <= btLow) - { - break; - } - - commonLengthSmaller = matchLength; - dictMatchIndex = nextPtr[1]; - } - else - { - if (dictMatchIndex <= btLow) - { - break; - } - - commonLengthLarger = matchLength; - dictMatchIndex = nextPtr[0]; - } } + assert(matchEndIdx > curr + 8); + ms->nextToUpdate = matchEndIdx - 8; if (bestLength >= 3) { - assert(*offsetPtr > 3); - uint mIndex = curr - (uint)(*offsetPtr - 3); + assert(*offBasePtr > 3); + uint mIndex = curr - (uint)(*offBasePtr - 3); } return bestLength; } + } - private static nuint 
ZSTD_DUBT_findBestMatch( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iend, - nuint* offBasePtr, - uint mls, - ZSTD_dictMode_e dictMode - ) - { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hashLog = cParams->hashLog; - nuint h = ZSTD_hashPtr(ip, hashLog, mls); - uint matchIndex = hashTable[h]; - byte* @base = ms->window.@base; - uint curr = (uint)(ip - @base); - uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); - uint* bt = ms->chainTable; - uint btLog = cParams->chainLog - 1; - uint btMask = (uint)((1 << (int)btLog) - 1); - uint btLow = btMask >= curr ? 0 : curr - btMask; - uint unsortLimit = btLow > windowLow ? btLow : windowLow; - uint* nextCandidate = bt + 2 * (matchIndex & btMask); - uint* unsortedMark = bt + 2 * (matchIndex & btMask) + 1; - uint nbCompares = 1U << (int)cParams->searchLog; - uint nbCandidates = nbCompares; - uint previousCandidate = 0; - assert(ip <= iend - 8); - assert(dictMode != ZSTD_dictMode_e.ZSTD_dedicatedDictSearch); - while (matchIndex > unsortLimit && *unsortedMark == 1 && nbCandidates > 1) - { - *unsortedMark = previousCandidate; - previousCandidate = matchIndex; - matchIndex = *nextCandidate; - nextCandidate = bt + 2 * (matchIndex & btMask); - unsortedMark = bt + 2 * (matchIndex & btMask) + 1; - nbCandidates--; - } + /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_BtFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr, + uint mls, + ZSTD_dictMode_e dictMode + ) + { + if (ip < ms->window.@base + ms->nextToUpdate) + return 0; + ZSTD_updateDUBT(ms, ip, iLimit, mls); + return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); + } - if (matchIndex > unsortLimit && *unsortedMark == 1) + /*********************************** + * Dedicated dict search + ***********************************/ + private static void 
ZSTD_dedicatedDictSearch_lazy_loadDictionary( + ZSTD_MatchState_t* ms, + byte* ip + ) + { + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint* hashTable = ms->hashTable; + uint* chainTable = ms->chainTable; + uint chainSize = (uint)(1 << (int)ms->cParams.chainLog); + uint idx = ms->nextToUpdate; + uint minChain = chainSize < target - idx ? target - chainSize : idx; + const uint bucketSize = 1 << 2; + uint cacheSize = bucketSize - 1; + uint chainAttempts = (uint)(1 << (int)ms->cParams.searchLog) - cacheSize; + uint chainLimit = chainAttempts > 255 ? 255 : chainAttempts; + /* We know the hashtable is oversized by a factor of `bucketSize`. + * We are going to temporarily pretend `bucketSize == 1`, keeping only a + * single entry. We will use the rest of the space to construct a temporary + * chaintable. + */ + uint hashLog = ms->cParams.hashLog - 2; + uint* tmpHashTable = hashTable; + uint* tmpChainTable = hashTable + ((nuint)1 << (int)hashLog); + uint tmpChainSize = (uint)((1 << 2) - 1) << (int)hashLog; + uint tmpMinChain = tmpChainSize < target ? 
target - tmpChainSize : idx; + uint hashIdx; + assert(ms->cParams.chainLog <= 24); + assert(ms->cParams.hashLog > ms->cParams.chainLog); + assert(idx != 0); + assert(tmpMinChain <= minChain); + for (; idx < target; idx++) + { + uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch); + if (idx >= tmpMinChain) { - *nextCandidate = *unsortedMark = 0; + tmpChainTable[idx - tmpMinChain] = hashTable[h]; } - matchIndex = previousCandidate; - while (matchIndex != 0) - { - uint* nextCandidateIdxPtr = bt + 2 * (matchIndex & btMask) + 1; - uint nextCandidateIdx = *nextCandidateIdxPtr; - ZSTD_insertDUBT1(ms, matchIndex, iend, nbCandidates, unsortLimit, dictMode); - matchIndex = nextCandidateIdx; - nbCandidates++; - } + tmpHashTable[h] = idx; + } + { + uint chainPos = 0; + for (hashIdx = 0; hashIdx < 1U << (int)hashLog; hashIdx++) { - nuint commonLengthSmaller = 0, - commonLengthLarger = 0; - byte* dictBase = ms->window.dictBase; - uint dictLimit = ms->window.dictLimit; - byte* dictEnd = dictBase + dictLimit; - byte* prefixStart = @base + dictLimit; - uint* smallerPtr = bt + 2 * (curr & btMask); - uint* largerPtr = bt + 2 * (curr & btMask) + 1; - uint matchEndIdx = curr + 8 + 1; - /* to be nullified at the end */ - uint dummy32; - nuint bestLength = 0; - matchIndex = hashTable[h]; - hashTable[h] = curr; - for (; nbCompares != 0 && matchIndex > windowLow; --nbCompares) + uint count; + uint countBeyondMinChain = 0; + uint i = tmpHashTable[hashIdx]; + for (count = 0; i >= tmpMinChain && count < cacheSize; count++) { - uint* nextPtr = bt + 2 * (matchIndex & btMask); - /* guaranteed minimum nb of common bytes */ - nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? 
commonLengthSmaller - : commonLengthLarger; - byte* match; - if ( - dictMode != ZSTD_dictMode_e.ZSTD_extDict - || matchIndex + matchLength >= dictLimit - ) - { - match = @base + matchIndex; - matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); - } - else + if (i < minChain) { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments( - ip + matchLength, - match + matchLength, - iend, - dictEnd, - prefixStart - ); - if (matchIndex + matchLength >= dictLimit) - match = @base + matchIndex; + countBeyondMinChain++; } - if (matchLength > bestLength) - { - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (uint)matchLength; - if ( - 4 * (int)(matchLength - bestLength) - > (int)( - ZSTD_highbit32(curr - matchIndex + 1) - - ZSTD_highbit32((uint)*offBasePtr) - ) - ) - { - bestLength = matchLength; - assert(curr - matchIndex > 0); - *offBasePtr = curr - matchIndex + 3; - } + i = tmpChainTable[i - tmpMinChain]; + } - if (ip + matchLength == iend) + if (count == cacheSize) + { + for (count = 0; count < chainLimit; ) + { + if (i < minChain) { - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + if (i == 0 || ++countBeyondMinChain > cacheSize) { - nbCompares = 0; + break; } - - break; - } - } - - if (match[matchLength] < ip[matchLength]) - { - *smallerPtr = matchIndex; - commonLengthSmaller = matchLength; - if (matchIndex <= btLow) - { - smallerPtr = &dummy32; - break; } - smallerPtr = nextPtr + 1; - matchIndex = nextPtr[1]; - } - else - { - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) + chainTable[chainPos++] = i; + count++; + if (i < tmpMinChain) { - largerPtr = &dummy32; break; } - largerPtr = nextPtr; - matchIndex = nextPtr[0]; + i = tmpChainTable[i - tmpMinChain]; } } - - *smallerPtr = *largerPtr = 0; - assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) + else { - bestLength = ZSTD_DUBT_findBetterDictMatch( - ms, - ip, - iend, - offBasePtr, - bestLength, - nbCompares, - mls, - dictMode - ); + count = 0; } - assert(matchEndIdx > curr + 8); - ms->nextToUpdate = matchEndIdx - 8; - if (bestLength >= 3) + if (count != 0) { - assert(*offBasePtr > 3); - uint mIndex = curr - (uint)(*offBasePtr - 3); + tmpHashTable[hashIdx] = (chainPos - count << 8) + count; } + else + { + tmpHashTable[hashIdx] = 0; + } + } - return bestLength; + assert(chainPos <= chainSize); + } + + for (hashIdx = (uint)(1 << (int)hashLog); hashIdx != 0; ) + { + uint bucketIdx = --hashIdx << 2; + uint chainPackedPointer = tmpHashTable[hashIdx]; + uint i; + for (i = 0; i < cacheSize; i++) + { + hashTable[bucketIdx + i] = 0; } + + hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; } - /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_BtFindBestMatch( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr, - uint mls, - ZSTD_dictMode_e dictMode - ) + for (idx = ms->nextToUpdate; idx < target; idx++) + { + uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch) << 2; + uint i; + for (i = cacheSize - 1; i != 0; i--) + hashTable[h + i] = hashTable[h + i - 1]; + hashTable[h] = idx; + } + + ms->nextToUpdate = target; + } + + /* Returns the longest match length found in the dedicated dict search structure. + * If none are longer than the argument ml, then ml will be returned. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_dedicatedDictSearch_lazy_search( + nuint* offsetPtr, + nuint ml, + uint nbAttempts, + ZSTD_MatchState_t* dms, + byte* ip, + byte* iLimit, + byte* prefixStart, + uint curr, + uint dictLimit, + nuint ddsIdx + ) + { + uint ddsLowestIndex = dms->window.dictLimit; + byte* ddsBase = dms->window.@base; + byte* ddsEnd = dms->window.nextSrc; + uint ddsSize = (uint)(ddsEnd - ddsBase); + uint ddsIndexDelta = dictLimit - ddsSize; + const uint bucketSize = 1 << 2; + uint bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1; + uint ddsAttempt; + uint matchIndex; + for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) { - if (ip < ms->window.@base + ms->nextToUpdate) - return 0; - ZSTD_updateDUBT(ms, ip, iLimit, mls); - return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); + } +#endif } - /*********************************** - * Dedicated dict search - ***********************************/ - private static void ZSTD_dedicatedDictSearch_lazy_loadDictionary( - ZSTD_MatchState_t* ms, - byte* ip - ) { - byte* @base = ms->window.@base; - uint target = (uint)(ip - @base); - uint* hashTable = ms->hashTable; - uint* chainTable = ms->chainTable; - uint chainSize = (uint)(1 << (int)ms->cParams.chainLog); - uint idx = ms->nextToUpdate; - uint minChain = chainSize < target - idx ? target - chainSize : idx; - const uint bucketSize = 1 << 2; - uint cacheSize = bucketSize - 1; - uint chainAttempts = (uint)(1 << (int)ms->cParams.searchLog) - cacheSize; - uint chainLimit = chainAttempts > 255 ? 255 : chainAttempts; - /* We know the hashtable is oversized by a factor of `bucketSize`. - * We are going to temporarily pretend `bucketSize == 1`, keeping only a - * single entry. 
We will use the rest of the space to construct a temporary - * chaintable. - */ - uint hashLog = ms->cParams.hashLog - 2; - uint* tmpHashTable = hashTable; - uint* tmpChainTable = hashTable + ((nuint)1 << (int)hashLog); - uint tmpChainSize = (uint)((1 << 2) - 1) << (int)hashLog; - uint tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx; - uint hashIdx; - assert(ms->cParams.chainLog <= 24); - assert(ms->cParams.hashLog > ms->cParams.chainLog); - assert(idx != 0); - assert(tmpMinChain <= minChain); - for (; idx < target; idx++) - { - uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch); - if (idx >= tmpMinChain) + uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + uint chainIndex = chainPackedPointer >> 8; +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) { - tmpChainTable[idx - tmpMinChain] = hashTable[h]; + Sse.Prefetch0(&dms->chainTable[chainIndex]); } +#endif + } - tmpHashTable[h] = idx; + for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) + { + nuint currentMl = 0; + byte* match; + matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; + match = ddsBase + matchIndex; + if (matchIndex == 0) + { + return ml; + } + + assert(matchIndex >= ddsLowestIndex); + assert(match + 4 <= ddsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + { + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; } + if (currentMl > ml) { - uint chainPos = 0; - for (hashIdx = 0; hashIdx < 1U << (int)hashLog; hashIdx++) + ml = currentMl; + assert(curr - (matchIndex + ddsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + ddsIndexDelta) + 3; + if (ip + currentMl == iLimit) { - uint count; - uint countBeyondMinChain = 0; - uint i = tmpHashTable[hashIdx]; - for (count = 0; i >= tmpMinChain && count < cacheSize; count++) - { - if (i < minChain) - { - countBeyondMinChain++; - } + return ml; + } + } + } - i = tmpChainTable[i - tmpMinChain]; - } - - if (count == cacheSize) - { - for (count = 0; count < 
chainLimit; ) - { - if (i < minChain) - { - if (i == 0 || ++countBeyondMinChain > cacheSize) - { - break; - } - } - - chainTable[chainPos++] = i; - count++; - if (i < tmpMinChain) - { - break; - } - - i = tmpChainTable[i - tmpMinChain]; - } - } - else - { - count = 0; - } - - if (count != 0) - { - tmpHashTable[hashIdx] = (chainPos - count << 8) + count; - } - else - { - tmpHashTable[hashIdx] = 0; - } - } - - assert(chainPos <= chainSize); - } - - for (hashIdx = (uint)(1 << (int)hashLog); hashIdx != 0; ) - { - uint bucketIdx = --hashIdx << 2; - uint chainPackedPointer = tmpHashTable[hashIdx]; - uint i; - for (i = 0; i < cacheSize; i++) - { - hashTable[bucketIdx + i] = 0; - } - - hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer; - } - - for (idx = ms->nextToUpdate; idx < target; idx++) - { - uint h = (uint)ZSTD_hashPtr(@base + idx, hashLog, ms->cParams.minMatch) << 2; - uint i; - for (i = cacheSize - 1; i != 0; i--) - hashTable[h + i] = hashTable[h + i - 1]; - hashTable[h] = idx; - } - - ms->nextToUpdate = target; - } - - /* Returns the longest match length found in the dedicated dict search structure. - * If none are longer than the argument ml, then ml will be returned. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_dedicatedDictSearch_lazy_search( - nuint* offsetPtr, - nuint ml, - uint nbAttempts, - ZSTD_MatchState_t* dms, - byte* ip, - byte* iLimit, - byte* prefixStart, - uint curr, - uint dictLimit, - nuint ddsIdx - ) { - uint ddsLowestIndex = dms->window.dictLimit; - byte* ddsBase = dms->window.@base; - byte* ddsEnd = dms->window.nextSrc; - uint ddsSize = (uint)(ddsEnd - ddsBase); - uint ddsIndexDelta = dictLimit - ddsSize; - const uint bucketSize = 1 << 2; - uint bucketLimit = nbAttempts < bucketSize - 1 ? 
nbAttempts : bucketSize - 1; - uint ddsAttempt; - uint matchIndex; - for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) - { -#if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); - } -#endif - } - + uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; + uint chainIndex = chainPackedPointer >> 8; + uint chainLength = chainPackedPointer & 0xFF; + uint chainAttempts = nbAttempts - ddsAttempt; + uint chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts; + uint chainAttempt; + for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++) { - uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; - uint chainIndex = chainPackedPointer >> 8; #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(&dms->chainTable[chainIndex]); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->chainTable[chainIndex + chainAttempt]); + } #endif } - for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) + for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++, chainIndex++) { nuint currentMl = 0; byte* match; - matchIndex = dms->hashTable[ddsIdx + ddsAttempt]; + matchIndex = dms->chainTable[chainIndex]; match = ddsBase + matchIndex; - if (matchIndex == 0) - { - return ml; - } - assert(matchIndex >= ddsLowestIndex); assert(match + 4 <= ddsEnd); if (MEM_read32(match) == MEM_read32(ip)) { currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + + 4; } if (currentMl > ml) @@ -661,287 +705,243 @@ nuint ddsIdx assert(curr - (matchIndex + ddsIndexDelta) > 0); *offsetPtr = curr - (matchIndex + ddsIndexDelta) + 3; if (ip + currentMl == iLimit) - { - return ml; - } - } - } - - { - uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; - uint chainIndex = chainPackedPointer >> 8; - uint chainLength = chainPackedPointer & 
0xFF; - uint chainAttempts = nbAttempts - ddsAttempt; - uint chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts; - uint chainAttempt; - for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++) - { -#if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(ddsBase + dms->chainTable[chainIndex + chainAttempt]); - } -#endif - } - - for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++, chainIndex++) - { - nuint currentMl = 0; - byte* match; - matchIndex = dms->chainTable[chainIndex]; - match = ddsBase + matchIndex; - assert(matchIndex >= ddsLowestIndex); - assert(match + 4 <= ddsEnd); - if (MEM_read32(match) == MEM_read32(ip)) - { - currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) - + 4; - } - - if (currentMl > ml) - { - ml = currentMl; - assert(curr - (matchIndex + ddsIndexDelta) > 0); - *offsetPtr = curr - (matchIndex + ddsIndexDelta) + 3; - if (ip + currentMl == iLimit) - break; - } + break; } } - - return ml; } - /* Update chains up to ip (excluded) - Assumption : always within prefix (i.e. 
not within extDict) */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_insertAndFindFirstIndex_internal( - ZSTD_MatchState_t* ms, - ZSTD_compressionParameters* cParams, - byte* ip, - uint mls, - uint lazySkipping - ) - { - uint* hashTable = ms->hashTable; - uint hashLog = cParams->hashLog; - uint* chainTable = ms->chainTable; - uint chainMask = (uint)((1 << (int)cParams->chainLog) - 1); - byte* @base = ms->window.@base; - uint target = (uint)(ip - @base); - uint idx = ms->nextToUpdate; - while (idx < target) - { - nuint h = ZSTD_hashPtr(@base + idx, hashLog, mls); - chainTable[idx & chainMask] = hashTable[h]; - hashTable[h] = idx; - idx++; - if (lazySkipping != 0) - break; - } - - ms->nextToUpdate = target; - return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; - } + return ml; + } - private static uint ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, byte* ip) + /* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. not within extDict) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_insertAndFindFirstIndex_internal( + ZSTD_MatchState_t* ms, + ZSTD_compressionParameters* cParams, + byte* ip, + uint mls, + uint lazySkipping + ) + { + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + uint* chainTable = ms->chainTable; + uint chainMask = (uint)((1 << (int)cParams->chainLog) - 1); + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + while (idx < target) { - ZSTD_compressionParameters* cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, 0); + nuint h = ZSTD_hashPtr(@base + idx, hashLog, mls); + chainTable[idx & chainMask] = hashTable[h]; + hashTable[h] = idx; + idx++; + if (lazySkipping != 0) + break; } - /* inlining is important to hardwire a hot branch (template emulation) */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint 
ZSTD_HcFindBestMatch( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr, - uint mls, - ZSTD_dictMode_e dictMode - ) + ms->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; + } + + private static uint ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, byte* ip) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, 0); + } + + /* inlining is important to hardwire a hot branch (template emulation) */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_HcFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr, + uint mls, + ZSTD_dictMode_e dictMode + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* chainTable = ms->chainTable; + uint chainSize = (uint)(1 << (int)cParams->chainLog); + uint chainMask = chainSize - 1; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictEnd = dictBase + dictLimit; + uint curr = (uint)(ip - @base); + uint maxDistance = 1U << (int)cParams->windowLog; + uint lowestValid = ms->window.lowLimit; + uint withinMaxDistance = + curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; + uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; + uint minChain = curr > chainSize ? curr - chainSize : 0; + uint nbAttempts = 1U << (int)cParams->searchLog; + nuint ml = 4 - 1; + ZSTD_MatchState_t* dms = ms->dictMatchState; + uint ddsHashLog = + dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - 2 : 0; + nuint ddsIdx = + dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ? 
ZSTD_hashPtr(ip, ddsHashLog, mls) << 2 + : 0; + uint matchIndex; + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* chainTable = ms->chainTable; - uint chainSize = (uint)(1 << (int)cParams->chainLog); - uint chainMask = chainSize - 1; - byte* @base = ms->window.@base; - byte* dictBase = ms->window.dictBase; - uint dictLimit = ms->window.dictLimit; - byte* prefixStart = @base + dictLimit; - byte* dictEnd = dictBase + dictLimit; - uint curr = (uint)(ip - @base); - uint maxDistance = 1U << (int)cParams->windowLog; - uint lowestValid = ms->window.lowLimit; - uint withinMaxDistance = - curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; - uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; - uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; - uint minChain = curr > chainSize ? curr - chainSize : 0; - uint nbAttempts = 1U << (int)cParams->searchLog; - nuint ml = 4 - 1; - ZSTD_MatchState_t* dms = ms->dictMatchState; - uint ddsHashLog = - dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? dms->cParams.hashLog - 2 : 0; - nuint ddsIdx = - dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ? 
ZSTD_hashPtr(ip, ddsHashLog, mls) << 2 - : 0; - uint matchIndex; - if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) - { - uint* entry = &dms->hashTable[ddsIdx]; + uint* entry = &dms->hashTable[ddsIdx]; #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { Sse.Prefetch0(entry); } #endif + } + + matchIndex = ZSTD_insertAndFindFirstIndex_internal( + ms, + cParams, + ip, + mls, + (uint)ms->lazySkipping + ); + for (; matchIndex >= lowLimit && nbAttempts > 0; nbAttempts--) + { + nuint currentMl = 0; + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { + byte* match = @base + matchIndex; + assert(matchIndex >= dictLimit); + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) + currentMl = ZSTD_count(ip, match, iLimit); + } + else + { + byte* match = dictBase + matchIndex; + assert(match + 4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + + 4; } - matchIndex = ZSTD_insertAndFindFirstIndex_internal( - ms, - cParams, + if (currentMl > ml) + { + ml = currentMl; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + if (ip + currentMl == iLimit) + break; + } + + if (matchIndex <= minChain) + break; + matchIndex = chainTable[matchIndex & chainMask]; + } + + assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + ml = ZSTD_dedicatedDictSearch_lazy_search( + offsetPtr, + ml, + nbAttempts, + dms, ip, - mls, - (uint)ms->lazySkipping + iLimit, + prefixStart, + curr, + dictLimit, + ddsIdx ); - for (; matchIndex >= lowLimit && nbAttempts > 0; nbAttempts--) + } + else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + uint* dmsChainTable = dms->chainTable; + uint dmsChainSize = (uint)(1 << (int)dms->cParams.chainLog); + uint dmsChainMask = dmsChainSize - 1; + uint dmsLowestIndex = dms->window.dictLimit; + byte* dmsBase = dms->window.@base; + byte* dmsEnd = dms->window.nextSrc; + uint dmsSize = (uint)(dmsEnd - dmsBase); + uint dmsIndexDelta = dictLimit - dmsSize; + uint dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0; + matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; + for (; matchIndex >= dmsLowestIndex && nbAttempts > 0; nbAttempts--) { nuint currentMl = 0; - if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) - { - byte* match = @base + matchIndex; - assert(matchIndex >= dictLimit); - if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) - currentMl = ZSTD_count(ip, match, iLimit); - } - else - { - byte* match = dictBase + matchIndex; - assert(match + 4 <= dictEnd); - if (MEM_read32(match) == MEM_read32(ip)) - currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) - + 4; - } - + byte* match = dmsBase + matchIndex; + assert(match + 4 <= dmsEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + + 4; if (currentMl > ml) { ml = currentMl; - assert(curr - matchIndex > 0); - *offsetPtr = curr - matchIndex + 3; + assert(curr > matchIndex + dmsIndexDelta); + assert(curr - (matchIndex + dmsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; if (ip + currentMl == iLimit) break; } - if (matchIndex <= 
minChain) + if (matchIndex <= dmsMinChain) break; - matchIndex = chainTable[matchIndex & chainMask]; - } - - assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); - if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) - { - ml = ZSTD_dedicatedDictSearch_lazy_search( - offsetPtr, - ml, - nbAttempts, - dms, - ip, - iLimit, - prefixStart, - curr, - dictLimit, - ddsIdx - ); - } - else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) - { - uint* dmsChainTable = dms->chainTable; - uint dmsChainSize = (uint)(1 << (int)dms->cParams.chainLog); - uint dmsChainMask = dmsChainSize - 1; - uint dmsLowestIndex = dms->window.dictLimit; - byte* dmsBase = dms->window.@base; - byte* dmsEnd = dms->window.nextSrc; - uint dmsSize = (uint)(dmsEnd - dmsBase); - uint dmsIndexDelta = dictLimit - dmsSize; - uint dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0; - matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; - for (; matchIndex >= dmsLowestIndex && nbAttempts > 0; nbAttempts--) - { - nuint currentMl = 0; - byte* match = dmsBase + matchIndex; - assert(match + 4 <= dmsEnd); - if (MEM_read32(match) == MEM_read32(ip)) - currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) - + 4; - if (currentMl > ml) - { - ml = currentMl; - assert(curr > matchIndex + dmsIndexDelta); - assert(curr - (matchIndex + dmsIndexDelta) > 0); - *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; - if (ip + currentMl == iLimit) - break; - } - - if (matchIndex <= dmsMinChain) - break; - matchIndex = dmsChainTable[matchIndex & dmsChainMask]; - } + matchIndex = dmsChainTable[matchIndex & dmsChainMask]; } - - return ml; } - /* ZSTD_VecMask_next(): - * Starting from the LSB, returns the idx of the next non-zero bit. - * Basically counting the nb of trailing zeroes. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_VecMask_next(ulong val) - { - assert(val != 0); - return (uint)BitOperations.TrailingZeroCount(val); - } + return ml; + } - /* ZSTD_row_nextIndex(): - * Returns the next index to insert at within a tagTable row, and updates the "head" - * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_row_nextIndex(byte* tagRow, uint rowMask) - { - uint next = (uint)(*tagRow - 1) & rowMask; - next += next == 0 ? rowMask : 0; - *tagRow = (byte)next; - return next; - } + /* ZSTD_VecMask_next(): + * Starting from the LSB, returns the idx of the next non-zero bit. + * Basically counting the nb of trailing zeroes. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_VecMask_next(ulong val) + { + assert(val != 0); + return (uint)BitOperations.TrailingZeroCount(val); + } - /* ZSTD_isAligned(): - * Checks that a pointer is aligned to "align" bytes which must be a power of 2. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int ZSTD_isAligned(void* ptr, nuint align) - { - assert((align & align - 1) == 0); - return ((nuint)ptr & align - 1) == 0 ? 1 : 0; - } + /* ZSTD_row_nextIndex(): + * Returns the next index to insert at within a tagTable row, and updates the "head" + * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_nextIndex(byte* tagRow, uint rowMask) + { + uint next = (uint)(*tagRow - 1) & rowMask; + next += next == 0 ? rowMask : 0; + *tagRow = (byte)next; + return next; + } - /* ZSTD_row_prefetch(): - * Performs prefetching for the hashTable and tagTable at a given row. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_prefetch( - uint* hashTable, - byte* tagTable, - uint relRow, - uint rowLog - ) - { + /* ZSTD_isAligned(): + * Checks that a pointer is aligned to "align" bytes which must be a power of 2. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int ZSTD_isAligned(void* ptr, nuint align) + { + assert((align & align - 1) == 0); + return ((nuint)ptr & align - 1) == 0 ? 1 : 0; + } + + /* ZSTD_row_prefetch(): + * Performs prefetching for the hashTable and tagTable at a given row. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_prefetch( + uint* hashTable, + byte* tagTable, + uint relRow, + uint rowLog + ) + { #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { @@ -949,15 +949,15 @@ uint rowLog } #endif - if (rowLog >= 5) - { + if (rowLog >= 5) + { #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { Sse.Prefetch0(hashTable + relRow + 16); } #endif - } + } #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) @@ -966,194 +966,194 @@ uint rowLog } #endif - if (rowLog == 6) - { + if (rowLog == 6) + { #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { Sse.Prefetch0(tagTable + relRow + 32); } #endif - } - - assert(rowLog == 4 || rowLog == 5 || rowLog == 6); - assert(ZSTD_isAligned(hashTable + relRow, 64) != 0); - assert(ZSTD_isAligned(tagTable + relRow, (nuint)1 << (int)rowLog) != 0); } - /* ZSTD_row_fillHashCache(): - * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, - * but not beyond iLimit. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_fillHashCache( - ZSTD_MatchState_t* ms, - byte* @base, - uint rowLog, - uint mls, - uint idx, - byte* iLimit - ) + assert(rowLog == 4 || rowLog == 5 || rowLog == 6); + assert(ZSTD_isAligned(hashTable + relRow, 64) != 0); + assert(ZSTD_isAligned(tagTable + relRow, (nuint)1 << (int)rowLog) != 0); + } + + /* ZSTD_row_fillHashCache(): + * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, + * but not beyond iLimit. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_fillHashCache( + ZSTD_MatchState_t* ms, + byte* @base, + uint rowLog, + uint mls, + uint idx, + byte* iLimit + ) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint hashLog = ms->rowHashLog; + uint maxElemsToPrefetch = @base + idx > iLimit ? 0 : (uint)(iLimit - (@base + idx) + 1); + uint lim = idx + (8 < maxElemsToPrefetch ? 8 : maxElemsToPrefetch); + for (; idx < lim; ++idx) { - uint* hashTable = ms->hashTable; - byte* tagTable = ms->tagTable; - uint hashLog = ms->rowHashLog; - uint maxElemsToPrefetch = @base + idx > iLimit ? 0 : (uint)(iLimit - (@base + idx) + 1); - uint lim = idx + (8 < maxElemsToPrefetch ? 8 : maxElemsToPrefetch); - for (; idx < lim; ++idx) - { - uint hash = (uint)ZSTD_hashPtrSalted(@base + idx, hashLog + 8, mls, ms->hashSalt); - uint row = hash >> 8 << (int)rowLog; - ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); - ms->hashCache[idx & 8 - 1] = hash; - } + uint hash = (uint)ZSTD_hashPtrSalted(@base + idx, hashLog + 8, mls, ms->hashSalt); + uint row = hash >> 8 << (int)rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); + ms->hashCache[idx & 8 - 1] = hash; } + } - /* ZSTD_row_nextCachedHash(): - * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at - * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. 
Also prefetches the appropriate rows from hashTable and tagTable. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_row_nextCachedHash( - uint* cache, - uint* hashTable, - byte* tagTable, - byte* @base, - uint idx, - uint hashLog, - uint rowLog, - uint mls, - ulong hashSalt - ) + /* ZSTD_row_nextCachedHash(): + * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at + * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_nextCachedHash( + uint* cache, + uint* hashTable, + byte* tagTable, + byte* @base, + uint idx, + uint hashLog, + uint rowLog, + uint mls, + ulong hashSalt + ) + { + uint newHash = (uint)ZSTD_hashPtrSalted(@base + idx + 8, hashLog + 8, mls, hashSalt); + uint row = newHash >> 8 << (int)rowLog; + ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); { - uint newHash = (uint)ZSTD_hashPtrSalted(@base + idx + 8, hashLog + 8, mls, hashSalt); - uint row = newHash >> 8 << (int)rowLog; - ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); - { - uint hash = cache[idx & 8 - 1]; - cache[idx & 8 - 1] = newHash; - return hash; - } + uint hash = cache[idx & 8 - 1]; + cache[idx & 8 - 1] = newHash; + return hash; } + } - /* ZSTD_row_update_internalImpl(): - * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_update_internalImpl( - ZSTD_MatchState_t* ms, - uint updateStartIdx, - uint updateEndIdx, - uint mls, - uint rowLog, - uint rowMask, - uint useCache - ) + /* ZSTD_row_update_internalImpl(): + * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_update_internalImpl( + ZSTD_MatchState_t* ms, + uint updateStartIdx, + uint updateEndIdx, + uint mls, + uint rowLog, + uint rowMask, + uint useCache + ) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint hashLog = ms->rowHashLog; + byte* @base = ms->window.@base; + for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { - uint* hashTable = ms->hashTable; - byte* tagTable = ms->tagTable; - uint hashLog = ms->rowHashLog; - byte* @base = ms->window.@base; - for (; updateStartIdx < updateEndIdx; ++updateStartIdx) - { - uint hash = - useCache != 0 - ? ZSTD_row_nextCachedHash( - ms->hashCache, - hashTable, - tagTable, - @base, - updateStartIdx, - hashLog, - rowLog, - mls, - ms->hashSalt - ) - : (uint)ZSTD_hashPtrSalted( - @base + updateStartIdx, - hashLog + 8, - mls, - ms->hashSalt - ); - uint relRow = hash >> 8 << (int)rowLog; - uint* row = hashTable + relRow; - byte* tagRow = tagTable + relRow; - uint pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert( - hash - == ZSTD_hashPtrSalted( - @base + updateStartIdx, - hashLog + 8, - mls, - ms->hashSalt - ) - ); - tagRow[pos] = (byte)(hash & (1U << 8) - 1); - row[pos] = updateStartIdx; - } + uint hash = + useCache != 0 + ? 
ZSTD_row_nextCachedHash( + ms->hashCache, + hashTable, + tagTable, + @base, + updateStartIdx, + hashLog, + rowLog, + mls, + ms->hashSalt + ) + : (uint)ZSTD_hashPtrSalted( + @base + updateStartIdx, + hashLog + 8, + mls, + ms->hashSalt + ); + uint relRow = hash >> 8 << (int)rowLog; + uint* row = hashTable + relRow; + byte* tagRow = tagTable + relRow; + uint pos = ZSTD_row_nextIndex(tagRow, rowMask); + assert( + hash + == ZSTD_hashPtrSalted( + @base + updateStartIdx, + hashLog + 8, + mls, + ms->hashSalt + ) + ); + tagRow[pos] = (byte)(hash & (1U << 8) - 1); + row[pos] = updateStartIdx; } + } - /* ZSTD_row_update_internal(): - * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. - * Skips sections of long matches as is necessary. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_update_internal( - ZSTD_MatchState_t* ms, - byte* ip, - uint mls, - uint rowLog, - uint rowMask, - uint useCache - ) + /* ZSTD_row_update_internal(): + * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. + * Skips sections of long matches as is necessary. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_row_update_internal( + ZSTD_MatchState_t* ms, + byte* ip, + uint mls, + uint rowLog, + uint rowMask, + uint useCache + ) + { + uint idx = ms->nextToUpdate; + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + const uint kSkipThreshold = 384; + const uint kMaxMatchStartPositionsToUpdate = 96; + const uint kMaxMatchEndPositionsToUpdate = 32; + if (useCache != 0) { - uint idx = ms->nextToUpdate; - byte* @base = ms->window.@base; - uint target = (uint)(ip - @base); - const uint kSkipThreshold = 384; - const uint kMaxMatchStartPositionsToUpdate = 96; - const uint kMaxMatchEndPositionsToUpdate = 32; - if (useCache != 0) + if (target - idx > kSkipThreshold) { - if (target - idx > kSkipThreshold) - { - uint bound = idx + kMaxMatchStartPositionsToUpdate; - ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); - idx = target - kMaxMatchEndPositionsToUpdate; - ZSTD_row_fillHashCache(ms, @base, rowLog, mls, idx, ip + 1); - } + uint bound = idx + kMaxMatchStartPositionsToUpdate; + ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); + idx = target - kMaxMatchEndPositionsToUpdate; + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, idx, ip + 1); } - - assert(target >= idx); - ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); - ms->nextToUpdate = target; } - /* ZSTD_row_update(): - * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary - * processing. - */ - private static void ZSTD_row_update(ZSTD_MatchState_t* ms, byte* ip) - { - uint rowLog = - ms->cParams.searchLog <= 4 ? 4 - : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog - : 6; - uint rowMask = (1U << (int)rowLog) - 1; - /* mls caps out at 6 */ - uint mls = ms->cParams.minMatch < 6 ? 
ms->cParams.minMatch : 6; - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0); - } + assert(target >= idx); + ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); + ms->nextToUpdate = target; + } - /* Returns the mask width of bits group of which will be set to 1. Given not all - * architectures have easy movemask instruction, this helps to iterate over - * groups of bits easier and faster. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_row_matchMaskGroupWidth(uint rowEntries) - { - assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); - assert(rowEntries <= 64); + /* ZSTD_row_update(): + * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary + * processing. + */ + private static void ZSTD_row_update(ZSTD_MatchState_t* ms, byte* ip) + { + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; + uint rowMask = (1U << (int)rowLog) - 1; + /* mls caps out at 6 */ + uint mls = ms->cParams.minMatch < 6 ? ms->cParams.minMatch : 6; + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0); + } + + /* Returns the mask width of bits group of which will be set to 1. Given not all + * architectures have easy movemask instruction, this helps to iterate over + * groups of bits easier and faster. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_row_matchMaskGroupWidth(uint rowEntries) + { + assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); + assert(rowEntries <= 64); #if NET5_0_OR_GREATER if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) { @@ -1170,8 +1170,8 @@ private static uint ZSTD_row_matchMaskGroupWidth(uint rowEntries) #endif } #endif - return 1; - } + return 1; + } #if NETCOREAPP3_0_OR_GREATER [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -1222,24 +1222,24 @@ private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint } #endif - /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by - * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" - * matches the hash at the nth position in a row of the tagTable. - * Each row is a circular buffer beginning at the value of "headGrouped". So we - * must rotate the "matches" bitfield to match up with the actual layout of the - * entries within the hashTable */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_row_getMatchMask( - byte* tagRow, - byte tag, - uint headGrouped, - uint rowEntries - ) - { - byte* src = tagRow; - assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); - assert(rowEntries <= 64); - assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ulong) * 8); + /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by + * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" + * matches the hash at the nth position in a row of the tagTable. + * Each row is a circular buffer beginning at the value of "headGrouped". 
So we + * must rotate the "matches" bitfield to match up with the actual layout of the + * entries within the hashTable */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_row_getMatchMask( + byte* tagRow, + byte tag, + uint headGrouped, + uint rowEntries + ) + { + byte* src = tagRow; + assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); + assert(rowEntries <= 64); + assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ulong) * 8); #if NETCOREAPP3_0_OR_GREATER if (Sse2.IsSupported) { @@ -1325,246 +1325,332 @@ Vector128 chunk3 } #endif + { + nuint chunkSize = (nuint)sizeof(nuint); + nuint shiftAmount = chunkSize * 8 - chunkSize; + nuint xFF = ~(nuint)0; + nuint x01 = xFF / 0xFF; + nuint x80 = x01 << 7; + nuint splatChar = tag * x01; + ulong matches = 0; + int i = (int)(rowEntries - chunkSize); + assert(sizeof(nuint) == 4 || sizeof(nuint) == 8); + if (BitConverter.IsLittleEndian) { - nuint chunkSize = (nuint)sizeof(nuint); - nuint shiftAmount = chunkSize * 8 - chunkSize; - nuint xFF = ~(nuint)0; - nuint x01 = xFF / 0xFF; - nuint x80 = x01 << 7; - nuint splatChar = tag * x01; - ulong matches = 0; - int i = (int)(rowEntries - chunkSize); - assert(sizeof(nuint) == 4 || sizeof(nuint) == 8); - if (BitConverter.IsLittleEndian) + nuint extractMagic = xFF / 0x7F >> (int)chunkSize; + do { - nuint extractMagic = xFF / 0x7F >> (int)chunkSize; - do - { - nuint chunk = MEM_readST(&src[i]); - chunk ^= splatChar; - chunk = ((chunk | x80) - x01 | chunk) & x80; - matches <<= (int)chunkSize; - matches |= chunk * extractMagic >> (int)shiftAmount; - i -= (int)chunkSize; - } while (i >= 0); - } - else + nuint chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = ((chunk | x80) - x01 | chunk) & x80; + matches <<= (int)chunkSize; + matches |= chunk * extractMagic >> (int)shiftAmount; + i -= (int)chunkSize; + } while (i >= 0); + } + else + { + nuint msb = xFF ^ xFF >> 1; + nuint extractMagic = msb / 0x1FF | msb; + do { - 
nuint msb = xFF ^ xFF >> 1; - nuint extractMagic = msb / 0x1FF | msb; - do - { - nuint chunk = MEM_readST(&src[i]); - chunk ^= splatChar; - chunk = ((chunk | x80) - x01 | chunk) & x80; - matches <<= (int)chunkSize; - matches |= (chunk >> 7) * extractMagic >> (int)shiftAmount; - i -= (int)chunkSize; - } while (i >= 0); - } + nuint chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = ((chunk | x80) - x01 | chunk) & x80; + matches <<= (int)chunkSize; + matches |= (chunk >> 7) * extractMagic >> (int)shiftAmount; + i -= (int)chunkSize; + } while (i >= 0); + } - matches = ~matches; - if (rowEntries == 16) - { - return BitOperations.RotateRight((ushort)matches, (int)headGrouped); - } - else if (rowEntries == 32) - { - return BitOperations.RotateRight((uint)matches, (int)headGrouped); - } - else - { - return BitOperations.RotateRight(matches, (int)headGrouped); - } + matches = ~matches; + if (rowEntries == 16) + { + return BitOperations.RotateRight((ushort)matches, (int)headGrouped); + } + else if (rowEntries == 32) + { + return BitOperations.RotateRight((uint)matches, (int)headGrouped); + } + else + { + return BitOperations.RotateRight(matches, (int)headGrouped); } } + } - /* The high-level approach of the SIMD row based match finder is as follows: - * - Figure out where to insert the new entry: - * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. - * - The hash is salted by a value that changes on every context reset, so when the same table is used - * we will avoid collisions that would otherwise slow us down by introducing phantom matches. - * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines - * which row to insert into. - * - Determine the correct position within the row to insert the entry into. 
Each row of 15 or 31 can - * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes - * per row). - * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and - * generate a bitfield that we can cycle through to check the collisions in the hash table. - * - Pick the longest match. - * - Insert the tag into the equivalent row and position in the tagTable. - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_RowFindBestMatch( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr, - uint mls, - ZSTD_dictMode_e dictMode, - uint rowLog - ) + /* The high-level approach of the SIMD row based match finder is as follows: + * - Figure out where to insert the new entry: + * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. + * - The hash is salted by a value that changes on every context reset, so when the same table is used + * we will avoid collisions that would otherwise slow us down by introducing phantom matches. + * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines + * which row to insert into. + * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can + * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes + * per row). + * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and + * generate a bitfield that we can cycle through to check the collisions in the hash table. + * - Pick the longest match. + * - Insert the tag into the equivalent row and position in the tagTable. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_RowFindBestMatch( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr, + uint mls, + ZSTD_dictMode_e dictMode, + uint rowLog + ) + { + uint* hashTable = ms->hashTable; + byte* tagTable = ms->tagTable; + uint* hashCache = ms->hashCache; + uint hashLog = ms->rowHashLog; + ZSTD_compressionParameters* cParams = &ms->cParams; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictEnd = dictBase + dictLimit; + uint curr = (uint)(ip - @base); + uint maxDistance = 1U << (int)cParams->windowLog; + uint lowestValid = ms->window.lowLimit; + uint withinMaxDistance = + curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; + uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; + uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; + uint rowEntries = 1U << (int)rowLog; + uint rowMask = rowEntries - 1; + /* nb of searches is capped at nb entries per row */ + uint cappedSearchLog = cParams->searchLog < rowLog ? 
cParams->searchLog : rowLog; + uint groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); + ulong hashSalt = ms->hashSalt; + uint nbAttempts = 1U << (int)cappedSearchLog; + nuint ml = 4 - 1; + uint hash; + /* DMS/DDS variables that may be referenced laster */ + ZSTD_MatchState_t* dms = ms->dictMatchState; + /* Initialize the following variables to satisfy static analyzer */ + nuint ddsIdx = 0; + /* cctx hash tables are limited in searches, but allow extra searches into DDS */ + uint ddsExtraAttempts = 0; + uint dmsTag = 0; + uint* dmsRow = null; + byte* dmsTagRow = null; + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) { - uint* hashTable = ms->hashTable; - byte* tagTable = ms->tagTable; - uint* hashCache = ms->hashCache; - uint hashLog = ms->rowHashLog; - ZSTD_compressionParameters* cParams = &ms->cParams; - byte* @base = ms->window.@base; - byte* dictBase = ms->window.dictBase; - uint dictLimit = ms->window.dictLimit; - byte* prefixStart = @base + dictLimit; - byte* dictEnd = dictBase + dictLimit; - uint curr = (uint)(ip - @base); - uint maxDistance = 1U << (int)cParams->windowLog; - uint lowestValid = ms->window.lowLimit; - uint withinMaxDistance = - curr - lowestValid > maxDistance ? curr - maxDistance : lowestValid; - uint isDictionary = ms->loadedDictEnd != 0 ? 1U : 0U; - uint lowLimit = isDictionary != 0 ? lowestValid : withinMaxDistance; - uint rowEntries = 1U << (int)rowLog; - uint rowMask = rowEntries - 1; - /* nb of searches is capped at nb entries per row */ - uint cappedSearchLog = cParams->searchLog < rowLog ? 
cParams->searchLog : rowLog; - uint groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); - ulong hashSalt = ms->hashSalt; - uint nbAttempts = 1U << (int)cappedSearchLog; - nuint ml = 4 - 1; - uint hash; - /* DMS/DDS variables that may be referenced laster */ - ZSTD_MatchState_t* dms = ms->dictMatchState; - /* Initialize the following variables to satisfy static analyzer */ - nuint ddsIdx = 0; - /* cctx hash tables are limited in searches, but allow extra searches into DDS */ - uint ddsExtraAttempts = 0; - uint dmsTag = 0; - uint* dmsRow = null; - byte* dmsTagRow = null; - if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + uint ddsHashLog = dms->cParams.hashLog - 2; { - uint ddsHashLog = dms->cParams.hashLog - 2; - { - ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << 2; + ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << 2; #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { Sse.Prefetch0(&dms->hashTable[ddsIdx]); } #endif - } - - ddsExtraAttempts = - cParams->searchLog > rowLog ? 1U << (int)(cParams->searchLog - rowLog) : 0; } - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + ddsExtraAttempts = + cParams->searchLog > rowLog ? 
1U << (int)(cParams->searchLog - rowLog) : 0; + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + /* Prefetch DMS rows */ + uint* dmsHashTable = dms->hashTable; + byte* dmsTagTable = dms->tagTable; + uint dmsHash = (uint)ZSTD_hashPtr(ip, dms->rowHashLog + 8, mls); + uint dmsRelRow = dmsHash >> 8 << (int)rowLog; + dmsTag = dmsHash & (1U << 8) - 1; + dmsTagRow = dmsTagTable + dmsRelRow; + dmsRow = dmsHashTable + dmsRelRow; + ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog); + } + + if (ms->lazySkipping == 0) + { + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1); + hash = ZSTD_row_nextCachedHash( + hashCache, + hashTable, + tagTable, + @base, + curr, + hashLog, + rowLog, + mls, + hashSalt + ); + } + else + { + hash = (uint)ZSTD_hashPtrSalted(ip, hashLog + 8, mls, hashSalt); + ms->nextToUpdate = curr; + } + + ms->hashSaltEntropy += hash; + { + uint relRow = hash >> 8 << (int)rowLog; + uint tag = hash & (1U << 8) - 1; + uint* row = hashTable + relRow; + byte* tagRow = tagTable + relRow; + uint headGrouped = (*tagRow & rowMask) * groupWidth; + uint* matchBuffer = stackalloc uint[64]; + nuint numMatches = 0; + nuint currMatch = 0; + ulong matches = ZSTD_row_getMatchMask(tagRow, (byte)tag, headGrouped, rowEntries); + for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) { - /* Prefetch DMS rows */ - uint* dmsHashTable = dms->hashTable; - byte* dmsTagTable = dms->tagTable; - uint dmsHash = (uint)ZSTD_hashPtr(ip, dms->rowHashLog + 8, mls); - uint dmsRelRow = dmsHash >> 8 << (int)rowLog; - dmsTag = dmsHash & (1U << 8) - 1; - dmsTagRow = dmsTagTable + dmsRelRow; - dmsRow = dmsHashTable + dmsRelRow; - ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog); + uint matchPos = + (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchIndex = row[matchPos]; + if (matchPos == 0) + continue; + assert(numMatches < rowEntries); + if (matchIndex < lowLimit) + break; + if (dictMode != 
ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(@base + matchIndex); + } +#endif + } + else + { +#if NETCOREAPP3_0_OR_GREATER + if (Sse.IsSupported) + { + Sse.Prefetch0(dictBase + matchIndex); + } +#endif + } + + matchBuffer[numMatches++] = matchIndex; + --nbAttempts; } - if (ms->lazySkipping == 0) { - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1); - hash = ZSTD_row_nextCachedHash( - hashCache, - hashTable, - tagTable, - @base, - curr, - hashLog, - rowLog, - mls, - hashSalt - ); + uint pos = ZSTD_row_nextIndex(tagRow, rowMask); + tagRow[pos] = (byte)tag; + row[pos] = ms->nextToUpdate++; } - else + + for (; currMatch < numMatches; ++currMatch) { - hash = (uint)ZSTD_hashPtrSalted(ip, hashLog + 8, mls, hashSalt); - ms->nextToUpdate = curr; + uint matchIndex = matchBuffer[currMatch]; + nuint currentMl = 0; + assert(matchIndex < curr); + assert(matchIndex >= lowLimit); + if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) + { + byte* match = @base + matchIndex; + assert(matchIndex >= dictLimit); + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) + currentMl = ZSTD_count(ip, match, iLimit); + } + else + { + byte* match = dictBase + matchIndex; + assert(match + 4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) + currentMl = + ZSTD_count_2segments( + ip + 4, + match + 4, + iLimit, + dictEnd, + prefixStart + ) + 4; + } + + if (currentMl > ml) + { + ml = currentMl; + assert(curr - matchIndex > 0); + *offsetPtr = curr - matchIndex + 3; + if (ip + currentMl == iLimit) + break; + } } + } - ms->hashSaltEntropy += hash; + assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) + { + ml = ZSTD_dedicatedDictSearch_lazy_search( + offsetPtr, + ml, + nbAttempts + ddsExtraAttempts, + dms, + ip, + iLimit, + prefixStart, + curr, + dictLimit, + ddsIdx + ); + } + else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { + /* TODO: Measure and potentially add prefetching to DMS */ + uint dmsLowestIndex = dms->window.dictLimit; + byte* dmsBase = dms->window.@base; + byte* dmsEnd = dms->window.nextSrc; + uint dmsSize = (uint)(dmsEnd - dmsBase); + uint dmsIndexDelta = dictLimit - dmsSize; { - uint relRow = hash >> 8 << (int)rowLog; - uint tag = hash & (1U << 8) - 1; - uint* row = hashTable + relRow; - byte* tagRow = tagTable + relRow; - uint headGrouped = (*tagRow & rowMask) * groupWidth; + uint headGrouped = (*dmsTagRow & rowMask) * groupWidth; uint* matchBuffer = stackalloc uint[64]; nuint numMatches = 0; nuint currMatch = 0; - ulong matches = ZSTD_row_getMatchMask(tagRow, (byte)tag, headGrouped, rowEntries); + ulong matches = ZSTD_row_getMatchMask( + dmsTagRow, + (byte)dmsTag, + headGrouped, + rowEntries + ); for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) { uint matchPos = (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; - uint matchIndex = row[matchPos]; + uint matchIndex = dmsRow[matchPos]; if (matchPos == 0) continue; - assert(numMatches < rowEntries); - if (matchIndex < lowLimit) + if (matchIndex < dmsLowestIndex) break; - if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) - { -#if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(@base + matchIndex); - } -#endif - } - else - { #if NETCOREAPP3_0_OR_GREATER if (Sse.IsSupported) { - Sse.Prefetch0(dictBase + matchIndex); + Sse.Prefetch0(dmsBase + matchIndex); } #endif - } matchBuffer[numMatches++] = matchIndex; --nbAttempts; } - { - uint pos = ZSTD_row_nextIndex(tagRow, rowMask); - tagRow[pos] = (byte)tag; - row[pos] = ms->nextToUpdate++; - } - for 
(; currMatch < numMatches; ++currMatch) { uint matchIndex = matchBuffer[currMatch]; nuint currentMl = 0; + assert(matchIndex >= dmsLowestIndex); assert(matchIndex < curr); - assert(matchIndex >= lowLimit); - if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) - { - byte* match = @base + matchIndex; - assert(matchIndex >= dictLimit); - if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) - currentMl = ZSTD_count(ip, match, iLimit); - } - else { - byte* match = dictBase + matchIndex; - assert(match + 4 <= dictEnd); + byte* match = dmsBase + matchIndex; + assert(match + 4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) currentMl = ZSTD_count_2segments( ip + 4, match + 4, iLimit, - dictEnd, + dmsEnd, prefixStart ) + 4; } @@ -1572,2217 +1658,2214 @@ uint rowLog if (currentMl > ml) { ml = currentMl; - assert(curr - matchIndex > 0); - *offsetPtr = curr - matchIndex + 3; + assert(curr > matchIndex + dmsIndexDelta); + assert(curr - (matchIndex + dmsIndexDelta) > 0); + *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; if (ip + currentMl == iLimit) break; } } } + } - assert(nbAttempts <= 1U << (sizeof(nuint) == 4 ? 
30 : 31) - 1); - if (dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch) - { - ml = ZSTD_dedicatedDictSearch_lazy_search( - offsetPtr, - ml, - nbAttempts + ddsExtraAttempts, - dms, - ip, - iLimit, - prefixStart, - curr, - dictLimit, - ddsIdx - ); - } - else if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) - { - /* TODO: Measure and potentially add prefetching to DMS */ - uint dmsLowestIndex = dms->window.dictLimit; - byte* dmsBase = dms->window.@base; - byte* dmsEnd = dms->window.nextSrc; - uint dmsSize = (uint)(dmsEnd - dmsBase); - uint dmsIndexDelta = dictLimit - dmsSize; - { - uint headGrouped = (*dmsTagRow & rowMask) * groupWidth; - uint* matchBuffer = stackalloc uint[64]; - nuint numMatches = 0; - nuint currMatch = 0; - ulong matches = ZSTD_row_getMatchMask( - dmsTagRow, - (byte)dmsTag, - headGrouped, - rowEntries - ); - for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) - { - uint matchPos = - (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; - uint matchIndex = dmsRow[matchPos]; - if (matchPos == 0) - continue; - if (matchIndex < dmsLowestIndex) - break; -#if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(dmsBase + matchIndex); - } -#endif + return ml; + } - matchBuffer[numMatches++] = matchIndex; - --nbAttempts; - } + /* Generate row search fns for each combination of (dictMode, mls, rowLog) */ + private static nuint ZSTD_RowFindBestMatch_noDict_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); + } - for (; currMatch < numMatches; ++currMatch) - { - uint matchIndex = matchBuffer[currMatch]; - nuint currentMl = 0; - assert(matchIndex >= dmsLowestIndex); - assert(matchIndex < curr); - { - byte* match = dmsBase + matchIndex; - assert(match + 4 <= dmsEnd); - if (MEM_read32(match) == MEM_read32(ip)) - currentMl = - ZSTD_count_2segments( - ip + 4, - match + 4, - iLimit, - dmsEnd, - prefixStart - ) + 4; - } + private static nuint ZSTD_RowFindBestMatch_noDict_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); + } - if (currentMl > ml) - { - ml = currentMl; - assert(curr > matchIndex + dmsIndexDelta); - assert(curr - (matchIndex + dmsIndexDelta) > 0); - *offsetPtr = curr - (matchIndex + dmsIndexDelta) + 3; - if (ip + currentMl == iLimit) - break; - } - } - } - } + private static nuint ZSTD_RowFindBestMatch_noDict_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); + } - return ml; - } + private static nuint ZSTD_RowFindBestMatch_noDict_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); + } - /* Generate row search fns for each combination of (dictMode, mls, rowLog) */ - private static nuint ZSTD_RowFindBestMatch_noDict_4_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_noDict_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_noDict_4_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_noDict_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_noDict_4_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_noDict_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_noDict_5_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_noDict_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_noDict_5_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_noDict_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_noDict_5_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); - } - - private static nuint ZSTD_RowFindBestMatch_noDict_6_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); - } - - private static nuint ZSTD_RowFindBestMatch_noDict_6_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); - } - - private static nuint ZSTD_RowFindBestMatch_noDict_6_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); - } - - private static nuint ZSTD_RowFindBestMatch_extDict_4_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); - } - - private static nuint ZSTD_RowFindBestMatch_extDict_4_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_4_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_5_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_5_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_5_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_6_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_6_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_extDict_6_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_extDict_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_dictMatchState_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 
6 - : ms->cParams.searchLog - ) == 4 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 4 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 5 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 5 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 
6 + : ms->cParams.searchLog + ) == 4 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 4 + ); + } - private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - assert( - ( - 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 - : 6 < ms->cParams.searchLog ? 6 - : ms->cParams.searchLog - ) == 6 - ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, - 6 - ); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 5 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 5 + ); + } - /* Generate binary Tree search fns for each combination of (dictMode, mls) */ - private static nuint ZSTD_BtFindBestMatch_noDict_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_noDict); - } + private static nuint ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 
6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + assert( + ( + 4 > (6 < ms->cParams.searchLog ? 6 : ms->cParams.searchLog) ? 4 + : 6 < ms->cParams.searchLog ? 6 + : ms->cParams.searchLog + ) == 6 + ); + return ZSTD_RowFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch, + 6 + ); + } - private static nuint ZSTD_BtFindBestMatch_noDict_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_noDict); - } + /* Generate binary Tree search fns for each combination of (dictMode, mls) */ + private static nuint ZSTD_BtFindBestMatch_noDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_BtFindBestMatch_noDict_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_noDict); - } + private static nuint ZSTD_BtFindBestMatch_noDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_BtFindBestMatch_extDict_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict - ); - } + private static nuint ZSTD_BtFindBestMatch_noDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_BtFindBestMatch_extDict_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict - ); - } + private static nuint ZSTD_BtFindBestMatch_extDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_extDict + ); + } - private static nuint ZSTD_BtFindBestMatch_extDict_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 
6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict - ); - } + private static nuint ZSTD_BtFindBestMatch_extDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_extDict + ); + } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 4, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_BtFindBestMatch_extDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_extDict + ); + } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 5, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_BtFindBestMatch_dictMatchState_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 
4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_BtFindBestMatch_dictMatchState_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 6, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_BtFindBestMatch_dictMatchState_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 4, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_BtFindBestMatch_dictMatchState_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 5, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offBasePtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 6, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - /* Generate hash chain search fns for each combination of (dictMode, mls) */ - private static nuint ZSTD_HcFindBestMatch_noDict_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict); - } + private static nuint ZSTD_BtFindBestMatch_dedicatedDictSearch_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offBasePtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_BtFindBestMatch( + ms, + ip, + iLimit, + offBasePtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_HcFindBestMatch_noDict_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict); - } + /* Generate hash chain search fns for each combination of (dictMode, mls) */ + private static nuint ZSTD_HcFindBestMatch_noDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_HcFindBestMatch_noDict_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict); - } + private static nuint ZSTD_HcFindBestMatch_noDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_HcFindBestMatch_extDict_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict); - } + private static nuint ZSTD_HcFindBestMatch_noDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict); + } - private static nuint ZSTD_HcFindBestMatch_extDict_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 
6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict); - } + private static nuint ZSTD_HcFindBestMatch_extDict_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict); + } - private static nuint ZSTD_HcFindBestMatch_extDict_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict); - } + private static nuint ZSTD_HcFindBestMatch_extDict_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict); + } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_HcFindBestMatch_extDict_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 
6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict); + } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_HcFindBestMatch_dictMatchState_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_HcFindBestMatch_dictMatchState_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_HcFindBestMatch_dictMatchState_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_4( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 
4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 4 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_HcFindBestMatch_dictMatchState_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_5( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 6 - : ms->cParams.minMatch - ) == 5 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_4( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 4 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 4, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iLimit, - nuint* offsetPtr - ) - { - assert( - ( - 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 - : 6 < ms->cParams.minMatch ? 
6 - : ms->cParams.minMatch - ) == 6 - ); - return ZSTD_HcFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_5( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 5 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 5, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - /** - * Searches for the longest match at @p ip. - * Dispatches to the correct implementation function based on the - * (searchMethod, dictMode, mls, rowLog). We use switch statements - * here instead of using an indirect function call through a function - * pointer because after Spectre and Meltdown mitigations, indirect - * function calls can be very costly, especially in the kernel. - * - * NOTE: dictMode and searchMethod should be templated, so those switch - * statements should be optimized out. Only the mls & rowLog switches - * should be left. - * - * @param ms The match state. - * @param ip The position to search at. - * @param iend The end of the input data. - * @param[out] offsetPtr Stores the match offset into this pointer. - * @param mls The minimum search length, in the range [4, 6]. - * @param rowLog The row log (if applicable), in the range [4, 6]. - * @param searchMethod The search method to use (templated). - * @param dictMode The dictMode (templated). - * - * @returns The length of the longest match found, or < mls if no match is found. - * If a match is found its offset is stored in @p offsetPtr. 
- */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_searchMax( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iend, - nuint* offsetPtr, - uint mls, - uint rowLog, - searchMethod_e searchMethod, - ZSTD_dictMode_e dictMode - ) + private static nuint ZSTD_HcFindBestMatch_dedicatedDictSearch_6( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iLimit, + nuint* offsetPtr + ) + { + assert( + ( + 4 > (6 < ms->cParams.minMatch ? 6 : ms->cParams.minMatch) ? 4 + : 6 < ms->cParams.minMatch ? 6 + : ms->cParams.minMatch + ) == 6 + ); + return ZSTD_HcFindBestMatch( + ms, + ip, + iLimit, + offsetPtr, + 6, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } + + /** + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. + * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. + * If a match is found its offset is stored in @p offsetPtr. 
+ */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_searchMax( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + nuint* offsetPtr, + uint mls, + uint rowLog, + searchMethod_e searchMethod, + ZSTD_dictMode_e dictMode + ) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) { - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + if (searchMethod == searchMethod_e.search_rowHash) { - if (searchMethod == searchMethod_e.search_rowHash) + if (mls == 4) { - if (mls == 4) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_noDict_4_4(ms, ip, iend, offsetPtr); - return rowLog == 5 - ? ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr) - : ZSTD_RowFindBestMatch_noDict_4_6(ms, ip, iend, offsetPtr); - } - - if (mls == 5) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_noDict_5_4(ms, ip, iend, offsetPtr); - return rowLog == 5 - ? ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr) - : ZSTD_RowFindBestMatch_noDict_5_6(ms, ip, iend, offsetPtr); - } - if (rowLog == 4) - return ZSTD_RowFindBestMatch_noDict_6_4(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_noDict_4_4(ms, ip, iend, offsetPtr); return rowLog == 5 - ? ZSTD_RowFindBestMatch_noDict_6_5(ms, ip, iend, offsetPtr) - : ZSTD_RowFindBestMatch_noDict_6_6(ms, ip, iend, offsetPtr); + ? ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_4_6(ms, ip, iend, offsetPtr); } - if (searchMethod == searchMethod_e.search_hashChain) + if (mls == 5) { - if (mls == 4) - return ZSTD_HcFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); - return mls == 5 - ? ZSTD_HcFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) - : ZSTD_HcFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + if (rowLog == 4) + return ZSTD_RowFindBestMatch_noDict_5_4(ms, ip, iend, offsetPtr); + return rowLog == 5 + ? 
ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_5_6(ms, ip, iend, offsetPtr); } - // searchMethod_e.search_binaryTree - if (mls == 4) - return ZSTD_BtFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); - return mls == 5 - ? ZSTD_BtFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) - : ZSTD_BtFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + if (rowLog == 4) + return ZSTD_RowFindBestMatch_noDict_6_4(ms, ip, iend, offsetPtr); + return rowLog == 5 + ? ZSTD_RowFindBestMatch_noDict_6_5(ms, ip, iend, offsetPtr) + : ZSTD_RowFindBestMatch_noDict_6_6(ms, ip, iend, offsetPtr); } - if (dictMode == ZSTD_dictMode_e.ZSTD_extDict) + if (searchMethod == searchMethod_e.search_hashChain) { - if (searchMethod == searchMethod_e.search_rowHash) - { - if (mls == 4) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_extDict_4_4(ms, ip, iend, offsetPtr); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_extDict_4_5(ms, ip, iend, offsetPtr); - return ZSTD_RowFindBestMatch_extDict_4_6(ms, ip, iend, offsetPtr); - } - - if (mls == 5) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_extDict_5_4(ms, ip, iend, offsetPtr); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_extDict_5_5(ms, ip, iend, offsetPtr); - return ZSTD_RowFindBestMatch_extDict_5_6(ms, ip, iend, offsetPtr); - } + if (mls == 4) + return ZSTD_HcFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); + return mls == 5 + ? ZSTD_HcFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) + : ZSTD_HcFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + } - if (mls == 6) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_extDict_6_4(ms, ip, iend, offsetPtr); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_extDict_6_5(ms, ip, iend, offsetPtr); - return ZSTD_RowFindBestMatch_extDict_6_6(ms, ip, iend, offsetPtr); - } - } + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_noDict_4(ms, ip, iend, offsetPtr); + return mls == 5 + ? 
ZSTD_BtFindBestMatch_noDict_5(ms, ip, iend, offsetPtr) + : ZSTD_BtFindBestMatch_noDict_6(ms, ip, iend, offsetPtr); + } - if (searchMethod == searchMethod_e.search_hashChain) + if (dictMode == ZSTD_dictMode_e.ZSTD_extDict) + { + if (searchMethod == searchMethod_e.search_rowHash) + { + if (mls == 4) { - if (mls == 4) - return ZSTD_HcFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); - if (mls == 5) - return ZSTD_HcFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); - return ZSTD_HcFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_4_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_4_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_4_6(ms, ip, iend, offsetPtr); } - // searchMethod_e.search_binaryTree - if (mls == 4) - return ZSTD_BtFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); if (mls == 5) - return ZSTD_BtFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); - return ZSTD_BtFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); - } - - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) - { - if (searchMethod == searchMethod_e.search_rowHash) { - if (mls == 4) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_4_4( - ms, - ip, - iend, - offsetPtr - ); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_4_5( - ms, - ip, - iend, - offsetPtr - ); - return ZSTD_RowFindBestMatch_dictMatchState_4_6(ms, ip, iend, offsetPtr); - } - - if (mls == 5) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_5_4( - ms, - ip, - iend, - offsetPtr - ); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_5_5( - ms, - ip, - iend, - offsetPtr - ); - return ZSTD_RowFindBestMatch_dictMatchState_5_6(ms, ip, iend, offsetPtr); - } - - if (mls == 6) - { - if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_6_4( - ms, - ip, - iend, - offsetPtr - ); - if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_6_5( - ms, 
- ip, - iend, - offsetPtr - ); - return ZSTD_RowFindBestMatch_dictMatchState_6_6(ms, ip, iend, offsetPtr); - } + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_5_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_5_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_5_6(ms, ip, iend, offsetPtr); } - if (searchMethod == searchMethod_e.search_hashChain) + if (mls == 6) { - if (mls == 4) - return ZSTD_HcFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); - if (mls == 5) - return ZSTD_HcFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); - return ZSTD_HcFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); + if (rowLog == 4) + return ZSTD_RowFindBestMatch_extDict_6_4(ms, ip, iend, offsetPtr); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_extDict_6_5(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_extDict_6_6(ms, ip, iend, offsetPtr); } + } - // search_binaryTree + if (searchMethod == searchMethod_e.search_hashChain) + { if (mls == 4) - return ZSTD_BtFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); if (mls == 5) - return ZSTD_BtFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); - return ZSTD_BtFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); } + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_extDict_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_BtFindBestMatch_extDict_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_extDict_6(ms, ip, iend, offsetPtr); + } + + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + { if (searchMethod == searchMethod_e.search_rowHash) { if (mls == 4) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( + return ZSTD_RowFindBestMatch_dictMatchState_4_4( ms, ip, 
iend, offsetPtr ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( + return ZSTD_RowFindBestMatch_dictMatchState_4_5( ms, ip, iend, offsetPtr ); - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( + return ZSTD_RowFindBestMatch_dictMatchState_5_4( ms, ip, iend, offsetPtr ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( + return ZSTD_RowFindBestMatch_dictMatchState_5_5( ms, ip, iend, offsetPtr ); - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_5_6(ms, ip, iend, offsetPtr); } if (mls == 6) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( + return ZSTD_RowFindBestMatch_dictMatchState_6_4( ms, ip, iend, offsetPtr ); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( + return ZSTD_RowFindBestMatch_dictMatchState_6_5( ms, ip, iend, offsetPtr ); - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ms, ip, iend, offsetPtr); + return ZSTD_RowFindBestMatch_dictMatchState_6_6(ms, ip, iend, offsetPtr); } } if (searchMethod == searchMethod_e.search_hashChain) { if (mls == 4) - return ZSTD_HcFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); if (mls == 5) - return ZSTD_HcFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); - return ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); } - // searchMethod_e.search_binaryTree + // search_binaryTree if (mls == 4) - return ZSTD_BtFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, offsetPtr); + return 
ZSTD_BtFindBestMatch_dictMatchState_4(ms, ip, iend, offsetPtr); if (mls == 5) - return ZSTD_BtFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); - return ZSTD_BtFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_dictMatchState_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_dictMatchState_6(ms, ip, iend, offsetPtr); } - /* ******************************* - * Common parser - lazy strategy - *********************************/ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_lazy_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - searchMethod_e searchMethod, - uint depth, - ZSTD_dictMode_e dictMode - ) + if (searchMethod == searchMethod_e.search_rowHash) { - byte* istart = (byte*)src; - byte* ip = istart; - byte* anchor = istart; - byte* iend = istart + srcSize; - byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? iend - 8 - 8 : iend - 8; - byte* @base = ms->window.@base; - uint prefixLowestIndex = ms->window.dictLimit; - byte* prefixLowest = @base + prefixLowestIndex; - uint mls = - ms->cParams.minMatch <= 4 ? 4 - : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch - : 6; - uint rowLog = - ms->cParams.searchLog <= 4 ? 4 - : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog - : 6; - uint offset_1 = rep[0], - offset_2 = rep[1]; - uint offsetSaved1 = 0, - offsetSaved2 = 0; - int isDMS = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 1 : 0; - int isDDS = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 1 : 0; - int isDxS = isDMS != 0 || isDDS != 0 ? 1 : 0; - ZSTD_MatchState_t* dms = ms->dictMatchState; - uint dictLowestIndex = isDxS != 0 ? dms->window.dictLimit : 0; - byte* dictBase = isDxS != 0 ? dms->window.@base : null; - byte* dictLowest = isDxS != 0 ? dictBase + dictLowestIndex : null; - byte* dictEnd = isDxS != 0 ? dms->window.nextSrc : null; - uint dictIndexDelta = isDxS != 0 ? 
prefixLowestIndex - (uint)(dictEnd - dictBase) : 0; - uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictLowest)); - ip += dictAndPrefixLength == 0 ? 1 : 0; - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + if (mls == 4) { - uint curr = (uint)(ip - @base); - uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); - uint maxRep = curr - windowLow; - if (offset_2 > maxRep) - { - offsetSaved2 = offset_2; - offset_2 = 0; - } + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( + ms, + ip, + iend, + offsetPtr + ); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( + ms, + ip, + iend, + offsetPtr + ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ms, ip, iend, offsetPtr); + } - if (offset_1 > maxRep) - { - offsetSaved1 = offset_1; - offset_1 = 0; - } + if (mls == 5) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( + ms, + ip, + iend, + offsetPtr + ); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( + ms, + ip, + iend, + offsetPtr + ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ms, ip, iend, offsetPtr); } -#if DEBUG - if (isDxS != 0) + if (mls == 6) + { + if (rowLog == 4) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( + ms, + ip, + iend, + offsetPtr + ); + if (rowLog == 5) + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( + ms, + ip, + iend, + offsetPtr + ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ms, ip, iend, offsetPtr); + } + } + + if (searchMethod == searchMethod_e.search_hashChain) + { + if (mls == 4) + return ZSTD_HcFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, offsetPtr); + if (mls == 5) + return ZSTD_HcFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); + return ZSTD_HcFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + } + + // searchMethod_e.search_binaryTree + if (mls == 4) + return ZSTD_BtFindBestMatch_dedicatedDictSearch_4(ms, ip, iend, 
offsetPtr); + if (mls == 5) + return ZSTD_BtFindBestMatch_dedicatedDictSearch_5(ms, ip, iend, offsetPtr); + return ZSTD_BtFindBestMatch_dedicatedDictSearch_6(ms, ip, iend, offsetPtr); + } + + /* ******************************* + * Common parser - lazy strategy + *********************************/ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_lazy_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + searchMethod_e searchMethod, + uint depth, + ZSTD_dictMode_e dictMode + ) + { + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? iend - 8 - 8 : iend - 8; + byte* @base = ms->window.@base; + uint prefixLowestIndex = ms->window.dictLimit; + byte* prefixLowest = @base + prefixLowestIndex; + uint mls = + ms->cParams.minMatch <= 4 ? 4 + : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch + : 6; + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; + uint offset_1 = rep[0], + offset_2 = rep[1]; + uint offsetSaved1 = 0, + offsetSaved2 = 0; + int isDMS = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 1 : 0; + int isDDS = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 1 : 0; + int isDxS = isDMS != 0 || isDDS != 0 ? 1 : 0; + ZSTD_MatchState_t* dms = ms->dictMatchState; + uint dictLowestIndex = isDxS != 0 ? dms->window.dictLimit : 0; + byte* dictBase = isDxS != 0 ? dms->window.@base : null; + byte* dictLowest = isDxS != 0 ? dictBase + dictLowestIndex : null; + byte* dictEnd = isDxS != 0 ? dms->window.nextSrc : null; + uint dictIndexDelta = isDxS != 0 ? prefixLowestIndex - (uint)(dictEnd - dictBase) : 0; + uint dictAndPrefixLength = (uint)(ip - prefixLowest + (dictEnd - dictLowest)); + ip += dictAndPrefixLength == 0 ? 
1 : 0; + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + uint curr = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); + uint maxRep = curr - windowLow; + if (offset_2 > maxRep) { - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); + offsetSaved2 = offset_2; + offset_2 = 0; } -#endif - ms->lazySkipping = 0; - if (searchMethod == searchMethod_e.search_rowHash) + if (offset_1 > maxRep) { - ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + offsetSaved1 = offset_1; + offset_1 = 0; } + } + +#if DEBUG + if (isDxS != 0) + { + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + } +#endif - while (ip < ilimit) + ms->lazySkipping = 0; + if (searchMethod == searchMethod_e.search_rowHash) + { + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + while (ip < ilimit) + { + nuint matchLength = 0; + assert(1 >= 1); + assert(1 <= 3); + nuint offBase = 1; + byte* start = ip + 1; + if (isDxS != 0) { - nuint matchLength = 0; - assert(1 >= 1); - assert(1 <= 3); - nuint offBase = 1; - byte* start = ip + 1; - if (isDxS != 0) - { - uint repIndex = (uint)(ip - @base) + 1 - offset_1; - byte* repMatch = - ( - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - || dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ) - && repIndex < prefixLowestIndex - ? dictBase + (repIndex - dictIndexDelta) - : @base + repIndex; - if ( - ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 - && MEM_read32(repMatch) == MEM_read32(ip + 1) + uint repIndex = (uint)(ip - @base) + 1 - offset_1; + byte* repMatch = + ( + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ) - { - byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; - matchLength = - ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixLowest - ) + 4; - if (depth == 0) - goto _storeSequence; - } - } - + && repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; if ( - dictMode == ZSTD_dictMode_e.ZSTD_noDict - && offset_1 > 0 - && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1) + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip + 1) ) { - matchLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; - if (depth == 0) - goto _storeSequence; + byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; + matchLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; + if (depth == 0) + goto _storeSequence; + } + } + + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + && offset_1 > 0 + && MEM_read32(ip + 1 - offset_1) == MEM_read32(ip + 1) + ) + { + matchLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4; + if (depth == 0) + goto _storeSequence; + } + + { + nuint offbaseFound = 999999999; + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &offbaseFound, + mls, + rowLog, + searchMethod, + dictMode + ); + if (ml2 > matchLength) + { + matchLength = ml2; + start = ip; + offBase = offbaseFound; } + } + + if (matchLength < 4) + { + /* jump faster over incompressible sections */ + nuint step = ((nuint)(ip - anchor) >> 8) + 1; + ip += step; + ms->lazySkipping = step > 8 ? 
1 : 0; + continue; + } + if (depth >= 1) + while (ip < ilimit) { - nuint offbaseFound = 999999999; - nuint ml2 = ZSTD_searchMax( - ms, - ip, - iend, - &offbaseFound, - mls, - rowLog, - searchMethod, - dictMode - ); - if (ml2 > matchLength) + ip++; + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + && offBase != 0 + && offset_1 > 0 + && MEM_read32(ip) == MEM_read32(ip - offset_1) + ) { - matchLength = ml2; - start = ip; - offBase = offbaseFound; + nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; + int gain2 = (int)(mlRep * 3); + int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } } - } - if (matchLength < 4) - { - /* jump faster over incompressible sections */ - nuint step = ((nuint)(ip - anchor) >> 8) + 1; - ip += step; - ms->lazySkipping = step > 8 ? 1 : 0; - continue; - } + if (isDxS != 0) + { + uint repIndex = (uint)(ip - @base) - offset_1; + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase + (repIndex - dictIndexDelta) + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip) + ) + { + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + nuint mlRep = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repMatchEnd, + prefixLowest + ) + 4; + int gain2 = (int)(mlRep * 3); + int gain1 = (int)( + matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + ); + if (mlRep >= 4 && gain2 > gain1) + { + matchLength = mlRep; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } + } + + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + dictMode + ); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } - if (depth >= 1) - while (ip < ilimit) + if (depth == 2 && ip < ilimit) { ip++; if ( @@ -3793,8 +3876,10 @@ ZSTD_dictMode_e dictMode ) { nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; - int gain2 = (int)(mlRep * 3); - int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); + int gain2 = (int)(mlRep * 4); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 + ); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -3817,7 +3902,8 @@ ZSTD_dictMode_e dictMode && MEM_read32(repMatch) == MEM_read32(ip) ) { - byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; + byte* repMatchEnd = + repIndex < prefixLowestIndex ? 
dictEnd : iend; nuint mlRep = ZSTD_count_2segments( ip + 4, @@ -3826,9 +3912,9 @@ ZSTD_dictMode_e dictMode repMatchEnd, prefixLowest ) + 4; - int gain2 = (int)(mlRep * 3); + int gain2 = (int)(mlRep * 4); int gain1 = (int)( - matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 ); if (mlRep >= 4 && gain2 > gain1) { @@ -3855,7 +3941,9 @@ ZSTD_dictMode_e dictMode ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 + ); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -3864,201 +3952,91 @@ ZSTD_dictMode_e dictMode continue; } } - - if (depth == 2 && ip < ilimit) - { - ip++; - if ( - dictMode == ZSTD_dictMode_e.ZSTD_noDict - && offBase != 0 - && offset_1 > 0 - && MEM_read32(ip) == MEM_read32(ip - offset_1) - ) - { - nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; - int gain2 = (int)(mlRep * 4); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 - ); - if (mlRep >= 4 && gain2 > gain1) - { - matchLength = mlRep; - assert(1 >= 1); - assert(1 <= 3); - offBase = 1; - start = ip; - } - } - - if (isDxS != 0) - { - uint repIndex = (uint)(ip - @base) - offset_1; - byte* repMatch = - repIndex < prefixLowestIndex - ? dictBase + (repIndex - dictIndexDelta) - : @base + repIndex; - if ( - ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 - && MEM_read32(repMatch) == MEM_read32(ip) - ) - { - byte* repMatchEnd = - repIndex < prefixLowestIndex ? 
dictEnd : iend; - nuint mlRep = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixLowest - ) + 4; - int gain2 = (int)(mlRep * 4); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 - ); - if (mlRep >= 4 && gain2 > gain1) - { - matchLength = mlRep; - assert(1 >= 1); - assert(1 <= 3); - offBase = 1; - start = ip; - } - } - } - - { - nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax( - ms, - ip, - iend, - &ofbCandidate, - mls, - rowLog, - searchMethod, - dictMode - ); - /* raw approx */ - int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 - ); - if (ml2 >= 4 && gain2 > gain1) - { - matchLength = ml2; - offBase = ofbCandidate; - start = ip; - continue; - } - } - } - - break; - } - - if (offBase > 3) - { - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) - { - assert(offBase > 3); - assert(offBase > 3); - while ( - start > anchor - && start - (offBase - 3) > prefixLowest - && start[-1] == (start - (offBase - 3))[-1] - ) - { - start--; - matchLength++; - } - } - - if (isDxS != 0) - { - assert(offBase > 3); - uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); - byte* match = - matchIndex < prefixLowestIndex - ? dictBase + matchIndex - dictIndexDelta - : @base + matchIndex; - byte* mStart = matchIndex < prefixLowestIndex ? 
dictLowest : prefixLowest; - while (start > anchor && match > mStart && start[-1] == match[-1]) - { - start--; - match--; - matchLength++; - } } - offset_2 = offset_1; - assert(offBase > 3); - offset_1 = (uint)(offBase - 3); - } - - _storeSequence: - { - nuint litLength = (nuint)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); - anchor = ip = start + matchLength; + break; } - if (ms->lazySkipping != 0) + if (offBase > 3) + { + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) { - if (searchMethod == searchMethod_e.search_rowHash) + assert(offBase > 3); + assert(offBase > 3); + while ( + start > anchor + && start - (offBase - 3) > prefixLowest + && start[-1] == (start - (offBase - 3))[-1] + ) { - ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + start--; + matchLength++; } - - ms->lazySkipping = 0; } if (isDxS != 0) { - while (ip <= ilimit) + assert(offBase > 3); + uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); + byte* match = + matchIndex < prefixLowestIndex + ? dictBase + matchIndex - dictIndexDelta + : @base + matchIndex; + byte* mStart = matchIndex < prefixLowestIndex ? dictLowest : prefixLowest; + while (start > anchor && match > mStart && start[-1] == match[-1]) { - uint current2 = (uint)(ip - @base); - uint repIndex = current2 - offset_2; - byte* repMatch = - repIndex < prefixLowestIndex - ? dictBase - dictIndexDelta + repIndex - : @base + repIndex; - if ( - ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 - && MEM_read32(repMatch) == MEM_read32(ip) - ) - { - byte* repEnd2 = repIndex < prefixLowestIndex ? 
dictEnd : iend; - matchLength = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repEnd2, - prefixLowest - ) + 4; - offBase = offset_2; - offset_2 = offset_1; - offset_1 = (uint)offBase; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); - ip += matchLength; - anchor = ip; - continue; - } - - break; + start--; + match--; + matchLength++; } } - if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + offset_2 = offset_1; + assert(offBase > 3); + offset_1 = (uint)(offBase - 3); + } + + _storeSequence: + { + nuint litLength = (nuint)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); + anchor = ip = start + matchLength; + } + + if (ms->lazySkipping != 0) + { + if (searchMethod == searchMethod_e.search_rowHash) { - while ( - ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + } + + ms->lazySkipping = 0; + } + + if (isDxS != 0) + { + while (ip <= ilimit) + { + uint current2 = (uint)(ip - @base); + uint repIndex = current2 - offset_2; + byte* repMatch = + repIndex < prefixLowestIndex + ? dictBase - dictIndexDelta + repIndex + : @base + repIndex; + if ( + ZSTD_index_overlap_check(prefixLowestIndex, repIndex) != 0 + && MEM_read32(repMatch) == MEM_read32(ip) ) { - matchLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; + byte* repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; + matchLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd2, + prefixLowest + ) + 4; offBase = offset_2; offset_2 = offset_1; offset_1 = (uint)offBase; @@ -4069,521 +4047,610 @@ ZSTD_dictMode_e dictMode anchor = ip; continue; } + + break; } } - offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; - rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; - rep[1] = offset_2 != 0 ? 
offset_2 : offsetSaved2; - return (nuint)(iend - anchor); + if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) + { + while ( + ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) + ) + { + matchLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; + offBase = offset_2; + offset_2 = offset_1; + offset_1 = (uint)offBase; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); + ip += matchLength; + anchor = ip; + continue; + } + } } - private static nuint ZSTD_compressBlock_greedy( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 0, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + offsetSaved2 = offsetSaved1 != 0 && offset_1 != 0 ? offsetSaved1 : offsetSaved2; + rep[0] = offset_1 != 0 ? offset_1 : offsetSaved1; + rep[1] = offset_2 != 0 ? offset_2 : offsetSaved2; + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 0, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_greedy( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 0, - 
ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_greedy_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 0, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 0, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_greedy_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 0, - 
ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_greedy_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_lazy( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 1, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_greedy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 1, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_lazy( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + ZSTD_dictMode_e.ZSTD_noDict + ); + } + + private static nuint ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + 
ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } + + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 1, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_lazy_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_lazy_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 1, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_lazy_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 1, - 
ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 1, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_lazy2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_lazy2( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 2, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 2, - 
ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 2, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 2, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_lazy2_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_lazy2_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 2, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_lazy2_dictMatchState_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 2, - 
ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2, + ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ); + } - private static nuint ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 2, - ZSTD_dictMode_e.ZSTD_dedicatedDictSearch - ); - } + private static nuint ZSTD_compressBlock_btlazy2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_btlazy2( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_binaryTree, - 2, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_lazy_extDict_generic( + 
ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + searchMethod_e searchMethod, + uint depth + ) + { + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? iend - 8 - 8 : iend - 8; + byte* @base = ms->window.@base; + uint dictLimit = ms->window.dictLimit; + byte* prefixStart = @base + dictLimit; + byte* dictBase = ms->window.dictBase; + byte* dictEnd = dictBase + dictLimit; + byte* dictStart = dictBase + ms->window.lowLimit; + uint windowLog = ms->cParams.windowLog; + uint mls = + ms->cParams.minMatch <= 4 ? 4 + : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch + : 6; + uint rowLog = + ms->cParams.searchLog <= 4 ? 4 + : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog + : 6; + uint offset_1 = rep[0], + offset_2 = rep[1]; + ms->lazySkipping = 0; + ip += ip == prefixStart ? 1 : 0; + if (searchMethod == searchMethod_e.search_rowHash) { - return ZSTD_compressBlock_lazy_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_binaryTree, - 2, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_lazy_extDict_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - searchMethod_e searchMethod, - uint depth - ) + while (ip < ilimit) { - byte* istart = (byte*)src; - byte* ip = istart; - byte* anchor = istart; - byte* iend = istart + srcSize; - byte* ilimit = searchMethod == searchMethod_e.search_rowHash ? 
iend - 8 - 8 : iend - 8; - byte* @base = ms->window.@base; - uint dictLimit = ms->window.dictLimit; - byte* prefixStart = @base + dictLimit; - byte* dictBase = ms->window.dictBase; - byte* dictEnd = dictBase + dictLimit; - byte* dictStart = dictBase + ms->window.lowLimit; - uint windowLog = ms->cParams.windowLog; - uint mls = - ms->cParams.minMatch <= 4 ? 4 - : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch - : 6; - uint rowLog = - ms->cParams.searchLog <= 4 ? 4 - : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog - : 6; - uint offset_1 = rep[0], - offset_2 = rep[1]; - ms->lazySkipping = 0; - ip += ip == prefixStart ? 1 : 0; - if (searchMethod == searchMethod_e.search_rowHash) + nuint matchLength = 0; + assert(1 >= 1); + assert(1 <= 3); + nuint offBase = 1; + byte* start = ip + 1; + uint curr = (uint)(ip - @base); { - ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr + 1, windowLog); + uint repIndex = curr + 1 - offset_1; + byte* repBase = repIndex < dictLimit ? dictBase : @base; + byte* repMatch = repBase + repIndex; + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_1 <= curr + 1 - windowLow ? 1 : 0) + ) != 0 + ) + if (MEM_read32(ip + 1) == MEM_read32(repMatch)) + { + /* repcode detected we should take it */ + byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; + matchLength = + ZSTD_count_2segments( + ip + 1 + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; + if (depth == 0) + goto _storeSequence; + } } - while (ip < ilimit) { - nuint matchLength = 0; - assert(1 >= 1); - assert(1 <= 3); - nuint offBase = 1; - byte* start = ip + 1; - uint curr = (uint)(ip - @base); + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + ZSTD_dictMode_e.ZSTD_extDict + ); + if (ml2 > matchLength) { - uint windowLow = ZSTD_getLowestMatchIndex(ms, curr + 1, windowLog); - uint repIndex = curr + 1 - offset_1; - byte* repBase = repIndex < dictLimit ? dictBase : @base; - byte* repMatch = repBase + repIndex; - if ( - ( - ZSTD_index_overlap_check(dictLimit, repIndex) - & (offset_1 <= curr + 1 - windowLow ? 1 : 0) - ) != 0 - ) - if (MEM_read32(ip + 1) == MEM_read32(repMatch)) - { - /* repcode detected we should take it */ - byte* repEnd = repIndex < dictLimit ? dictEnd : iend; - matchLength = - ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, - iend, - repEnd, - prefixStart - ) + 4; - if (depth == 0) - goto _storeSequence; - } + matchLength = ml2; + start = ip; + offBase = ofbCandidate; } + } + + if (matchLength < 4) + { + nuint step = (nuint)(ip - anchor) >> 8; + ip += step + 1; + ms->lazySkipping = step > 8 ? 1 : 0; + continue; + } + if (depth >= 1) + while (ip < ilimit) { - nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax( - ms, - ip, - iend, - &ofbCandidate, - mls, - rowLog, - searchMethod, - ZSTD_dictMode_e.ZSTD_extDict - ); - if (ml2 > matchLength) + ip++; + curr++; + if (offBase != 0) { - matchLength = ml2; - start = ip; - offBase = ofbCandidate; + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); + uint repIndex = curr - offset_1; + byte* repBase = repIndex < dictLimit ? 
dictBase : @base; + byte* repMatch = repBase + repIndex; + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_1 <= curr - windowLow ? 1 : 0) + ) != 0 + ) + if (MEM_read32(ip) == MEM_read32(repMatch)) + { + /* repcode detected */ + byte* repEnd = repIndex < dictLimit ? dictEnd : iend; + nuint repLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; + int gain2 = (int)(repLength * 3); + int gain1 = (int)( + matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + ); + if (repLength >= 4 && gain2 > gain1) + { + matchLength = repLength; + assert(1 >= 1); + assert(1 <= 3); + offBase = 1; + start = ip; + } + } } - } - if (matchLength < 4) - { - nuint step = (nuint)(ip - anchor) >> 8; - ip += step + 1; - ms->lazySkipping = step > 8 ? 1 : 0; - continue; - } + { + nuint ofbCandidate = 999999999; + nuint ml2 = ZSTD_searchMax( + ms, + ip, + iend, + &ofbCandidate, + mls, + rowLog, + searchMethod, + ZSTD_dictMode_e.ZSTD_extDict + ); + /* raw approx */ + int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + if (ml2 >= 4 && gain2 > gain1) + { + matchLength = ml2; + offBase = ofbCandidate; + start = ip; + continue; + } + } - if (depth >= 1) - while (ip < ilimit) + if (depth == 2 && ip < ilimit) { ip++; curr++; @@ -4611,9 +4678,9 @@ uint depth repEnd, prefixStart ) + 4; - int gain2 = (int)(repLength * 3); + int gain2 = (int)(repLength * 4); int gain1 = (int)( - matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 ); if (repLength >= 4 && gain2 > gain1) { @@ -4640,7 +4707,9 @@ uint depth ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 4); + int gain1 = (int)( + matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 + ); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -4649,292 
+4718,222 @@ uint depth continue; } } - - if (depth == 2 && ip < ilimit) - { - ip++; - curr++; - if (offBase != 0) - { - uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); - uint repIndex = curr - offset_1; - byte* repBase = repIndex < dictLimit ? dictBase : @base; - byte* repMatch = repBase + repIndex; - if ( - ( - ZSTD_index_overlap_check(dictLimit, repIndex) - & (offset_1 <= curr - windowLow ? 1 : 0) - ) != 0 - ) - if (MEM_read32(ip) == MEM_read32(repMatch)) - { - /* repcode detected */ - byte* repEnd = repIndex < dictLimit ? dictEnd : iend; - nuint repLength = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repEnd, - prefixStart - ) + 4; - int gain2 = (int)(repLength * 4); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 - ); - if (repLength >= 4 && gain2 > gain1) - { - matchLength = repLength; - assert(1 >= 1); - assert(1 <= 3); - offBase = 1; - start = ip; - } - } - } - - { - nuint ofbCandidate = 999999999; - nuint ml2 = ZSTD_searchMax( - ms, - ip, - iend, - &ofbCandidate, - mls, - rowLog, - searchMethod, - ZSTD_dictMode_e.ZSTD_extDict - ); - /* raw approx */ - int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 - ); - if (ml2 >= 4 && gain2 > gain1) - { - matchLength = ml2; - offBase = ofbCandidate; - start = ip; - continue; - } - } - } - - break; - } - - if (offBase > 3) - { - assert(offBase > 3); - uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); - byte* match = - matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; - byte* mStart = matchIndex < dictLimit ? 
dictStart : prefixStart; - while (start > anchor && match > mStart && start[-1] == match[-1]) - { - start--; - match--; - matchLength++; } - offset_2 = offset_1; - assert(offBase > 3); - offset_1 = (uint)(offBase - 3); + break; } - _storeSequence: + if (offBase > 3) + { + assert(offBase > 3); + uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); + byte* match = + matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; + byte* mStart = matchIndex < dictLimit ? dictStart : prefixStart; + while (start > anchor && match > mStart && start[-1] == match[-1]) { - nuint litLength = (nuint)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); - anchor = ip = start + matchLength; + start--; + match--; + matchLength++; } - if (ms->lazySkipping != 0) - { - if (searchMethod == searchMethod_e.search_rowHash) - { - ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); - } + offset_2 = offset_1; + assert(offBase > 3); + offset_1 = (uint)(offBase - 3); + } - ms->lazySkipping = 0; - } + _storeSequence: + { + nuint litLength = (nuint)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (uint)offBase, matchLength); + anchor = ip = start + matchLength; + } - while (ip <= ilimit) + if (ms->lazySkipping != 0) + { + if (searchMethod == searchMethod_e.search_rowHash) { - uint repCurrent = (uint)(ip - @base); - uint windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); - uint repIndex = repCurrent - offset_2; - byte* repBase = repIndex < dictLimit ? dictBase : @base; - byte* repMatch = repBase + repIndex; - if ( - ( - ZSTD_index_overlap_check(dictLimit, repIndex) - & (offset_2 <= repCurrent - windowLow ? 1 : 0) - ) != 0 - ) - if (MEM_read32(ip) == MEM_read32(repMatch)) - { - /* repcode detected we should take it */ - byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; - matchLength = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repEnd, - prefixStart - ) + 4; - offBase = offset_2; - offset_2 = offset_1; - offset_1 = (uint)offBase; - assert(1 >= 1); - assert(1 <= 3); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); - ip += matchLength; - anchor = ip; - continue; - } - - break; + ZSTD_row_fillHashCache(ms, @base, rowLog, mls, ms->nextToUpdate, ilimit); } + + ms->lazySkipping = 0; } - rep[0] = offset_1; - rep[1] = offset_2; - return (nuint)(iend - anchor); - } + while (ip <= ilimit) + { + uint repCurrent = (uint)(ip - @base); + uint windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); + uint repIndex = repCurrent - offset_2; + byte* repBase = repIndex < dictLimit ? dictBase : @base; + byte* repMatch = repBase + repIndex; + if ( + ( + ZSTD_index_overlap_check(dictLimit, repIndex) + & (offset_2 <= repCurrent - windowLow ? 1 : 0) + ) != 0 + ) + if (MEM_read32(ip) == MEM_read32(repMatch)) + { + /* repcode detected we should take it */ + byte* repEnd = repIndex < dictLimit ? 
dictEnd : iend; + matchLength = + ZSTD_count_2segments( + ip + 4, + repMatch + 4, + iend, + repEnd, + prefixStart + ) + 4; + offBase = offset_2; + offset_2 = offset_1; + offset_1 = (uint)offBase; + assert(1 >= 1); + assert(1 <= 3); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 1, matchLength); + ip += matchLength; + anchor = ip; + continue; + } - private static nuint ZSTD_compressBlock_greedy_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 0 - ); + break; + } } - private static nuint ZSTD_compressBlock_greedy_extDict_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 0 - ); - } + rep[0] = offset_1; + rep[1] = offset_2; + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_lazy_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 1 - ); - } + private static nuint ZSTD_compressBlock_greedy_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 0 + ); + } - private static nuint ZSTD_compressBlock_lazy_extDict_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 1 - ); - } + private static nuint ZSTD_compressBlock_greedy_extDict_row( + 
ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 0 + ); + } - private static nuint ZSTD_compressBlock_lazy2_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_hashChain, - 2 - ); - } + private static nuint ZSTD_compressBlock_lazy_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_hashChain, + 1 + ); + } - private static nuint ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_rowHash, - 2 - ); - } + private static nuint ZSTD_compressBlock_lazy_extDict_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 1 + ); + } - private static nuint ZSTD_compressBlock_btlazy2_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_lazy_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - searchMethod_e.search_binaryTree, - 2 - ); - } + private static nuint ZSTD_compressBlock_lazy2_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + 
searchMethod_e.search_hashChain, + 2 + ); + } + + private static nuint ZSTD_compressBlock_lazy2_extDict_row( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_rowHash, + 2 + ); + } + + private static nuint ZSTD_compressBlock_btlazy2_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_lazy_extDict_generic( + ms, + seqStore, + rep, + src, + srcSize, + searchMethod_e.search_binaryTree, + 2 + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs index 407ec8799..6b8c3471b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs @@ -1,147 +1,133 @@ -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /** ZSTD_ldm_gear_init(): + * + * Initializes the rolling hash state such that it will honor the + * settings in params. */ + private static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t* @params) { - /** ZSTD_ldm_gear_init(): - * - * Initializes the rolling hash state such that it will honor the - * settings in params. */ - private static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t* @params) + uint maxBitsInMask = @params->minMatchLength < 64 ? @params->minMatchLength : 64; + uint hashRateLog = @params->hashRateLog; + state->rolling = ~(uint)0; + if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) { - uint maxBitsInMask = @params->minMatchLength < 64 ? 
@params->minMatchLength : 64; - uint hashRateLog = @params->hashRateLog; - state->rolling = ~(uint)0; - if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) - { - state->stopMask = - ((ulong)1 << (int)hashRateLog) - 1 << (int)(maxBitsInMask - hashRateLog); - } - else - { - state->stopMask = ((ulong)1 << (int)hashRateLog) - 1; - } + state->stopMask = + ((ulong)1 << (int)hashRateLog) - 1 << (int)(maxBitsInMask - hashRateLog); + } + else + { + state->stopMask = ((ulong)1 << (int)hashRateLog) - 1; } + } - /** ZSTD_ldm_gear_reset() - * Feeds [data, data + minMatchLength) into the hash without registering any - * splits. This effectively resets the hash state. This is used when skipping - * over data, either at the beginning of a block, or skipping sections. - */ - private static void ZSTD_ldm_gear_reset( - ldmRollingHashState_t* state, - byte* data, - nuint minMatchLength - ) + /** ZSTD_ldm_gear_reset() + * Feeds [data, data + minMatchLength) into the hash without registering any + * splits. This effectively resets the hash state. This is used when skipping + * over data, either at the beginning of a block, or skipping sections. 
+ */ + private static void ZSTD_ldm_gear_reset( + ldmRollingHashState_t* state, + byte* data, + nuint minMatchLength + ) + { + ulong hash = state->rolling; + nuint n = 0; + while (n + 3 < minMatchLength) { - ulong hash = state->rolling; - nuint n = 0; - while (n + 3 < minMatchLength) { - { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - } - - { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - } + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } - { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - } + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } - { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - } + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; } - while (n < minMatchLength) { hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; n += 1; } } - /** ZSTD_ldm_gear_feed(): - * - * Registers in the splits array all the split points found in the first - * size bytes following the data pointer. This function terminates when - * either all the data has been processed or LDM_BATCH_SIZE splits are - * present in the splits array. - * - * Precondition: The splits array must not be full. - * Returns: The number of bytes processed. 
*/ - private static nuint ZSTD_ldm_gear_feed( - ldmRollingHashState_t* state, - byte* data, - nuint size, - nuint* splits, - uint* numSplits - ) + while (n < minMatchLength) { - nuint n; - ulong hash, - mask; - hash = state->rolling; - mask = state->stopMask; - n = 0; - while (n + 3 < size) - { - { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - if ((hash & mask) == 0) - { - splits[*numSplits] = n; - *numSplits += 1; - if (*numSplits == 64) - goto done; - } - } + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + } + } + /** ZSTD_ldm_gear_feed(): + * + * Registers in the splits array all the split points found in the first + * size bytes following the data pointer. This function terminates when + * either all the data has been processed or LDM_BATCH_SIZE splits are + * present in the splits array. + * + * Precondition: The splits array must not be full. + * Returns: The number of bytes processed. */ + private static nuint ZSTD_ldm_gear_feed( + ldmRollingHashState_t* state, + byte* data, + nuint size, + nuint* splits, + uint* numSplits + ) + { + nuint n; + ulong hash, + mask; + hash = state->rolling; + mask = state->stopMask; + n = 0; + while (n + 3 < size) + { + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - if ((hash & mask) == 0) - { - splits[*numSplits] = n; - *numSplits += 1; - if (*numSplits == 64) - goto done; - } + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; } + } + { + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - if ((hash & mask) == 0) - { - splits[*numSplits] = n; - *numSplits += 1; - if (*numSplits == 64) - goto done; - } + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; } + } + { + hash = (hash << 1) + 
ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) { - hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; - n += 1; - if ((hash & mask) == 0) - { - splits[*numSplits] = n; - *numSplits += 1; - if (*numSplits == 64) - goto done; - } + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; } } - while (n < size) { hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; n += 1; @@ -153,58 +139,47 @@ private static nuint ZSTD_ldm_gear_feed( goto done; } } - - done: - state->rolling = hash; - return n; } - /** ZSTD_ldm_adjustParameters() : - * If the params->hashRateLog is not set, set it to its default value based on - * windowLog and params->hashLog. - * - * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to - * params->hashLog if it is not). - * - * Ensures that the minMatchLength >= targetLength during optimal parsing. - */ - private static void ZSTD_ldm_adjustParameters( - ldmParams_t* @params, - ZSTD_compressionParameters* cParams - ) + while (n < size) { - @params->windowLog = cParams->windowLog; - if (@params->hashRateLog == 0) + hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; + n += 1; + if ((hash & mask) == 0) { - if (@params->hashLog > 0) - { - assert( - @params->hashLog - <= (uint)( - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30 - ) - ); - if (@params->windowLog > @params->hashLog) - { - @params->hashRateLog = @params->windowLog - @params->hashLog; - } - } - else - { - assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); - @params->hashRateLog = (uint)(7 - (int)cParams->strategy / 3); - } + splits[*numSplits] = n; + *numSplits += 1; + if (*numSplits == 64) + goto done; } + } + + done: + state->rolling = hash; + return n; + } - if (@params->hashLog == 0) + /** ZSTD_ldm_adjustParameters() : + * If the params->hashRateLog is not set, set it to its default value based on + * windowLog and params->hashLog. 
+ * + * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to + * params->hashLog if it is not). + * + * Ensures that the minMatchLength >= targetLength during optimal parsing. + */ + private static void ZSTD_ldm_adjustParameters( + ldmParams_t* @params, + ZSTD_compressionParameters* cParams + ) + { + @params->windowLog = cParams->windowLog; + if (@params->hashRateLog == 0) + { + if (@params->hashLog > 0) { - @params->hashLog = - @params->windowLog - @params->hashRateLog <= 6 ? 6 - : @params->windowLog - @params->hashRateLog + assert( + @params->hashLog <= (uint)( (sizeof(nuint) == 4 ? 30 : 31) < 30 ? sizeof(nuint) == 4 @@ -212,745 +187,769 @@ private static void ZSTD_ldm_adjustParameters( : 31 : 30 ) - ? @params->windowLog - @params->hashRateLog - : (uint)( - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30 - ); - } - - if (@params->minMatchLength == 0) - { - @params->minMatchLength = 64; - if (cParams->strategy >= ZSTD_strategy.ZSTD_btultra) - @params->minMatchLength /= 2; + ); + if (@params->windowLog > @params->hashLog) + { + @params->hashRateLog = @params->windowLog - @params->hashLog; + } } - - if (@params->bucketSizeLog == 0) + else { assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); - @params->bucketSizeLog = - (uint)cParams->strategy <= 4 ? 4 - : (uint)cParams->strategy <= 8 ? (uint)cParams->strategy - : 8; + @params->hashRateLog = (uint)(7 - (int)cParams->strategy / 3); } - - @params->bucketSizeLog = - @params->bucketSizeLog < @params->hashLog - ? @params->bucketSizeLog - : @params->hashLog; } - /** ZSTD_ldm_getTableSize() : - * Estimate the space needed for long distance matching tables or 0 if LDM is - * disabled. - */ - private static nuint ZSTD_ldm_getTableSize(ldmParams_t @params) + if (@params->hashLog == 0) { - nuint ldmHSize = (nuint)1 << (int)@params.hashLog; - nuint ldmBucketSizeLog = - @params.bucketSizeLog < @params.hashLog ? 
@params.bucketSizeLog : @params.hashLog; - nuint ldmBucketSize = (nuint)1 << (int)(@params.hashLog - ldmBucketSizeLog); - nuint totalSize = - ZSTD_cwksp_alloc_size(ldmBucketSize) - + ZSTD_cwksp_alloc_size(ldmHSize * (nuint)sizeof(ldmEntry_t)); - return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? totalSize : 0; + @params->hashLog = + @params->windowLog - @params->hashRateLog <= 6 ? 6 + : @params->windowLog - @params->hashRateLog + <= (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ) + ? @params->windowLog - @params->hashRateLog + : (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ); } - /** ZSTD_ldm_getSeqSpace() : - * Return an upper bound on the number of sequences that can be produced by - * the long distance matcher, or 0 if LDM is disabled. - */ - private static nuint ZSTD_ldm_getMaxNbSeq(ldmParams_t @params, nuint maxChunkSize) + if (@params->minMatchLength == 0) { - return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable - ? maxChunkSize / @params.minMatchLength - : 0; + @params->minMatchLength = 64; + if (cParams->strategy >= ZSTD_strategy.ZSTD_btultra) + @params->minMatchLength /= 2; } - /** ZSTD_ldm_getBucket() : - * Returns a pointer to the start of the bucket associated with hash. */ - private static ldmEntry_t* ZSTD_ldm_getBucket( - ldmState_t* ldmState, - nuint hash, - uint bucketSizeLog - ) + if (@params->bucketSizeLog == 0) { - return ldmState->hashTable + (hash << (int)bucketSizeLog); + assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); + @params->bucketSizeLog = + (uint)cParams->strategy <= 4 ? 4 + : (uint)cParams->strategy <= 8 ? 
(uint)cParams->strategy + : 8; } - /** ZSTD_ldm_insertEntry() : - * Insert the entry with corresponding hash into the hash table */ - private static void ZSTD_ldm_insertEntry( - ldmState_t* ldmState, - nuint hash, - ldmEntry_t entry, - uint bucketSizeLog - ) - { - byte* pOffset = ldmState->bucketOffsets + hash; - uint offset = *pOffset; - *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; - *pOffset = (byte)(offset + 1 & (1U << (int)bucketSizeLog) - 1); - } + @params->bucketSizeLog = + @params->bucketSizeLog < @params->hashLog + ? @params->bucketSizeLog + : @params->hashLog; + } - /** ZSTD_ldm_countBackwardsMatch() : - * Returns the number of bytes that match backwards before pIn and pMatch. - * - * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ - private static nuint ZSTD_ldm_countBackwardsMatch( - byte* pIn, - byte* pAnchor, - byte* pMatch, - byte* pMatchBase - ) - { - nuint matchLength = 0; - while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) - { - pIn--; - pMatch--; - matchLength++; - } + /** ZSTD_ldm_getTableSize() : + * Estimate the space needed for long distance matching tables or 0 if LDM is + * disabled. + */ + private static nuint ZSTD_ldm_getTableSize(ldmParams_t @params) + { + nuint ldmHSize = (nuint)1 << (int)@params.hashLog; + nuint ldmBucketSizeLog = + @params.bucketSizeLog < @params.hashLog ? @params.bucketSizeLog : @params.hashLog; + nuint ldmBucketSize = (nuint)1 << (int)(@params.hashLog - ldmBucketSizeLog); + nuint totalSize = + ZSTD_cwksp_alloc_size(ldmBucketSize) + + ZSTD_cwksp_alloc_size(ldmHSize * (nuint)sizeof(ldmEntry_t)); + return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? totalSize : 0; + } - return matchLength; - } + /** ZSTD_ldm_getSeqSpace() : + * Return an upper bound on the number of sequences that can be produced by + * the long distance matcher, or 0 if LDM is disabled. 
+ */ + private static nuint ZSTD_ldm_getMaxNbSeq(ldmParams_t @params, nuint maxChunkSize) + { + return @params.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? maxChunkSize / @params.minMatchLength + : 0; + } + + /** ZSTD_ldm_getBucket() : + * Returns a pointer to the start of the bucket associated with hash. */ + private static ldmEntry_t* ZSTD_ldm_getBucket( + ldmState_t* ldmState, + nuint hash, + uint bucketSizeLog + ) + { + return ldmState->hashTable + (hash << (int)bucketSizeLog); + } + + /** ZSTD_ldm_insertEntry() : + * Insert the entry with corresponding hash into the hash table */ + private static void ZSTD_ldm_insertEntry( + ldmState_t* ldmState, + nuint hash, + ldmEntry_t entry, + uint bucketSizeLog + ) + { + byte* pOffset = ldmState->bucketOffsets + hash; + uint offset = *pOffset; + *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; + *pOffset = (byte)(offset + 1 & (1U << (int)bucketSizeLog) - 1); + } - /** ZSTD_ldm_countBackwardsMatch_2segments() : - * Returns the number of bytes that match backwards from pMatch, - * even with the backwards match spanning 2 different segments. - * - * On reaching `pMatchBase`, start counting from mEnd */ - private static nuint ZSTD_ldm_countBackwardsMatch_2segments( - byte* pIn, - byte* pAnchor, - byte* pMatch, - byte* pMatchBase, - byte* pExtDictStart, - byte* pExtDictEnd - ) + /** ZSTD_ldm_countBackwardsMatch() : + * Returns the number of bytes that match backwards before pIn and pMatch. + * + * We count only bytes where pMatch >= pBase and pIn >= pAnchor. 
*/ + private static nuint ZSTD_ldm_countBackwardsMatch( + byte* pIn, + byte* pAnchor, + byte* pMatch, + byte* pMatchBase + ) + { + nuint matchLength = 0; + while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) { - nuint matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); - if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) - { - return matchLength; - } + pIn--; + pMatch--; + matchLength++; + } - matchLength += ZSTD_ldm_countBackwardsMatch( - pIn - matchLength, - pAnchor, - pExtDictEnd, - pExtDictStart - ); + return matchLength; + } + + /** ZSTD_ldm_countBackwardsMatch_2segments() : + * Returns the number of bytes that match backwards from pMatch, + * even with the backwards match spanning 2 different segments. + * + * On reaching `pMatchBase`, start counting from mEnd */ + private static nuint ZSTD_ldm_countBackwardsMatch_2segments( + byte* pIn, + byte* pAnchor, + byte* pMatch, + byte* pMatchBase, + byte* pExtDictStart, + byte* pExtDictEnd + ) + { + nuint matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase); + if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) + { return matchLength; } - /** ZSTD_ldm_fillFastTables() : - * - * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. - * This is similar to ZSTD_loadDictionaryContent. - * - * The tables for the other strategies are filled within their - * block compressors. 
*/ - private static nuint ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void* end) - { - byte* iend = (byte*)end; - switch (ms->cParams.strategy) - { - case ZSTD_strategy.ZSTD_fast: - ZSTD_fillHashTable( - ms, - iend, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx - ); - break; - case ZSTD_strategy.ZSTD_dfast: - ZSTD_fillDoubleHashTable( - ms, - iend, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx - ); - break; - case ZSTD_strategy.ZSTD_greedy: - case ZSTD_strategy.ZSTD_lazy: - case ZSTD_strategy.ZSTD_lazy2: - case ZSTD_strategy.ZSTD_btlazy2: - case ZSTD_strategy.ZSTD_btopt: - case ZSTD_strategy.ZSTD_btultra: - case ZSTD_strategy.ZSTD_btultra2: - break; - default: - assert(0 != 0); - break; - } + matchLength += ZSTD_ldm_countBackwardsMatch( + pIn - matchLength, + pAnchor, + pExtDictEnd, + pExtDictStart + ); + return matchLength; + } - return 0; + /** ZSTD_ldm_fillFastTables() : + * + * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. + * This is similar to ZSTD_loadDictionaryContent. + * + * The tables for the other strategies are filled within their + * block compressors. 
*/ + private static nuint ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, void* end) + { + byte* iend = (byte*)end; + switch (ms->cParams.strategy) + { + case ZSTD_strategy.ZSTD_fast: + ZSTD_fillHashTable( + ms, + iend, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx + ); + break; + case ZSTD_strategy.ZSTD_dfast: + ZSTD_fillDoubleHashTable( + ms, + iend, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + ZSTD_tableFillPurpose_e.ZSTD_tfp_forCCtx + ); + break; + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_lazy2: + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_btopt: + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btultra2: + break; + default: + assert(0 != 0); + break; } - private static void ZSTD_ldm_fillHashTable( - ldmState_t* ldmState, - byte* ip, - byte* iend, - ldmParams_t* @params - ) + return 0; + } + + private static void ZSTD_ldm_fillHashTable( + ldmState_t* ldmState, + byte* ip, + byte* iend, + ldmParams_t* @params + ) + { + uint minMatchLength = @params->minMatchLength; + uint bucketSizeLog = @params->bucketSizeLog; + uint hBits = @params->hashLog - bucketSizeLog; + byte* @base = ldmState->window.@base; + byte* istart = ip; + ldmRollingHashState_t hashState; + nuint* splits = &ldmState->splitIndices.e0; + uint numSplits; + ZSTD_ldm_gear_init(&hashState, @params); + while (ip < iend) { - uint minMatchLength = @params->minMatchLength; - uint bucketSizeLog = @params->bucketSizeLog; - uint hBits = @params->hashLog - bucketSizeLog; - byte* @base = ldmState->window.@base; - byte* istart = ip; - ldmRollingHashState_t hashState; - nuint* splits = &ldmState->splitIndices.e0; - uint numSplits; - ZSTD_ldm_gear_init(&hashState, @params); - while (ip < iend) + nuint hashed; + uint n; + numSplits = 0; + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(iend - ip), splits, &numSplits); + for (n = 0; n < numSplits; n++) { - nuint hashed; - uint n; - numSplits = 0; 
- hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(iend - ip), splits, &numSplits); - for (n = 0; n < numSplits; n++) + if (ip + splits[n] >= istart + minMatchLength) { - if (ip + splits[n] >= istart + minMatchLength) - { - byte* split = ip + splits[n] - minMatchLength; - ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); - uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); - ldmEntry_t entry; - entry.offset = (uint)(split - @base); - entry.checksum = (uint)(xxhash >> 32); - ZSTD_ldm_insertEntry(ldmState, hash, entry, @params->bucketSizeLog); - } + byte* split = ip + splits[n] - minMatchLength; + ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); + uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); + ldmEntry_t entry; + entry.offset = (uint)(split - @base); + entry.checksum = (uint)(xxhash >> 32); + ZSTD_ldm_insertEntry(ldmState, hash, entry, @params->bucketSizeLog); } - - ip += hashed; } + + ip += hashed; } + } - /** ZSTD_ldm_limitTableUpdate() : - * - * Sets cctx->nextToUpdate to a position corresponding closer to anchor - * if it is far way - * (after a long match, only update tables a limited amount). */ - private static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, byte* anchor) + /** ZSTD_ldm_limitTableUpdate() : + * + * Sets cctx->nextToUpdate to a position corresponding closer to anchor + * if it is far way + * (after a long match, only update tables a limited amount). */ + private static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, byte* anchor) + { + uint curr = (uint)(anchor - ms->window.@base); + if (curr > ms->nextToUpdate + 1024) { - uint curr = (uint)(anchor - ms->window.@base); - if (curr > ms->nextToUpdate + 1024) - { - ms->nextToUpdate = - curr - - (512 < curr - ms->nextToUpdate - 1024 ? 512 : curr - ms->nextToUpdate - 1024); - } + ms->nextToUpdate = + curr + - (512 < curr - ms->nextToUpdate - 1024 ? 
512 : curr - ms->nextToUpdate - 1024); } + } - private static nuint ZSTD_ldm_generateSequences_internal( - ldmState_t* ldmState, - RawSeqStore_t* rawSeqStore, - ldmParams_t* @params, - void* src, - nuint srcSize - ) + private static nuint ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, + RawSeqStore_t* rawSeqStore, + ldmParams_t* @params, + void* src, + nuint srcSize + ) + { + /* LDM parameters */ + int extDict = (int)ZSTD_window_hasExtDict(ldmState->window); + uint minMatchLength = @params->minMatchLength; + uint entsPerBucket = 1U << (int)@params->bucketSizeLog; + uint hBits = @params->hashLog - @params->bucketSizeLog; + /* Prefix and extDict parameters */ + uint dictLimit = ldmState->window.dictLimit; + uint lowestIndex = extDict != 0 ? ldmState->window.lowLimit : dictLimit; + byte* @base = ldmState->window.@base; + byte* dictBase = extDict != 0 ? ldmState->window.dictBase : null; + byte* dictStart = extDict != 0 ? dictBase + lowestIndex : null; + byte* dictEnd = extDict != 0 ? 
dictBase + dictLimit : null; + byte* lowPrefixPtr = @base + dictLimit; + /* Input bounds */ + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + /* Input positions */ + byte* anchor = istart; + byte* ip = istart; + /* Rolling hash state */ + ldmRollingHashState_t hashState; + /* Arrays for staged-processing */ + nuint* splits = &ldmState->splitIndices.e0; + ldmMatchCandidate_t* candidates = &ldmState->matchCandidates.e0; + uint numSplits; + if (srcSize < minMatchLength) + return (nuint)(iend - anchor); + ZSTD_ldm_gear_init(&hashState, @params); + ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength); + ip += minMatchLength; + while (ip < ilimit) { - /* LDM parameters */ - int extDict = (int)ZSTD_window_hasExtDict(ldmState->window); - uint minMatchLength = @params->minMatchLength; - uint entsPerBucket = 1U << (int)@params->bucketSizeLog; - uint hBits = @params->hashLog - @params->bucketSizeLog; - /* Prefix and extDict parameters */ - uint dictLimit = ldmState->window.dictLimit; - uint lowestIndex = extDict != 0 ? ldmState->window.lowLimit : dictLimit; - byte* @base = ldmState->window.@base; - byte* dictBase = extDict != 0 ? ldmState->window.dictBase : null; - byte* dictStart = extDict != 0 ? dictBase + lowestIndex : null; - byte* dictEnd = extDict != 0 ? 
dictBase + dictLimit : null; - byte* lowPrefixPtr = @base + dictLimit; - /* Input bounds */ - byte* istart = (byte*)src; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - /* Input positions */ - byte* anchor = istart; - byte* ip = istart; - /* Rolling hash state */ - ldmRollingHashState_t hashState; - /* Arrays for staged-processing */ - nuint* splits = &ldmState->splitIndices.e0; - ldmMatchCandidate_t* candidates = &ldmState->matchCandidates.e0; - uint numSplits; - if (srcSize < minMatchLength) - return (nuint)(iend - anchor); - ZSTD_ldm_gear_init(&hashState, @params); - ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength); - ip += minMatchLength; - while (ip < ilimit) + nuint hashed; + uint n; + numSplits = 0; + hashed = ZSTD_ldm_gear_feed( + &hashState, + ip, + (nuint)(ilimit - ip), + splits, + &numSplits + ); + for (n = 0; n < numSplits; n++) { - nuint hashed; - uint n; - numSplits = 0; - hashed = ZSTD_ldm_gear_feed( - &hashState, - ip, - (nuint)(ilimit - ip), - splits, - &numSplits + byte* split = ip + splits[n] - minMatchLength; + ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); + uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); + candidates[n].split = split; + candidates[n].hash = hash; + candidates[n].checksum = (uint)(xxhash >> 32); + candidates[n].bucket = ZSTD_ldm_getBucket( + ldmState, + hash, + @params->bucketSizeLog ); - for (n = 0; n < numSplits; n++) - { - byte* split = ip + splits[n] - minMatchLength; - ulong xxhash = ZSTD_XXH64(split, minMatchLength, 0); - uint hash = (uint)(xxhash & ((uint)1 << (int)hBits) - 1); - candidates[n].split = split; - candidates[n].hash = hash; - candidates[n].checksum = (uint)(xxhash >> 32); - candidates[n].bucket = ZSTD_ldm_getBucket( - ldmState, - hash, - @params->bucketSizeLog - ); #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch0(candidates[n].bucket); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + 
{ + System.Runtime.Intrinsics.X86.Sse.Prefetch0(candidates[n].bucket); + } #endif + } + + for (n = 0; n < numSplits; n++) + { + nuint forwardMatchLength = 0, + backwardMatchLength = 0, + bestMatchLength = 0, + mLength; + uint offset; + byte* split = candidates[n].split; + uint checksum = candidates[n].checksum; + uint hash = candidates[n].hash; + ldmEntry_t* bucket = candidates[n].bucket; + ldmEntry_t* cur; + ldmEntry_t* bestEntry = null; + ldmEntry_t newEntry; + newEntry.offset = (uint)(split - @base); + newEntry.checksum = checksum; + if (split < anchor) + { + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); + continue; } - for (n = 0; n < numSplits; n++) + for (cur = bucket; cur < bucket + entsPerBucket; cur++) { - nuint forwardMatchLength = 0, - backwardMatchLength = 0, - bestMatchLength = 0, - mLength; - uint offset; - byte* split = candidates[n].split; - uint checksum = candidates[n].checksum; - uint hash = candidates[n].hash; - ldmEntry_t* bucket = candidates[n].bucket; - ldmEntry_t* cur; - ldmEntry_t* bestEntry = null; - ldmEntry_t newEntry; - newEntry.offset = (uint)(split - @base); - newEntry.checksum = checksum; - if (split < anchor) + nuint curForwardMatchLength, + curBackwardMatchLength, + curTotalMatchLength; + if (cur->checksum != checksum || cur->offset <= lowestIndex) { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); continue; } - for (cur = bucket; cur < bucket + entsPerBucket; cur++) + if (extDict != 0) { - nuint curForwardMatchLength, - curBackwardMatchLength, - curTotalMatchLength; - if (cur->checksum != checksum || cur->offset <= lowestIndex) + byte* curMatchBase = cur->offset < dictLimit ? dictBase : @base; + byte* pMatch = curMatchBase + cur->offset; + byte* matchEnd = cur->offset < dictLimit ? dictEnd : iend; + byte* lowMatchPtr = cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; + curForwardMatchLength = ZSTD_count_2segments( + split, + pMatch, + iend, + matchEnd, + lowPrefixPtr + ); + if (curForwardMatchLength < minMatchLength) { continue; } - if (extDict != 0) - { - byte* curMatchBase = cur->offset < dictLimit ? dictBase : @base; - byte* pMatch = curMatchBase + cur->offset; - byte* matchEnd = cur->offset < dictLimit ? dictEnd : iend; - byte* lowMatchPtr = cur->offset < dictLimit ? dictStart : lowPrefixPtr; - curForwardMatchLength = ZSTD_count_2segments( - split, - pMatch, - iend, - matchEnd, - lowPrefixPtr - ); - if (curForwardMatchLength < minMatchLength) - { - continue; - } - - curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( - split, - anchor, - pMatch, - lowMatchPtr, - dictStart, - dictEnd - ); - } - else - { - byte* pMatch = @base + cur->offset; - curForwardMatchLength = ZSTD_count(split, pMatch, iend); - if (curForwardMatchLength < minMatchLength) - { - continue; - } - - curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch( - split, - anchor, - pMatch, - lowPrefixPtr - ); - } - - curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; - if (curTotalMatchLength > bestMatchLength) + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments( + split, + anchor, + pMatch, + lowMatchPtr, + dictStart, + dictEnd + ); + } + else + { + byte* pMatch = @base + cur->offset; + curForwardMatchLength = ZSTD_count(split, pMatch, iend); + if (curForwardMatchLength < minMatchLength) { - bestMatchLength = curTotalMatchLength; - forwardMatchLength = curForwardMatchLength; - backwardMatchLength = curBackwardMatchLength; - bestEntry = cur; + continue; } - } - if (bestEntry == null) - { - ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); - continue; + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch( + split, + anchor, + pMatch, + lowPrefixPtr + ); } - offset = (uint)(split - @base) - bestEntry->offset; - mLength = forwardMatchLength + backwardMatchLength; + 
curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength; + if (curTotalMatchLength > bestMatchLength) { - rawSeq* seq = rawSeqStore->seq + rawSeqStore->size; - if (rawSeqStore->size == rawSeqStore->capacity) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - seq->litLength = (uint)(split - backwardMatchLength - anchor); - seq->matchLength = (uint)mLength; - seq->offset = offset; - rawSeqStore->size++; + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; } + } + if (bestEntry == null) + { ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); - anchor = split + forwardMatchLength; - if (anchor > ip + hashed) - { - ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); - ip = anchor - hashed; - break; - } + continue; + } + + offset = (uint)(split - @base) - bestEntry->offset; + mLength = forwardMatchLength + backwardMatchLength; + { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->size; + if (rawSeqStore->size == rawSeqStore->capacity) + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + seq->litLength = (uint)(split - backwardMatchLength - anchor); + seq->matchLength = (uint)mLength; + seq->offset = offset; + rawSeqStore->size++; } - ip += hashed; + ZSTD_ldm_insertEntry(ldmState, hash, newEntry, @params->bucketSizeLog); + anchor = split + forwardMatchLength; + if (anchor > ip + hashed) + { + ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); + ip = anchor - hashed; + break; + } } - return (nuint)(iend - anchor); + ip += hashed; } - /*! ZSTD_ldm_reduceTable() : - * reduce table indexes by `reducerValue` */ - private static void ZSTD_ldm_reduceTable(ldmEntry_t* table, uint size, uint reducerValue) + return (nuint)(iend - anchor); + } + + /*! 
ZSTD_ldm_reduceTable() : + * reduce table indexes by `reducerValue` */ + private static void ZSTD_ldm_reduceTable(ldmEntry_t* table, uint size, uint reducerValue) + { + uint u; + for (u = 0; u < size; u++) { - uint u; - for (u = 0; u < size; u++) - { - if (table[u].offset < reducerValue) - table[u].offset = 0; - else - table[u].offset -= reducerValue; - } + if (table[u].offset < reducerValue) + table[u].offset = 0; + else + table[u].offset -= reducerValue; } + } - /** - * ZSTD_ldm_generateSequences(): - * - * Generates the sequences using the long distance match finder. - * Generates long range matching sequences in `sequences`, which parse a prefix - * of the source. `sequences` must be large enough to store every sequence, - * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. - * @returns 0 or an error code. - * - * NOTE: The user must have called ZSTD_window_update() for all of the input - * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. - * NOTE: This function returns an error if it runs out of space to store - * sequences. - */ - private static nuint ZSTD_ldm_generateSequences( - ldmState_t* ldmState, - RawSeqStore_t* sequences, - ldmParams_t* @params, - void* src, - nuint srcSize - ) + /** + * ZSTD_ldm_generateSequences(): + * + * Generates the sequences using the long distance match finder. + * Generates long range matching sequences in `sequences`, which parse a prefix + * of the source. `sequences` must be large enough to store every sequence, + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. + * @returns 0 or an error code. + * + * NOTE: The user must have called ZSTD_window_update() for all of the input + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. + * NOTE: This function returns an error if it runs out of space to store + * sequences. 
+ */ + private static nuint ZSTD_ldm_generateSequences( + ldmState_t* ldmState, + RawSeqStore_t* sequences, + ldmParams_t* @params, + void* src, + nuint srcSize + ) + { + uint maxDist = 1U << (int)@params->windowLog; + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + const nuint kMaxChunkSize = 1 << 20; + nuint nbChunks = + srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); + nuint chunk; + nuint leftoverSize = 0; + assert( + unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) + >= kMaxChunkSize + ); + assert(ldmState->window.nextSrc >= (byte*)src + srcSize); + assert(sequences->pos <= sequences->size); + assert(sequences->size <= sequences->capacity); + for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { - uint maxDist = 1U << (int)@params->windowLog; - byte* istart = (byte*)src; - byte* iend = istart + srcSize; - const nuint kMaxChunkSize = 1 << 20; - nuint nbChunks = - srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); - nuint chunk; - nuint leftoverSize = 0; - assert( - unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - >= kMaxChunkSize - ); - assert(ldmState->window.nextSrc >= (byte*)src + srcSize); - assert(sequences->pos <= sequences->size); - assert(sequences->size <= sequences->capacity); - for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) + byte* chunkStart = istart + chunk * kMaxChunkSize; + nuint remaining = (nuint)(iend - chunkStart); + byte* chunkEnd = remaining < kMaxChunkSize ? 
iend : chunkStart + kMaxChunkSize; + nuint chunkSize = (nuint)(chunkEnd - chunkStart); + nuint newLeftoverSize; + nuint prevSize = sequences->size; + assert(chunkStart < iend); + if ( + ZSTD_window_needOverflowCorrection( + ldmState->window, + 0, + maxDist, + ldmState->loadedDictEnd, + chunkStart, + chunkEnd + ) != 0 + ) { - byte* chunkStart = istart + chunk * kMaxChunkSize; - nuint remaining = (nuint)(iend - chunkStart); - byte* chunkEnd = remaining < kMaxChunkSize ? iend : chunkStart + kMaxChunkSize; - nuint chunkSize = (nuint)(chunkEnd - chunkStart); - nuint newLeftoverSize; - nuint prevSize = sequences->size; - assert(chunkStart < iend); - if ( - ZSTD_window_needOverflowCorrection( - ldmState->window, - 0, - maxDist, - ldmState->loadedDictEnd, - chunkStart, - chunkEnd - ) != 0 - ) - { - uint ldmHSize = 1U << (int)@params->hashLog; - uint correction = ZSTD_window_correctOverflow( - &ldmState->window, - 0, - maxDist, - chunkStart - ); - ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); - ldmState->loadedDictEnd = 0; - } - - ZSTD_window_enforceMaxDist( + uint ldmHSize = 1U << (int)@params->hashLog; + uint correction = ZSTD_window_correctOverflow( &ldmState->window, - chunkEnd, + 0, maxDist, - &ldmState->loadedDictEnd, - null - ); - newLeftoverSize = ZSTD_ldm_generateSequences_internal( - ldmState, - sequences, - @params, - chunkStart, - chunkSize + chunkStart ); - if (ERR_isError(newLeftoverSize)) - return newLeftoverSize; - if (prevSize < sequences->size) - { - sequences->seq[prevSize].litLength += (uint)leftoverSize; - leftoverSize = newLeftoverSize; - } - else - { - assert(newLeftoverSize == chunkSize); - leftoverSize += chunkSize; - } + ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); + ldmState->loadedDictEnd = 0; } - return 0; + ZSTD_window_enforceMaxDist( + &ldmState->window, + chunkEnd, + maxDist, + &ldmState->loadedDictEnd, + null + ); + newLeftoverSize = ZSTD_ldm_generateSequences_internal( + ldmState, + sequences, + 
@params, + chunkStart, + chunkSize + ); + if (ERR_isError(newLeftoverSize)) + return newLeftoverSize; + if (prevSize < sequences->size) + { + sequences->seq[prevSize].litLength += (uint)leftoverSize; + leftoverSize = newLeftoverSize; + } + else + { + assert(newLeftoverSize == chunkSize); + leftoverSize += chunkSize; + } } - /** - * ZSTD_ldm_skipSequences(): - * - * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. - * Avoids emitting matches less than `minMatch` bytes. - * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). - */ - private static void ZSTD_ldm_skipSequences( - RawSeqStore_t* rawSeqStore, - nuint srcSize, - uint minMatch - ) + return 0; + } + + /** + * ZSTD_ldm_skipSequences(): + * + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. + * Avoids emitting matches less than `minMatch` bytes. + * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). + */ + private static void ZSTD_ldm_skipSequences( + RawSeqStore_t* rawSeqStore, + nuint srcSize, + uint minMatch + ) + { + while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { - while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) + rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; + if (srcSize <= seq->litLength) { - rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; - if (srcSize <= seq->litLength) - { - seq->litLength -= (uint)srcSize; - return; - } + seq->litLength -= (uint)srcSize; + return; + } - srcSize -= seq->litLength; - seq->litLength = 0; - if (srcSize < seq->matchLength) + srcSize -= seq->litLength; + seq->litLength = 0; + if (srcSize < seq->matchLength) + { + seq->matchLength -= (uint)srcSize; + if (seq->matchLength < minMatch) { - seq->matchLength -= (uint)srcSize; - if (seq->matchLength < minMatch) + if (rawSeqStore->pos + 1 < rawSeqStore->size) { - if (rawSeqStore->pos + 1 < rawSeqStore->size) - { - seq[1].litLength += seq[0].matchLength; - } - - rawSeqStore->pos++; + seq[1].litLength += seq[0].matchLength; } - 
return; + rawSeqStore->pos++; } - srcSize -= seq->matchLength; - seq->matchLength = 0; - rawSeqStore->pos++; + return; } + + srcSize -= seq->matchLength; + seq->matchLength = 0; + rawSeqStore->pos++; } + } - /** - * If the sequence length is longer than remaining then the sequence is split - * between this block and the next. - * - * Returns the current sequence to handle, or if the rest of the block should - * be literals, it returns a sequence with offset == 0. - */ - private static rawSeq maybeSplitSequence( - RawSeqStore_t* rawSeqStore, - uint remaining, - uint minMatch - ) + /** + * If the sequence length is longer than remaining then the sequence is split + * between this block and the next. + * + * Returns the current sequence to handle, or if the rest of the block should + * be literals, it returns a sequence with offset == 0. + */ + private static rawSeq maybeSplitSequence( + RawSeqStore_t* rawSeqStore, + uint remaining, + uint minMatch + ) + { + rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; + assert(sequence.offset > 0); + if (remaining >= sequence.litLength + sequence.matchLength) { - rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; - assert(sequence.offset > 0); - if (remaining >= sequence.litLength + sequence.matchLength) - { - rawSeqStore->pos++; - return sequence; - } + rawSeqStore->pos++; + return sequence; + } - if (remaining <= sequence.litLength) + if (remaining <= sequence.litLength) + { + sequence.offset = 0; + } + else if (remaining < sequence.litLength + sequence.matchLength) + { + sequence.matchLength = remaining - sequence.litLength; + if (sequence.matchLength < minMatch) { sequence.offset = 0; } - else if (remaining < sequence.litLength + sequence.matchLength) - { - sequence.matchLength = remaining - sequence.litLength; - if (sequence.matchLength < minMatch) - { - sequence.offset = 0; - } - } - - ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); - return sequence; } - /* ZSTD_ldm_skipRawSeqStoreBytes(): - * Moves 
forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'. - * Not to be used in conjunction with ZSTD_ldm_skipSequences(). - * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). - */ - private static void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) + ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); + return sequence; + } + + /* ZSTD_ldm_skipRawSeqStoreBytes(): + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'. + * Not to be used in conjunction with ZSTD_ldm_skipSequences(). + * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). + */ + private static void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) + { + uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); + while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) { - uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); - while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) { - rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; - if (currPos >= currSeq.litLength + currSeq.matchLength) - { - currPos -= currSeq.litLength + currSeq.matchLength; - rawSeqStore->pos++; - } - else - { - rawSeqStore->posInSequence = currPos; - break; - } + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; } - - if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) + else { - rawSeqStore->posInSequence = 0; + rawSeqStore->posInSequence = currPos; + break; } } - /** - * ZSTD_ldm_blockCompress(): - * - * Compresses a block using the predefined sequences, along with a secondary - * block compressor. The literals section of every sequence is passed to the - * secondary block compressor, and those sequences are interspersed with the - * predefined sequences. Returns the length of the last literals. 
- * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. - * `rawSeqStore.seq` may also be updated to split the last sequence between two - * blocks. - * @return The length of the last literals. - * - * NOTE: The source must be at most the maximum block size, but the predefined - * sequences can be any size, and may be longer than the block. In the case that - * they are longer than the block, the last sequences may need to be split into - * two. We handle that case correctly, and update `rawSeqStore` appropriately. - * NOTE: This function does not return any errors. - */ - private static nuint ZSTD_ldm_blockCompress( - RawSeqStore_t* rawSeqStore, - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - ZSTD_paramSwitch_e useRowMatchFinder, - void* src, - nuint srcSize - ) + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint minMatch = cParams->minMatch; - ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( - cParams->strategy, - useRowMatchFinder, - ZSTD_matchState_dictMode(ms) - ); - /* Input bounds */ - byte* istart = (byte*)src; - byte* iend = istart + srcSize; - /* Input positions */ - byte* ip = istart; - if (cParams->strategy >= ZSTD_strategy.ZSTD_btopt) - { - nuint lastLLSize; - ms->ldmSeqStore = rawSeqStore; - lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); - ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); - return lastLLSize; - } + rawSeqStore->posInSequence = 0; + } + } - assert(rawSeqStore->pos <= rawSeqStore->size); - assert(rawSeqStore->size <= rawSeqStore->capacity); - while (rawSeqStore->pos < rawSeqStore->size && ip < iend) - { - /* maybeSplitSequence updates rawSeqStore->pos */ - rawSeq sequence = maybeSplitSequence(rawSeqStore, (uint)(iend - ip), minMatch); - if (sequence.offset == 0) - break; - assert(ip + sequence.litLength + sequence.matchLength <= iend); - ZSTD_ldm_limitTableUpdate(ms, ip); - 
ZSTD_ldm_fillFastTables(ms, ip); - { - int i; - nuint newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); - ip += sequence.litLength; - for (i = 3 - 1; i > 0; i--) - rep[i] = rep[i - 1]; - rep[0] = sequence.offset; - assert(sequence.offset > 0); - ZSTD_storeSeq( - seqStore, - newLitLength, - ip - newLitLength, - iend, - sequence.offset + 3, - sequence.matchLength - ); - ip += sequence.matchLength; - } - } + /** + * ZSTD_ldm_blockCompress(): + * + * Compresses a block using the predefined sequences, along with a secondary + * block compressor. The literals section of every sequence is passed to the + * secondary block compressor, and those sequences are interspersed with the + * predefined sequences. Returns the length of the last literals. + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. + * `rawSeqStore.seq` may also be updated to split the last sequence between two + * blocks. + * @return The length of the last literals. + * + * NOTE: The source must be at most the maximum block size, but the predefined + * sequences can be any size, and may be longer than the block. In the case that + * they are longer than the block, the last sequences may need to be split into + * two. We handle that case correctly, and update `rawSeqStore` appropriately. + * NOTE: This function does not return any errors. 
+ */ + private static nuint ZSTD_ldm_blockCompress( + RawSeqStore_t* rawSeqStore, + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + ZSTD_paramSwitch_e useRowMatchFinder, + void* src, + nuint srcSize + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint minMatch = cParams->minMatch; + ZSTD_BlockCompressor_f blockCompressor = ZSTD_selectBlockCompressor( + cParams->strategy, + useRowMatchFinder, + ZSTD_matchState_dictMode(ms) + ); + /* Input bounds */ + byte* istart = (byte*)src; + byte* iend = istart + srcSize; + /* Input positions */ + byte* ip = istart; + if (cParams->strategy >= ZSTD_strategy.ZSTD_btopt) + { + nuint lastLLSize; + ms->ldmSeqStore = rawSeqStore; + lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize); + ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize); + return lastLLSize; + } + assert(rawSeqStore->pos <= rawSeqStore->size); + assert(rawSeqStore->size <= rawSeqStore->capacity); + while (rawSeqStore->pos < rawSeqStore->size && ip < iend) + { + /* maybeSplitSequence updates rawSeqStore->pos */ + rawSeq sequence = maybeSplitSequence(rawSeqStore, (uint)(iend - ip), minMatch); + if (sequence.offset == 0) + break; + assert(ip + sequence.litLength + sequence.matchLength <= iend); ZSTD_ldm_limitTableUpdate(ms, ip); ZSTD_ldm_fillFastTables(ms, ip); - return blockCompressor(ms, seqStore, rep, ip, (nuint)(iend - ip)); + { + int i; + nuint newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); + ip += sequence.litLength; + for (i = 3 - 1; i > 0; i--) + rep[i] = rep[i - 1]; + rep[0] = sequence.offset; + assert(sequence.offset > 0); + ZSTD_storeSeq( + seqStore, + newLitLength, + ip - newLitLength, + iend, + sequence.offset + 3, + sequence.matchLength + ); + ip += sequence.matchLength; + } } + + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, ip); + return blockCompressor(ms, seqStore, rep, ip, (nuint)(iend - ip)); } } diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs index 5cf4c7a43..a828beb2a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs @@ -1,11 +1,11 @@ using System; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods - { #if NET7_0_OR_GREATER private static ReadOnlySpan Span_ZSTD_ldm_gearTab => new ulong[256] @@ -274,267 +274,266 @@ ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab) ); #else - private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer( - new ulong[256] - { - 0xf5b8f72c5f77775c, - 0x84935f266b7ac412, - 0xb647ada9ca730ccc, - 0xb065bb4b114fb1de, - 0x34584e7e8c3a9fd0, - 0x4e97e17c6ae26b05, - 0x3a03d743bc99a604, - 0xcecd042422c4044f, - 0x76de76c58524259e, - 0x9c8528f65badeaca, - 0x86563706e2097529, - 0x2902475fa375d889, - 0xafb32a9739a5ebe6, - 0xce2714da3883e639, - 0x21eaf821722e69e, - 0x37b628620b628, - 0x49a8d455d88caf5, - 0x8556d711e6958140, - 0x4f7ae74fc605c1f, - 0x829f0c3468bd3a20, - 0x4ffdc885c625179e, - 0x8473de048a3daf1b, - 0x51008822b05646b2, - 0x69d75d12b2d1cc5f, - 0x8c9d4a19159154bc, - 0xc3cc10f4abbd4003, - 0xd06ddc1cecb97391, - 0xbe48e6e7ed80302e, - 0x3481db31cee03547, - 0xacc3f67cdaa1d210, - 0x65cb771d8c7f96cc, - 0x8eb27177055723dd, - 0xc789950d44cd94be, - 0x934feadc3700b12b, - 0x5e485f11edbdf182, - 0x1e2e2a46fd64767a, - 0x2969ca71d82efa7c, - 0x9d46e9935ebbba2e, - 0xe056b67e05e6822b, - 0x94d73f55739d03a0, - 0xcd7010bdb69b5a03, - 0x455ef9fcd79b82f4, - 0x869cb54a8749c161, - 0x38d1a4fa6185d225, - 0xb475166f94bbe9bb, - 0xa4143548720959f1, - 0x7aed4780ba6b26ba, - 0xd0ce264439e02312, - 
0x84366d746078d508, - 0xa8ce973c72ed17be, - 0x21c323a29a430b01, - 0x9962d617e3af80ee, - 0xab0ce91d9c8cf75b, - 0x530e8ee6d19a4dbc, - 0x2ef68c0cf53f5d72, - 0xc03a681640a85506, - 0x496e4e9f9c310967, - 0x78580472b59b14a0, - 0x273824c23b388577, - 0x66bf923ad45cb553, - 0x47ae1a5a2492ba86, - 0x35e304569e229659, - 0x4765182a46870b6f, - 0x6cbab625e9099412, - 0xddac9a2e598522c1, - 0x7172086e666624f2, - 0xdf5003ca503b7837, - 0x88c0c1db78563d09, - 0x58d51865acfc289d, - 0x177671aec65224f1, - 0xfb79d8a241e967d7, - 0x2be1e101cad9a49a, - 0x6625682f6e29186b, - 0x399553457ac06e50, - 0x35dffb4c23abb74, - 0x429db2591f54aade, - 0xc52802a8037d1009, - 0x6acb27381f0b25f3, - 0xf45e2551ee4f823b, - 0x8b0ea2d99580c2f7, - 0x3bed519cbcb4e1e1, - 0xff452823dbb010a, - 0x9d42ed614f3dd267, - 0x5b9313c06257c57b, - 0xa114b8008b5e1442, - 0xc1fe311c11c13d4b, - 0x66e8763ea34c5568, - 0x8b982af1c262f05d, - 0xee8876faaa75fbb7, - 0x8a62a4d0d172bb2a, - 0xc13d94a3b7449a97, - 0x6dbbba9dc15d037c, - 0xc786101f1d92e0f1, - 0xd78681a907a0b79b, - 0xf61aaf2962c9abb9, - 0x2cfd16fcd3cb7ad9, - 0x868c5b6744624d21, - 0x25e650899c74ddd7, - 0xba042af4a7c37463, - 0x4eb1a539465a3eca, - 0xbe09dbf03b05d5ca, - 0x774e5a362b5472ba, - 0x47a1221229d183cd, - 0x504b0ca18ef5a2df, - 0xdffbdfbde2456eb9, - 0x46cd2b2fbee34634, - 0xf2aef8fe819d98c3, - 0x357f5276d4599d61, - 0x24a5483879c453e3, - 0x88026889192b4b9, - 0x28da96671782dbec, - 0x4ef37c40588e9aaa, - 0x8837b90651bc9fb3, - 0xc164f741d3f0e5d6, - 0xbc135a0a704b70ba, - 0x69cd868f7622ada, - 0xbc37ba89e0b9c0ab, - 0x47c14a01323552f6, - 0x4f00794bacee98bb, - 0x7107de7d637a69d5, - 0x88af793bb6f2255e, - 0xf3c6466b8799b598, - 0xc288c616aa7f3b59, - 0x81ca63cf42fca3fd, - 0x88d85ace36a2674b, - 0xd056bd3792389e7, - 0xe55c396c4e9dd32d, - 0xbefb504571e6c0a6, - 0x96ab32115e91e8cc, - 0xbf8acb18de8f38d1, - 0x66dae58801672606, - 0x833b6017872317fb, - 0xb87c16f2d1c92864, - 0xdb766a74e58b669c, - 0x89659f85c61417be, - 0xc8daad856011ea0c, - 0x76a4b565b6fe7eae, - 0xa469d085f6237312, - 0xaaf0365683a3e96c, - 
0x4dbb746f8424f7b8, - 0x638755af4e4acc1, - 0x3d7807f5bde64486, - 0x17be6d8f5bbb7639, - 0x903f0cd44dc35dc, - 0x67b672eafdf1196c, - 0xa676ff93ed4c82f1, - 0x521d1004c5053d9d, - 0x37ba9ad09ccc9202, - 0x84e54d297aacfb51, - 0xa0b4b776a143445, - 0x820d471e20b348e, - 0x1874383cb83d46dc, - 0x97edeec7a1efe11c, - 0xb330e50b1bdc42aa, - 0x1dd91955ce70e032, - 0xa514cdb88f2939d5, - 0x2791233fd90db9d3, - 0x7b670a4cc50f7a9b, - 0x77c07d2a05c6dfa5, - 0xe3778b6646d0a6fa, - 0xb39c8eda47b56749, - 0x933ed448addbef28, - 0xaf846af6ab7d0bf4, - 0xe5af208eb666e49, - 0x5e6622f73534cd6a, - 0x297daeca42ef5b6e, - 0x862daef3d35539a6, - 0xe68722498f8e1ea9, - 0x981c53093dc0d572, - 0xfa09b0bfbf86fbf5, - 0x30b1e96166219f15, - 0x70e7d466bdc4fb83, - 0x5a66736e35f2a8e9, - 0xcddb59d2b7c1baef, - 0xd6c7d247d26d8996, - 0xea4e39eac8de1ba3, - 0x539c8bb19fa3aff2, - 0x9f90e4c5fd508d8, - 0xa34e5956fbaf3385, - 0x2e2f8e151d3ef375, - 0x173691e9b83faec1, - 0xb85a8d56bf016379, - 0x8382381267408ae3, - 0xb90f901bbdc0096d, - 0x7c6ad32933bcec65, - 0x76bb5e2f2c8ad595, - 0x390f851a6cf46d28, - 0xc3e6064da1c2da72, - 0xc52a0c101cfa5389, - 0xd78eaf84a3fbc530, - 0x3781b9e2288b997e, - 0x73c2f6dea83d05c4, - 0x4228e364c5b5ed7, - 0x9d7a3edf0da43911, - 0x8edcfeda24686756, - 0x5e7667a7b7a9b3a1, - 0x4c4f389fa143791d, - 0xb08bc1023da7cddc, - 0x7ab4be3ae529b1cc, - 0x754e6132dbe74ff9, - 0x71635442a839df45, - 0x2f6fb1643fbe52de, - 0x961e0a42cf7a8177, - 0xf3b45d83d89ef2ea, - 0xee3de4cf4a6e3e9b, - 0xcd6848542c3295e7, - 0xe4cee1664c78662f, - 0x9947548b474c68c4, - 0x25d73777a5ed8b0b, - 0xc915b1d636b7fc, - 0x21c2ba75d9b0d2da, - 0x5f6b5dcf608a64a1, - 0xdcf333255ff9570c, - 0x633b922418ced4ee, - 0xc136dde0b004b34a, - 0x58cc83b05d4b2f5a, - 0x5eb424dda28e42d2, - 0x62df47369739cd98, - 0xb4e0b42485e4ce17, - 0x16e1f0c1f9a8d1e7, - 0x8ec3916707560ebf, - 0x62ba6e2df2cc9db3, - 0xcbf9f4ff77d83a16, - 0x78d9d7d07d2bbcc4, - 0xef554ce1e02c41f4, - 0x8d7581127eccf94d, - 0xa9b53336cb3c8a05, - 0x38c42c0bf45c4f91, - 0x640893cdf4488863, - 0x80ec34bc575ea568, - 
0x39f324f5b48eaa40, - 0xe9d9ed1f8eff527f, - 0x9224fc058cc5a214, - 0xbaba00b04cfe7741, - 0x309a9f120fcf52af, - 0xa558f3ec65626212, - 0x424bec8b7adabe2f, - 0x41622513a6aea433, - 0xb88da2d5324ca798, - 0xd287733b245528a4, - 0x9a44697e6d68aec3, - 0x7b1093be2f49bb28, - 0x50bbec632e3d8aad, - 0x6cd90723e1ea8283, - 0x897b9e7431b02bf3, - 0x219efdcb338a7047, - 0x3b0311f0a27c0656, - 0xdb17bf91c0db96e7, - 0x8cd4fd6b4e85a5b2, - 0xfab071054ba6409d, - 0x40d6fe831fa9dfd9, - 0xaf358debad7d791e, - 0xeb8d0e25a65e3e58, - 0xbbcbd3df14e08580, - 0xcf751f27ecdab2b, - 0x2b4da14f2613d8f4, - } - ); + private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer( + new ulong[256] + { + 0xf5b8f72c5f77775c, + 0x84935f266b7ac412, + 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, + 0x34584e7e8c3a9fd0, + 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, + 0xcecd042422c4044f, + 0x76de76c58524259e, + 0x9c8528f65badeaca, + 0x86563706e2097529, + 0x2902475fa375d889, + 0xafb32a9739a5ebe6, + 0xce2714da3883e639, + 0x21eaf821722e69e, + 0x37b628620b628, + 0x49a8d455d88caf5, + 0x8556d711e6958140, + 0x4f7ae74fc605c1f, + 0x829f0c3468bd3a20, + 0x4ffdc885c625179e, + 0x8473de048a3daf1b, + 0x51008822b05646b2, + 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, + 0xc3cc10f4abbd4003, + 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, + 0x3481db31cee03547, + 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, + 0x8eb27177055723dd, + 0xc789950d44cd94be, + 0x934feadc3700b12b, + 0x5e485f11edbdf182, + 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, + 0x9d46e9935ebbba2e, + 0xe056b67e05e6822b, + 0x94d73f55739d03a0, + 0xcd7010bdb69b5a03, + 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, + 0x38d1a4fa6185d225, + 0xb475166f94bbe9bb, + 0xa4143548720959f1, + 0x7aed4780ba6b26ba, + 0xd0ce264439e02312, + 0x84366d746078d508, + 0xa8ce973c72ed17be, + 0x21c323a29a430b01, + 0x9962d617e3af80ee, + 0xab0ce91d9c8cf75b, + 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, + 0xc03a681640a85506, + 0x496e4e9f9c310967, + 0x78580472b59b14a0, + 0x273824c23b388577, + 0x66bf923ad45cb553, + 
0x47ae1a5a2492ba86, + 0x35e304569e229659, + 0x4765182a46870b6f, + 0x6cbab625e9099412, + 0xddac9a2e598522c1, + 0x7172086e666624f2, + 0xdf5003ca503b7837, + 0x88c0c1db78563d09, + 0x58d51865acfc289d, + 0x177671aec65224f1, + 0xfb79d8a241e967d7, + 0x2be1e101cad9a49a, + 0x6625682f6e29186b, + 0x399553457ac06e50, + 0x35dffb4c23abb74, + 0x429db2591f54aade, + 0xc52802a8037d1009, + 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, + 0x8b0ea2d99580c2f7, + 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, + 0x9d42ed614f3dd267, + 0x5b9313c06257c57b, + 0xa114b8008b5e1442, + 0xc1fe311c11c13d4b, + 0x66e8763ea34c5568, + 0x8b982af1c262f05d, + 0xee8876faaa75fbb7, + 0x8a62a4d0d172bb2a, + 0xc13d94a3b7449a97, + 0x6dbbba9dc15d037c, + 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, + 0xf61aaf2962c9abb9, + 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, + 0x25e650899c74ddd7, + 0xba042af4a7c37463, + 0x4eb1a539465a3eca, + 0xbe09dbf03b05d5ca, + 0x774e5a362b5472ba, + 0x47a1221229d183cd, + 0x504b0ca18ef5a2df, + 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, + 0xf2aef8fe819d98c3, + 0x357f5276d4599d61, + 0x24a5483879c453e3, + 0x88026889192b4b9, + 0x28da96671782dbec, + 0x4ef37c40588e9aaa, + 0x8837b90651bc9fb3, + 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, + 0x69cd868f7622ada, + 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, + 0x4f00794bacee98bb, + 0x7107de7d637a69d5, + 0x88af793bb6f2255e, + 0xf3c6466b8799b598, + 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, + 0x88d85ace36a2674b, + 0xd056bd3792389e7, + 0xe55c396c4e9dd32d, + 0xbefb504571e6c0a6, + 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, + 0x66dae58801672606, + 0x833b6017872317fb, + 0xb87c16f2d1c92864, + 0xdb766a74e58b669c, + 0x89659f85c61417be, + 0xc8daad856011ea0c, + 0x76a4b565b6fe7eae, + 0xa469d085f6237312, + 0xaaf0365683a3e96c, + 0x4dbb746f8424f7b8, + 0x638755af4e4acc1, + 0x3d7807f5bde64486, + 0x17be6d8f5bbb7639, + 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, + 0xa676ff93ed4c82f1, + 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, + 0x84e54d297aacfb51, + 0xa0b4b776a143445, + 0x820d471e20b348e, + 
0x1874383cb83d46dc, + 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, + 0x1dd91955ce70e032, + 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, + 0x7b670a4cc50f7a9b, + 0x77c07d2a05c6dfa5, + 0xe3778b6646d0a6fa, + 0xb39c8eda47b56749, + 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, + 0xe5af208eb666e49, + 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, + 0x862daef3d35539a6, + 0xe68722498f8e1ea9, + 0x981c53093dc0d572, + 0xfa09b0bfbf86fbf5, + 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, + 0x5a66736e35f2a8e9, + 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, + 0xea4e39eac8de1ba3, + 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, + 0xa34e5956fbaf3385, + 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, + 0xb85a8d56bf016379, + 0x8382381267408ae3, + 0xb90f901bbdc0096d, + 0x7c6ad32933bcec65, + 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, + 0xc3e6064da1c2da72, + 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, + 0x3781b9e2288b997e, + 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, + 0x9d7a3edf0da43911, + 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, + 0x4c4f389fa143791d, + 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, + 0x754e6132dbe74ff9, + 0x71635442a839df45, + 0x2f6fb1643fbe52de, + 0x961e0a42cf7a8177, + 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, + 0xcd6848542c3295e7, + 0xe4cee1664c78662f, + 0x9947548b474c68c4, + 0x25d73777a5ed8b0b, + 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, + 0x5f6b5dcf608a64a1, + 0xdcf333255ff9570c, + 0x633b922418ced4ee, + 0xc136dde0b004b34a, + 0x58cc83b05d4b2f5a, + 0x5eb424dda28e42d2, + 0x62df47369739cd98, + 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, + 0x8ec3916707560ebf, + 0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, + 0x78d9d7d07d2bbcc4, + 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, + 0xa9b53336cb3c8a05, + 0x38c42c0bf45c4f91, + 0x640893cdf4488863, + 0x80ec34bc575ea568, + 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, + 0x9224fc058cc5a214, + 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, + 0xa558f3ec65626212, + 0x424bec8b7adabe2f, + 0x41622513a6aea433, + 0xb88da2d5324ca798, + 0xd287733b245528a4, + 0x9a44697e6d68aec3, + 0x7b1093be2f49bb28, + 
0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, + 0x897b9e7431b02bf3, + 0x219efdcb338a7047, + 0x3b0311f0a27c0656, + 0xdb17bf91c0db96e7, + 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, + 0x40d6fe831fa9dfd9, + 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, + 0xbbcbd3df14e08580, + 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4, + } + ); #endif - } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs index d5ebe36ad..7c8a98cae 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs @@ -1,119 +1,119 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_bitWeight(uint stat) { - /* ZSTD_bitWeight() : - * provide estimated "cost" of a stat in full bits only */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_bitWeight(uint stat) - { - return ZSTD_highbit32(stat + 1) * (1 << 8); - } + return ZSTD_highbit32(stat + 1) * (1 << 8); + } - /* ZSTD_fracWeight() : - * provide fractional-bit "cost" of a stat, - * using linear interpolation approximation */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_fracWeight(uint rawStat) - { - uint stat = rawStat + 1; - uint hb = ZSTD_highbit32(stat); - uint BWeight = hb * (1 << 8); - /* Fweight was meant for "Fractional weight" - * but it's effectively a value between 1 and 2 - * using fixed point arithmetic */ - uint FWeight = stat << 8 >> (int)hb; - uint weight = BWeight 
+ FWeight; - assert(hb + 8 < 31); - return weight; - } + /* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_fracWeight(uint rawStat) + { + uint stat = rawStat + 1; + uint hb = ZSTD_highbit32(stat); + uint BWeight = hb * (1 << 8); + /* Fweight was meant for "Fractional weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ + uint FWeight = stat << 8 >> (int)hb; + uint weight = BWeight + FWeight; + assert(hb + 8 < 31); + return weight; + } - private static int ZSTD_compressedLiterals(optState_t* optPtr) - { - return optPtr->literalCompressionMode != ZSTD_paramSwitch_e.ZSTD_ps_disable ? 1 : 0; - } + private static int ZSTD_compressedLiterals(optState_t* optPtr) + { + return optPtr->literalCompressionMode != ZSTD_paramSwitch_e.ZSTD_ps_disable ? 1 : 0; + } - private static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) - { - if (ZSTD_compressedLiterals(optPtr) != 0) - optPtr->litSumBasePrice = - optLevel != 0 - ? ZSTD_fracWeight(optPtr->litSum) - : ZSTD_bitWeight(optPtr->litSum); - optPtr->litLengthSumBasePrice = - optLevel != 0 - ? ZSTD_fracWeight(optPtr->litLengthSum) - : ZSTD_bitWeight(optPtr->litLengthSum); - optPtr->matchLengthSumBasePrice = - optLevel != 0 - ? ZSTD_fracWeight(optPtr->matchLengthSum) - : ZSTD_bitWeight(optPtr->matchLengthSum); - optPtr->offCodeSumBasePrice = + private static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) + { + if (ZSTD_compressedLiterals(optPtr) != 0) + optPtr->litSumBasePrice = optLevel != 0 - ? ZSTD_fracWeight(optPtr->offCodeSum) - : ZSTD_bitWeight(optPtr->offCodeSum); - } + ? ZSTD_fracWeight(optPtr->litSum) + : ZSTD_bitWeight(optPtr->litSum); + optPtr->litLengthSumBasePrice = + optLevel != 0 + ? 
ZSTD_fracWeight(optPtr->litLengthSum) + : ZSTD_bitWeight(optPtr->litLengthSum); + optPtr->matchLengthSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->matchLengthSum) + : ZSTD_bitWeight(optPtr->matchLengthSum); + optPtr->offCodeSumBasePrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->offCodeSum) + : ZSTD_bitWeight(optPtr->offCodeSum); + } - private static uint sum_u32(uint* table, nuint nbElts) + private static uint sum_u32(uint* table, nuint nbElts) + { + nuint n; + uint total = 0; + for (n = 0; n < nbElts; n++) { - nuint n; - uint total = 0; - for (n = 0; n < nbElts; n++) - { - total += table[n]; - } - - return total; + total += table[n]; } - private static uint ZSTD_downscaleStats( - uint* table, - uint lastEltIndex, - uint shift, - base_directive_e base1 - ) - { - uint s, - sum = 0; - assert(shift < 30); - for (s = 0; s < lastEltIndex + 1; s++) - { - uint @base = (uint)( - base1 != default ? 1 - : table[s] > 0 ? 1 - : 0 - ); - uint newStat = @base + (table[s] >> (int)shift); - sum += newStat; - table[s] = newStat; - } - - return sum; - } + return total; + } - /* ZSTD_scaleStats() : - * reduce all elt frequencies in table if sum too large - * return the resulting sum of elements */ - private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarget) + private static uint ZSTD_downscaleStats( + uint* table, + uint lastEltIndex, + uint shift, + base_directive_e base1 + ) + { + uint s, + sum = 0; + assert(shift < 30); + for (s = 0; s < lastEltIndex + 1; s++) { - uint prevsum = sum_u32(table, lastEltIndex + 1); - uint factor = prevsum >> (int)logTarget; - assert(logTarget < 30); - if (factor <= 1) - return prevsum; - return ZSTD_downscaleStats( - table, - lastEltIndex, - ZSTD_highbit32(factor), - base_directive_e.base_1guaranteed + uint @base = (uint)( + base1 != default ? 1 + : table[s] > 0 ? 
1 + : 0 ); + uint newStat = @base + (table[s] >> (int)shift); + sum += newStat; + table[s] = newStat; } + return sum; + } + + /* ZSTD_scaleStats() : + * reduce all elt frequencies in table if sum too large + * return the resulting sum of elements */ + private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarget) + { + uint prevsum = sum_u32(table, lastEltIndex + 1); + uint factor = prevsum >> (int)logTarget; + assert(logTarget < 30); + if (factor <= 1) + return prevsum; + return ZSTD_downscaleStats( + table, + lastEltIndex, + ZSTD_highbit32(factor), + base_directive_e.base_1guaranteed + ); + } + #if NET7_0_OR_GREATER private static ReadOnlySpan Span_baseLLfreqs => new uint[36] @@ -162,47 +162,47 @@ ref MemoryMarshal.GetReference(Span_baseLLfreqs) ); #else - private static readonly uint* baseLLfreqs = GetArrayPointer( - new uint[36] - { - 4, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - } - ); + private static readonly uint* baseLLfreqs = GetArrayPointer( + new uint[36] + { + 4, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + } + ); #endif #if NET7_0_OR_GREATER private static ReadOnlySpan Span_baseOFCfreqs => @@ -248,792 +248,864 @@ ref MemoryMarshal.GetReference(Span_baseOFCfreqs) ); #else - private static readonly uint* baseOFCfreqs = GetArrayPointer( - new uint[32] - { - 6, - 2, - 1, - 1, - 2, - 3, - 4, - 4, - 4, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - } - ); + private static readonly uint* baseOFCfreqs = GetArrayPointer( + new uint[32] + { + 6, + 2, + 1, + 1, + 2, + 3, + 4, + 4, + 4, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + } + ); 
#endif - /* ZSTD_rescaleFreqs() : - * if first block (detected by optPtr->litLengthSum == 0) : init statistics - * take hints from dictionary if there is one - * and init from zero if there is none, - * using src for literals stats, and baseline stats for sequence symbols - * otherwise downscale existing stats, to be used as seed for next block. - */ - private static void ZSTD_rescaleFreqs( - optState_t* optPtr, - byte* src, - nuint srcSize, - int optLevel - ) + /* ZSTD_rescaleFreqs() : + * if first block (detected by optPtr->litLengthSum == 0) : init statistics + * take hints from dictionary if there is one + * and init from zero if there is none, + * using src for literals stats, and baseline stats for sequence symbols + * otherwise downscale existing stats, to be used as seed for next block. + */ + private static void ZSTD_rescaleFreqs( + optState_t* optPtr, + byte* src, + nuint srcSize, + int optLevel + ) + { + int compressedLiterals = ZSTD_compressedLiterals(optPtr); + optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; + if (optPtr->litLengthSum == 0) { - int compressedLiterals = ZSTD_compressedLiterals(optPtr); - optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; - if (optPtr->litLengthSum == 0) + if (srcSize <= 8) { - if (srcSize <= 8) - { - optPtr->priceType = ZSTD_OptPrice_e.zop_predef; - } - - assert(optPtr->symbolCosts != null); - if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat.HUF_repeat_valid) - { - optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; - if (compressedLiterals != 0) - { - /* generate literals statistics from huffman table */ - uint lit; - assert(optPtr->litFreq != null); - optPtr->litSum = 0; - for (lit = 0; lit <= (1 << 8) - 1; lit++) - { - /* scale to 2K */ - const uint scaleLog = 11; - uint bitCost = HUF_getNbBitsFromCTable( - &optPtr->symbolCosts->huf.CTable.e0, - lit - ); - assert(bitCost <= scaleLog); - optPtr->litFreq[lit] = (uint)( - bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1 - ); - optPtr->litSum += optPtr->litFreq[lit]; - } - } - - { - uint ll; - FSE_CState_t llstate; - FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); - optPtr->litLengthSum = 0; - for (ll = 0; ll <= 35; ll++) - { - /* scale to 1K */ - const uint scaleLog = 10; - uint bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); - assert(bitCost < scaleLog); - optPtr->litLengthFreq[ll] = (uint)( - bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 - ); - optPtr->litLengthSum += optPtr->litLengthFreq[ll]; - } - } - - { - uint ml; - FSE_CState_t mlstate; - FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); - optPtr->matchLengthSum = 0; - for (ml = 0; ml <= 52; ml++) - { - const uint scaleLog = 10; - uint bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); - assert(bitCost < scaleLog); - optPtr->matchLengthFreq[ml] = (uint)( - bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 - ); - optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; - } - } + optPtr->priceType = ZSTD_OptPrice_e.zop_predef; + } - { - uint of; - FSE_CState_t ofstate; - FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); - optPtr->offCodeSum = 0; - for (of = 0; of <= 31; of++) - { - const uint scaleLog = 10; - uint bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); - assert(bitCost < scaleLog); - optPtr->offCodeFreq[of] = (uint)( - bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1 - ); - optPtr->offCodeSum += optPtr->offCodeFreq[of]; - } - } - } - else + assert(optPtr->symbolCosts != null); + if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat.HUF_repeat_valid) + { + optPtr->priceType = ZSTD_OptPrice_e.zop_dynamic; + if (compressedLiterals != 0) { + /* generate literals statistics from huffman table */ + uint lit; assert(optPtr->litFreq != null); - if (compressedLiterals != 0) + optPtr->litSum = 0; + for (lit = 0; lit <= (1 << 8) - 1; lit++) { - /* base initial cost of literals on direct frequency within src */ - uint lit = (1 << 8) - 1; - HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); - optPtr->litSum = ZSTD_downscaleStats( - optPtr->litFreq, - (1 << 8) - 1, - 8, - base_directive_e.base_0possible + /* scale to 2K */ + const uint scaleLog = 11; + uint bitCost = HUF_getNbBitsFromCTable( + &optPtr->symbolCosts->huf.CTable.e0, + lit + ); + assert(bitCost <= scaleLog); + optPtr->litFreq[lit] = (uint)( + bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 ); + optPtr->litSum += optPtr->litFreq[lit]; } + } + { + uint ll; + FSE_CState_t llstate; + FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); + optPtr->litLengthSum = 0; + for (ll = 0; ll <= 35; ll++) { - memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(uint) * 36); - optPtr->litLengthSum = sum_u32(baseLLfreqs, 35 + 1); + /* scale to 1K */ + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); + assert(bitCost < scaleLog); + optPtr->litLengthFreq[ll] = (uint)( + bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1 + ); + optPtr->litLengthSum += optPtr->litLengthFreq[ll]; } + } + { + uint ml; + FSE_CState_t mlstate; + FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); + optPtr->matchLengthSum = 0; + for (ml = 0; ml <= 52; ml++) { - uint ml; - for (ml = 0; ml <= 52; ml++) - optPtr->matchLengthFreq[ml] = 1; + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); + assert(bitCost < scaleLog); + optPtr->matchLengthFreq[ml] = (uint)( + bitCost != 0 ? 1 << (int)(scaleLog - bitCost) : 1 + ); + optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; } + } - optPtr->matchLengthSum = 52 + 1; + { + uint of; + FSE_CState_t ofstate; + FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); + optPtr->offCodeSum = 0; + for (of = 0; of <= 31; of++) { - memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(uint) * 32); - optPtr->offCodeSum = sum_u32(baseOFCfreqs, 31 + 1); + const uint scaleLog = 10; + uint bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); + assert(bitCost < scaleLog); + optPtr->offCodeFreq[of] = (uint)( + bitCost != 0 ? 
1 << (int)(scaleLog - bitCost) : 1 + ); + optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } } else { + assert(optPtr->litFreq != null); if (compressedLiterals != 0) - optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, (1 << 8) - 1, 12); - optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, 35, 11); - optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, 52, 11); - optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, 31, 11); - } + { + /* base initial cost of literals on direct frequency within src */ + uint lit = (1 << 8) - 1; + HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); + optPtr->litSum = ZSTD_downscaleStats( + optPtr->litFreq, + (1 << 8) - 1, + 8, + base_directive_e.base_0possible + ); + } - ZSTD_setBasePrices(optPtr, optLevel); - } + { + memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(uint) * 36); + optPtr->litLengthSum = sum_u32(baseLLfreqs, 35 + 1); + } - /* ZSTD_rawLiteralsCost() : - * price of literals (only) in specified segment (which length can be 0). - * does not include price of literalLength symbol */ - private static uint ZSTD_rawLiteralsCost( - byte* literals, - uint litLength, - optState_t* optPtr, - int optLevel - ) - { - if (litLength == 0) - return 0; - if (ZSTD_compressedLiterals(optPtr) == 0) - return (litLength << 3) * (1 << 8); - if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) - return litLength * 6 * (1 << 8); - { - uint price = optPtr->litSumBasePrice * litLength; - uint litPriceMax = optPtr->litSumBasePrice - (1 << 8); - uint u; - assert(optPtr->litSumBasePrice >= 1 << 8); - for (u = 0; u < litLength; u++) { - uint litPrice = - optLevel != 0 - ? 
ZSTD_fracWeight(optPtr->litFreq[literals[u]]) - : ZSTD_bitWeight(optPtr->litFreq[literals[u]]); - if (litPrice > litPriceMax) - litPrice = litPriceMax; - price -= litPrice; + uint ml; + for (ml = 0; ml <= 52; ml++) + optPtr->matchLengthFreq[ml] = 1; } - return price; + optPtr->matchLengthSum = 52 + 1; + { + memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(uint) * 32); + optPtr->offCodeSum = sum_u32(baseOFCfreqs, 31 + 1); + } } } + else + { + if (compressedLiterals != 0) + optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, (1 << 8) - 1, 12); + optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, 35, 11); + optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, 52, 11); + optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, 31, 11); + } + + ZSTD_setBasePrices(optPtr, optLevel); + } - /* ZSTD_litLengthPrice() : - * cost of literalLength symbol */ - private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int optLevel) + /* ZSTD_rawLiteralsCost() : + * price of literals (only) in specified segment (which length can be 0). + * does not include price of literalLength symbol */ + private static uint ZSTD_rawLiteralsCost( + byte* literals, + uint litLength, + optState_t* optPtr, + int optLevel + ) + { + if (litLength == 0) + return 0; + if (ZSTD_compressedLiterals(optPtr) == 0) + return (litLength << 3) * (1 << 8); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return litLength * 6 * (1 << 8); { - assert(litLength <= 1 << 17); - if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) - return optLevel != 0 ? 
ZSTD_fracWeight(litLength) : ZSTD_bitWeight(litLength); - if (litLength == 1 << 17) - return (1 << 8) + ZSTD_litLengthPrice((1 << 17) - 1, optPtr, optLevel); + uint price = optPtr->litSumBasePrice * litLength; + uint litPriceMax = optPtr->litSumBasePrice - (1 << 8); + uint u; + assert(optPtr->litSumBasePrice >= 1 << 8); + for (u = 0; u < litLength; u++) { - uint llCode = ZSTD_LLcode(litLength); - return (uint)(LL_bits[llCode] * (1 << 8)) - + optPtr->litLengthSumBasePrice - - ( - optLevel != 0 - ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) - : ZSTD_bitWeight(optPtr->litLengthFreq[llCode]) - ); + uint litPrice = + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litFreq[literals[u]]) + : ZSTD_bitWeight(optPtr->litFreq[literals[u]]); + if (litPrice > litPriceMax) + litPrice = litPriceMax; + price -= litPrice; } + + return price; } + } - /* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence. - * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. - * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() - * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) - */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getMatchPrice( - uint offBase, - uint matchLength, - optState_t* optPtr, - int optLevel - ) + /* ZSTD_litLengthPrice() : + * cost of literalLength symbol */ + private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int optLevel) + { + assert(litLength <= 1 << 17); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return optLevel != 0 ? 
ZSTD_fracWeight(litLength) : ZSTD_bitWeight(litLength); + if (litLength == 1 << 17) + return (1 << 8) + ZSTD_litLengthPrice((1 << 17) - 1, optPtr, optLevel); { - uint price; - uint offCode = ZSTD_highbit32(offBase); - uint mlBase = matchLength - 3; - assert(matchLength >= 3); - if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) - return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) - + (16 + offCode) * (1 << 8); - price = - offCode * (1 << 8) + uint llCode = ZSTD_LLcode(litLength); + return (uint)(LL_bits[llCode] * (1 << 8)) + + optPtr->litLengthSumBasePrice + - ( + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) + : ZSTD_bitWeight(optPtr->litLengthFreq[llCode]) + ); + } + } + + /* ZSTD_getMatchPrice() : + * Provides the cost of the match part (offset + matchLength) of a sequence. + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. + * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() + * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) + */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_getMatchPrice( + uint offBase, + uint matchLength, + optState_t* optPtr, + int optLevel + ) + { + uint price; + uint offCode = ZSTD_highbit32(offBase); + uint mlBase = matchLength - 3; + assert(matchLength >= 3); + if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) + return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) + + (16 + offCode) * (1 << 8); + price = + offCode * (1 << 8) + + ( + optPtr->offCodeSumBasePrice + - ( + optLevel != 0 + ? 
ZSTD_fracWeight(optPtr->offCodeFreq[offCode]) + : ZSTD_bitWeight(optPtr->offCodeFreq[offCode]) + ) + ); + if (optLevel < 2 && offCode >= 20) + price += (offCode - 19) * 2 * (1 << 8); + { + uint mlCode = ZSTD_MLcode(mlBase); + price += + (uint)(ML_bits[mlCode] * (1 << 8)) + ( - optPtr->offCodeSumBasePrice + optPtr->matchLengthSumBasePrice - ( optLevel != 0 - ? ZSTD_fracWeight(optPtr->offCodeFreq[offCode]) - : ZSTD_bitWeight(optPtr->offCodeFreq[offCode]) + ? ZSTD_fracWeight(optPtr->matchLengthFreq[mlCode]) + : ZSTD_bitWeight(optPtr->matchLengthFreq[mlCode]) ) ); - if (optLevel < 2 && offCode >= 20) - price += (offCode - 19) * 2 * (1 << 8); - { - uint mlCode = ZSTD_MLcode(mlBase); - price += - (uint)(ML_bits[mlCode] * (1 << 8)) - + ( - optPtr->matchLengthSumBasePrice - - ( - optLevel != 0 - ? ZSTD_fracWeight(optPtr->matchLengthFreq[mlCode]) - : ZSTD_bitWeight(optPtr->matchLengthFreq[mlCode]) - ) - ); - } + } - price += (1 << 8) / 5; - return price; + price += (1 << 8) / 5; + return price; + } + + /* ZSTD_updateStats() : + * assumption : literals + litLength <= iend */ + private static void ZSTD_updateStats( + optState_t* optPtr, + uint litLength, + byte* literals, + uint offBase, + uint matchLength + ) + { + if (ZSTD_compressedLiterals(optPtr) != 0) + { + uint u; + for (u = 0; u < litLength; u++) + optPtr->litFreq[literals[u]] += 2; + optPtr->litSum += litLength * 2; } - /* ZSTD_updateStats() : - * assumption : literals + litLength <= iend */ - private static void ZSTD_updateStats( - optState_t* optPtr, - uint litLength, - byte* literals, - uint offBase, - uint matchLength - ) { - if (ZSTD_compressedLiterals(optPtr) != 0) + uint llCode = ZSTD_LLcode(litLength); + optPtr->litLengthFreq[llCode]++; + optPtr->litLengthSum++; + } + + { + uint offCode = ZSTD_highbit32(offBase); + assert(offCode <= 31); + optPtr->offCodeFreq[offCode]++; + optPtr->offCodeSum++; + } + + { + uint mlBase = matchLength - 3; + uint mlCode = ZSTD_MLcode(mlBase); + optPtr->matchLengthFreq[mlCode]++; 
+ optPtr->matchLengthSum++; + } + } + + /* ZSTD_readMINMATCH() : + * function safe only for comparisons + * assumption : memPtr must be at least 4 bytes before end of buffer */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_readMINMATCH(void* memPtr, uint length) + { + switch (length) + { + default: + case 4: + return MEM_read32(memPtr); + case 3: + if (BitConverter.IsLittleEndian) + return MEM_read32(memPtr) << 8; + else + return MEM_read32(memPtr) >> 8; + } + } + + /* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. not within extDict) */ + private static uint ZSTD_insertAndFindFirstIndexHash3( + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip + ) + { + uint* hashTable3 = ms->hashTable3; + uint hashLog3 = ms->hashLog3; + byte* @base = ms->window.@base; + uint idx = *nextToUpdate3; + uint target = (uint)(ip - @base); + nuint hash3 = ZSTD_hash3Ptr(ip, hashLog3); + assert(hashLog3 > 0); + while (idx < target) + { + hashTable3[ZSTD_hash3Ptr(@base + idx, hashLog3)] = idx; + idx++; + } + + *nextToUpdate3 = target; + return hashTable3[hash3]; + } + + /*-************************************* + * Binary Tree search + ***************************************/ + /** ZSTD_insertBt1() : add one or multiple positions to tree. + * @param ip assumed <= iend-8 . 
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position + * @return : nb of positions added */ + private static uint ZSTD_insertBt1( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + uint target, + uint mls, + int extDict + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint* hashTable = ms->hashTable; + uint hashLog = cParams->hashLog; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (uint)((1 << (int)btLog) - 1); + uint matchIndex = hashTable[h]; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; + byte* @base = ms->window.@base; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + byte* match; + uint curr = (uint)(ip - @base); + uint btLow = btMask >= curr ? 0 : curr - btMask; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = smallerPtr + 1; + /* to be nullified at the end */ + uint dummy32; + /* windowLow is based on target because + * we only need positions that will be in the window at the end of the tree update. + */ + uint windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); + uint matchEndIdx = curr + 8 + 1; + nuint bestLength = 8; + uint nbCompares = 1U << (int)cParams->searchLog; + assert(curr <= target); + assert(ip <= iend - 8); + hashTable[h] = curr; + assert(windowLow > 0); + for (; nbCompares != 0 && matchIndex >= windowLow; --nbCompares) + { + uint* nextPtr = bt + 2 * (matchIndex & btMask); + /* guaranteed minimum nb of common bytes */ + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? 
commonLengthSmaller + : commonLengthLarger; + assert(matchIndex < curr); + if (extDict == 0 || matchIndex + matchLength >= dictLimit) { - uint u; - for (u = 0; u < litLength; u++) - optPtr->litFreq[literals[u]] += 2; - optPtr->litSum += litLength * 2; + assert(matchIndex + matchLength >= dictLimit); + match = @base + matchIndex; + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); } - + else { - uint llCode = ZSTD_LLcode(litLength); - optPtr->litLengthFreq[llCode]++; - optPtr->litLengthSum++; + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iend, + dictEnd, + prefixStart + ); + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; } + if (matchLength > bestLength) { - uint offCode = ZSTD_highbit32(offBase); - assert(offCode <= 31); - optPtr->offCodeFreq[offCode]++; - optPtr->offCodeSum++; + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; } + if (ip + matchLength == iend) { - uint mlBase = matchLength - 3; - uint mlCode = ZSTD_MLcode(mlBase); - optPtr->matchLengthFreq[mlCode]++; - optPtr->matchLengthSum++; + break; } - } - /* ZSTD_readMINMATCH() : - * function safe only for comparisons - * assumption : memPtr must be at least 4 bytes before end of buffer */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_readMINMATCH(void* memPtr, uint length) - { - switch (length) + if (match[matchLength] < ip[matchLength]) + { + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) + { + smallerPtr = &dummy32; + break; + } + + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else { - default: - case 4: - return MEM_read32(memPtr); - case 3: - if (BitConverter.IsLittleEndian) - return MEM_read32(memPtr) << 8; - else - return MEM_read32(memPtr) >> 8; + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex 
<= btLow) + { + largerPtr = &dummy32; + break; + } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; } } - /* Update hashTable3 up to ip (excluded) - Assumption : always within prefix (i.e. not within extDict) */ - private static uint ZSTD_insertAndFindFirstIndexHash3( - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip - ) + *smallerPtr = *largerPtr = 0; { - uint* hashTable3 = ms->hashTable3; - uint hashLog3 = ms->hashLog3; - byte* @base = ms->window.@base; - uint idx = *nextToUpdate3; - uint target = (uint)(ip - @base); - nuint hash3 = ZSTD_hash3Ptr(ip, hashLog3); - assert(hashLog3 > 0); - while (idx < target) - { - hashTable3[ZSTD_hash3Ptr(@base + idx, hashLog3)] = idx; - idx++; - } + uint positions = 0; + if (bestLength > 384) + positions = 192 < (uint)(bestLength - 384) ? 192 : (uint)(bestLength - 384); + assert(matchEndIdx > curr + 8); + return positions > matchEndIdx - (curr + 8) ? positions : matchEndIdx - (curr + 8); + } + } - *nextToUpdate3 = target; - return hashTable3[hash3]; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ZSTD_updateTree_internal( + ZSTD_MatchState_t* ms, + byte* ip, + byte* iend, + uint mls, + ZSTD_dictMode_e dictMode + ) + { + byte* @base = ms->window.@base; + uint target = (uint)(ip - @base); + uint idx = ms->nextToUpdate; + while (idx < target) + { + uint forward = ZSTD_insertBt1( + ms, + @base + idx, + iend, + target, + mls, + dictMode == ZSTD_dictMode_e.ZSTD_extDict ? 1 : 0 + ); + assert(idx < idx + forward); + idx += forward; } - /*-************************************* - * Binary Tree search - ***************************************/ - /** ZSTD_insertBt1() : add one or multiple positions to tree. - * @param ip assumed <= iend-8 . 
- * @param target The target of ZSTD_updateTree_internal() - we are filling to this position - * @return : nb of positions added */ - private static uint ZSTD_insertBt1( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iend, - uint target, - uint mls, - int extDict - ) + assert((nuint)(ip - @base) <= unchecked((uint)-1)); + assert((nuint)(iend - @base) <= unchecked((uint)-1)); + ms->nextToUpdate = target; + } + + /* used in ZSTD_loadDictionaryContent() */ + private static void ZSTD_updateTree(ZSTD_MatchState_t* ms, byte* ip, byte* iend) + { + ZSTD_updateTree_internal( + ms, + ip, + iend, + ms->cParams.minMatch, + ZSTD_dictMode_e.ZSTD_noDict + ); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_insertBtAndGetAllMatches( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iLimit, + ZSTD_dictMode_e dictMode, + uint* rep, + uint ll0, + uint lengthToBeat, + uint mls + ) + { + ZSTD_compressionParameters* cParams = &ms->cParams; + uint sufficient_len = + cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + byte* @base = ms->window.@base; + uint curr = (uint)(ip - @base); + uint hashLog = cParams->hashLog; + uint minMatch = (uint)(mls == 3 ? 3 : 4); + uint* hashTable = ms->hashTable; + nuint h = ZSTD_hashPtr(ip, hashLog, mls); + uint matchIndex = hashTable[h]; + uint* bt = ms->chainTable; + uint btLog = cParams->chainLog - 1; + uint btMask = (1U << (int)btLog) - 1; + nuint commonLengthSmaller = 0, + commonLengthLarger = 0; + byte* dictBase = ms->window.dictBase; + uint dictLimit = ms->window.dictLimit; + byte* dictEnd = dictBase + dictLimit; + byte* prefixStart = @base + dictLimit; + uint btLow = btMask >= curr ? 0 : curr - btMask; + uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); + uint matchLow = windowLow != 0 ? 
windowLow : 1; + uint* smallerPtr = bt + 2 * (curr & btMask); + uint* largerPtr = bt + 2 * (curr & btMask) + 1; + /* farthest referenced position of any match => detects repetitive patterns */ + uint matchEndIdx = curr + 8 + 1; + /* to be nullified at the end */ + uint dummy32; + uint mnum = 0; + uint nbCompares = 1U << (int)cParams->searchLog; + ZSTD_MatchState_t* dms = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? ms->dictMatchState : null; + ZSTD_compressionParameters* dmsCParams = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; + byte* dmsBase = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; + byte* dmsEnd = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; + uint dmsHighLimit = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; + uint dmsLowLimit = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.lowLimit : 0; + uint dmsIndexDelta = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; + uint dmsHashLog = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; + uint dmsBtLog = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; + uint dmsBtMask = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (1U << (int)dmsBtLog) - 1 : 0; + uint dmsBtLow = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + && dmsBtMask < dmsHighLimit - dmsLowLimit + ? 
dmsHighLimit - dmsBtMask + : dmsLowLimit; + nuint bestLength = lengthToBeat - 1; + assert(ll0 <= 1); { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint* hashTable = ms->hashTable; - uint hashLog = cParams->hashLog; - nuint h = ZSTD_hashPtr(ip, hashLog, mls); - uint* bt = ms->chainTable; - uint btLog = cParams->chainLog - 1; - uint btMask = (uint)((1 << (int)btLog) - 1); - uint matchIndex = hashTable[h]; - nuint commonLengthSmaller = 0, - commonLengthLarger = 0; - byte* @base = ms->window.@base; - byte* dictBase = ms->window.dictBase; - uint dictLimit = ms->window.dictLimit; - byte* dictEnd = dictBase + dictLimit; - byte* prefixStart = @base + dictLimit; - byte* match; - uint curr = (uint)(ip - @base); - uint btLow = btMask >= curr ? 0 : curr - btMask; - uint* smallerPtr = bt + 2 * (curr & btMask); - uint* largerPtr = smallerPtr + 1; - /* to be nullified at the end */ - uint dummy32; - /* windowLow is based on target because - * we only need positions that will be in the window at the end of the tree update. - */ - uint windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); - uint matchEndIdx = curr + 8 + 1; - nuint bestLength = 8; - uint nbCompares = 1U << (int)cParams->searchLog; - assert(curr <= target); - assert(ip <= iend - 8); - hashTable[h] = curr; - assert(windowLow > 0); - for (; nbCompares != 0 && matchIndex >= windowLow; --nbCompares) + uint lastR = 3 + ll0; + uint repCode; + for (repCode = ll0; repCode < lastR; repCode++) { - uint* nextPtr = bt + 2 * (matchIndex & btMask); - /* guaranteed minimum nb of common bytes */ - nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; - assert(matchIndex < curr); - if (extDict == 0 || matchIndex + matchLength >= dictLimit) + uint repOffset = repCode == 3 ? 
rep[0] - 1 : rep[repCode]; + uint repIndex = curr - repOffset; + uint repLen = 0; + assert(curr >= dictLimit); + if (repOffset - 1 < curr - dictLimit) { - assert(matchIndex + matchLength >= dictLimit); - match = @base + matchIndex; - matchLength += ZSTD_count(ip + matchLength, match + matchLength, iend); + if ( + repIndex >= windowLow + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(ip - repOffset, minMatch) + ) + { + repLen = + (uint)ZSTD_count(ip + minMatch, ip + minMatch - repOffset, iLimit) + + minMatch; + } } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments( - ip + matchLength, - match + matchLength, - iend, - dictEnd, - prefixStart - ); - if (matchIndex + matchLength >= dictLimit) - match = @base + matchIndex; - } - - if (matchLength > bestLength) - { - bestLength = matchLength; - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (uint)matchLength; - } + byte* repMatch = + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + ? dmsBase + repIndex - dmsIndexDelta + : dictBase + repIndex; + assert(curr >= windowLow); + if ( + dictMode == ZSTD_dictMode_e.ZSTD_extDict + && ( + (repOffset - 1 < curr - windowLow ? 1 : 0) + & ZSTD_index_overlap_check(dictLimit, repIndex) + ) != 0 + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(repMatch, minMatch) + ) + { + repLen = + (uint)ZSTD_count_2segments( + ip + minMatch, + repMatch + minMatch, + iLimit, + dictEnd, + prefixStart + ) + minMatch; + } - if (ip + matchLength == iend) - { - break; + if ( + dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + && ( + (repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 
1 : 0) + & ZSTD_index_overlap_check(dictLimit, repIndex) + ) != 0 + && ZSTD_readMINMATCH(ip, minMatch) + == ZSTD_readMINMATCH(repMatch, minMatch) + ) + { + repLen = + (uint)ZSTD_count_2segments( + ip + minMatch, + repMatch + minMatch, + iLimit, + dmsEnd, + prefixStart + ) + minMatch; + } } - if (match[matchLength] < ip[matchLength]) + if (repLen > bestLength) { - *smallerPtr = matchIndex; - commonLengthSmaller = matchLength; - if (matchIndex <= btLow) + bestLength = repLen; + assert(repCode - ll0 + 1 >= 1); + assert(repCode - ll0 + 1 <= 3); + matches[mnum].off = repCode - ll0 + 1; + matches[mnum].len = repLen; + mnum++; + if (repLen > sufficient_len || ip + repLen == iLimit) { - smallerPtr = &dummy32; - break; + return mnum; } + } + } + } - smallerPtr = nextPtr + 1; - matchIndex = nextPtr[1]; + if (mls == 3 && bestLength < mls) + { + uint matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); + if (matchIndex3 >= matchLow && curr - matchIndex3 < 1 << 18) + { + nuint mlen; + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || matchIndex3 >= dictLimit + ) + { + byte* match = @base + matchIndex3; + mlen = ZSTD_count(ip, match, iLimit); } else { - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) + byte* match = dictBase + matchIndex3; + mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); + } + + if (mlen >= mls) + { + bestLength = mlen; + assert(curr > matchIndex3); + assert(mnum == 0); + assert(curr - matchIndex3 > 0); + matches[0].off = curr - matchIndex3 + 3; + matches[0].len = (uint)mlen; + mnum = 1; + if (mlen > sufficient_len || ip + mlen == iLimit) { - largerPtr = &dummy32; - break; + ms->nextToUpdate = curr + 1; + return 1; } - - largerPtr = nextPtr; - matchIndex = nextPtr[0]; } } - - *smallerPtr = *largerPtr = 0; - { - uint positions = 0; - if (bestLength > 384) - positions = 192 < (uint)(bestLength - 384) ? 
192 : (uint)(bestLength - 384); - assert(matchEndIdx > curr + 8); - return positions > matchEndIdx - (curr + 8) ? positions : matchEndIdx - (curr + 8); - } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_updateTree_internal( - ZSTD_MatchState_t* ms, - byte* ip, - byte* iend, - uint mls, - ZSTD_dictMode_e dictMode - ) + hashTable[h] = curr; + for (; nbCompares != 0 && matchIndex >= matchLow; --nbCompares) { - byte* @base = ms->window.@base; - uint target = (uint)(ip - @base); - uint idx = ms->nextToUpdate; - while (idx < target) + uint* nextPtr = bt + 2 * (matchIndex & btMask); + byte* match; + /* guaranteed minimum nb of common bytes */ + nuint matchLength = + commonLengthSmaller < commonLengthLarger + ? commonLengthSmaller + : commonLengthLarger; + assert(curr > matchIndex); + if ( + dictMode == ZSTD_dictMode_e.ZSTD_noDict + || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState + || matchIndex + matchLength >= dictLimit + ) { - uint forward = ZSTD_insertBt1( - ms, - @base + idx, - iend, - target, - mls, - dictMode == ZSTD_dictMode_e.ZSTD_extDict ? 
1 : 0 + assert(matchIndex + matchLength >= dictLimit); + match = @base + matchIndex; +#if DEBUG + if (matchIndex >= dictLimit) + assert(memcmp(match, ip, matchLength) == 0); +#endif + matchLength += ZSTD_count(ip + matchLength, match + matchLength, iLimit); + } + else + { + match = dictBase + matchIndex; + assert(memcmp(match, ip, matchLength) == 0); + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iLimit, + dictEnd, + prefixStart ); - assert(idx < idx + forward); - idx += forward; + if (matchIndex + matchLength >= dictLimit) + match = @base + matchIndex; } - assert((nuint)(ip - @base) <= unchecked((uint)-1)); - assert((nuint)(iend - @base) <= unchecked((uint)-1)); - ms->nextToUpdate = target; - } - - /* used in ZSTD_loadDictionaryContent() */ - private static void ZSTD_updateTree(ZSTD_MatchState_t* ms, byte* ip, byte* iend) - { - ZSTD_updateTree_internal( - ms, - ip, - iend, - ms->cParams.minMatch, - ZSTD_dictMode_e.ZSTD_noDict - ); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_insertBtAndGetAllMatches( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iLimit, - ZSTD_dictMode_e dictMode, - uint* rep, - uint ll0, - uint lengthToBeat, - uint mls - ) - { - ZSTD_compressionParameters* cParams = &ms->cParams; - uint sufficient_len = - cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; - byte* @base = ms->window.@base; - uint curr = (uint)(ip - @base); - uint hashLog = cParams->hashLog; - uint minMatch = (uint)(mls == 3 ? 
3 : 4); - uint* hashTable = ms->hashTable; - nuint h = ZSTD_hashPtr(ip, hashLog, mls); - uint matchIndex = hashTable[h]; - uint* bt = ms->chainTable; - uint btLog = cParams->chainLog - 1; - uint btMask = (1U << (int)btLog) - 1; - nuint commonLengthSmaller = 0, - commonLengthLarger = 0; - byte* dictBase = ms->window.dictBase; - uint dictLimit = ms->window.dictLimit; - byte* dictEnd = dictBase + dictLimit; - byte* prefixStart = @base + dictLimit; - uint btLow = btMask >= curr ? 0 : curr - btMask; - uint windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog); - uint matchLow = windowLow != 0 ? windowLow : 1; - uint* smallerPtr = bt + 2 * (curr & btMask); - uint* largerPtr = bt + 2 * (curr & btMask) + 1; - /* farthest referenced position of any match => detects repetitive patterns */ - uint matchEndIdx = curr + 8 + 1; - /* to be nullified at the end */ - uint dummy32; - uint mnum = 0; - uint nbCompares = 1U << (int)cParams->searchLog; - ZSTD_MatchState_t* dms = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? ms->dictMatchState : null; - ZSTD_compressionParameters* dmsCParams = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; - byte* dmsBase = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; - byte* dmsEnd = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; - uint dmsHighLimit = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; - uint dmsLowLimit = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.lowLimit : 0; - uint dmsIndexDelta = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; - uint dmsHashLog = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; - uint dmsBtLog = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; - uint dmsBtMask = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 
(1U << (int)dmsBtLog) - 1 : 0; - uint dmsBtLow = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - && dmsBtMask < dmsHighLimit - dmsLowLimit - ? dmsHighLimit - dmsBtMask - : dmsLowLimit; - nuint bestLength = lengthToBeat - 1; - assert(ll0 <= 1); + if (matchLength > bestLength) { - uint lastR = 3 + ll0; - uint repCode; - for (repCode = ll0; repCode < lastR; repCode++) + assert(matchEndIdx > matchIndex); + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (uint)matchLength; + bestLength = matchLength; + assert(curr - matchIndex > 0); + matches[mnum].off = curr - matchIndex + 3; + matches[mnum].len = (uint)matchLength; + mnum++; + if (matchLength > 1 << 12 || ip + matchLength == iLimit) { - uint repOffset = repCode == 3 ? rep[0] - 1 : rep[repCode]; - uint repIndex = curr - repOffset; - uint repLen = 0; - assert(curr >= dictLimit); - if (repOffset - 1 < curr - dictLimit) - { - if ( - repIndex >= windowLow - && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(ip - repOffset, minMatch) - ) - { - repLen = - (uint)ZSTD_count(ip + minMatch, ip + minMatch - repOffset, iLimit) - + minMatch; - } - } - else - { - byte* repMatch = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - ? dmsBase + repIndex - dmsIndexDelta - : dictBase + repIndex; - assert(curr >= windowLow); - if ( - dictMode == ZSTD_dictMode_e.ZSTD_extDict - && ( - (repOffset - 1 < curr - windowLow ? 1 : 0) - & ZSTD_index_overlap_check(dictLimit, repIndex) - ) != 0 - && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(repMatch, minMatch) - ) - { - repLen = - (uint)ZSTD_count_2segments( - ip + minMatch, - repMatch + minMatch, - iLimit, - dictEnd, - prefixStart - ) + minMatch; - } - - if ( - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - && ( - (repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 
1 : 0) - & ZSTD_index_overlap_check(dictLimit, repIndex) - ) != 0 - && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(repMatch, minMatch) - ) - { - repLen = - (uint)ZSTD_count_2segments( - ip + minMatch, - repMatch + minMatch, - iLimit, - dmsEnd, - prefixStart - ) + minMatch; - } - } - - if (repLen > bestLength) - { - bestLength = repLen; - assert(repCode - ll0 + 1 >= 1); - assert(repCode - ll0 + 1 <= 3); - matches[mnum].off = repCode - ll0 + 1; - matches[mnum].len = repLen; - mnum++; - if (repLen > sufficient_len || ip + repLen == iLimit) - { - return mnum; - } - } + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) + nbCompares = 0; + break; } } - if (mls == 3 && bestLength < mls) + if (match[matchLength] < ip[matchLength]) { - uint matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); - if (matchIndex3 >= matchLow && curr - matchIndex3 < 1 << 18) + *smallerPtr = matchIndex; + commonLengthSmaller = matchLength; + if (matchIndex <= btLow) { - nuint mlen; - if ( - dictMode == ZSTD_dictMode_e.ZSTD_noDict - || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - || matchIndex3 >= dictLimit - ) - { - byte* match = @base + matchIndex3; - mlen = ZSTD_count(ip, match, iLimit); - } - else - { - byte* match = dictBase + matchIndex3; - mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); - } + smallerPtr = &dummy32; + break; + } - if (mlen >= mls) - { - bestLength = mlen; - assert(curr > matchIndex3); - assert(mnum == 0); - assert(curr - matchIndex3 > 0); - matches[0].off = curr - matchIndex3 + 3; - matches[0].len = (uint)mlen; - mnum = 1; - if (mlen > sufficient_len || ip + mlen == iLimit) - { - ms->nextToUpdate = curr + 1; - return 1; - } - } + smallerPtr = nextPtr + 1; + matchIndex = nextPtr[1]; + } + else + { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) + { + largerPtr = &dummy32; + break; } + + largerPtr = nextPtr; + matchIndex = nextPtr[0]; } + } - hashTable[h] = curr; - for (; 
nbCompares != 0 && matchIndex >= matchLow; --nbCompares) + *smallerPtr = *largerPtr = 0; + assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); + if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) + { + nuint dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); + uint dictMatchIndex = dms->hashTable[dmsH]; + uint* dmsBt = dms->chainTable; + commonLengthSmaller = commonLengthLarger = 0; + for (; nbCompares != 0 && dictMatchIndex > dmsLowLimit; --nbCompares) { - uint* nextPtr = bt + 2 * (matchIndex & btMask); - byte* match; + uint* nextPtr = dmsBt + 2 * (dictMatchIndex & dmsBtMask); /* guaranteed minimum nb of common bytes */ nuint matchLength = commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; - assert(curr > matchIndex); - if ( - dictMode == ZSTD_dictMode_e.ZSTD_noDict - || dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState - || matchIndex + matchLength >= dictLimit - ) - { - assert(matchIndex + matchLength >= dictLimit); - match = @base + matchIndex; -#if DEBUG - if (matchIndex >= dictLimit) - assert(memcmp(match, ip, matchLength) == 0); -#endif - matchLength += ZSTD_count(ip + matchLength, match + matchLength, iLimit); - } - else - { - match = dictBase + matchIndex; - assert(memcmp(match, ip, matchLength) == 0); - matchLength += ZSTD_count_2segments( - ip + matchLength, - match + matchLength, - iLimit, - dictEnd, - prefixStart - ); - if (matchIndex + matchLength >= dictLimit) - match = @base + matchIndex; - } - + byte* match = dmsBase + dictMatchIndex; + matchLength += ZSTD_count_2segments( + ip + matchLength, + match + matchLength, + iLimit, + dmsEnd, + prefixStart + ); + if (dictMatchIndex + matchLength >= dmsHighLimit) + match = @base + dictMatchIndex + dmsIndexDelta; if (matchLength > bestLength) { - assert(matchEndIdx > matchIndex); + matchIndex = dictMatchIndex + dmsIndexDelta; if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (uint)matchLength; bestLength = matchLength; @@ -1043,1243 
+1115,1170 @@ uint mls mnum++; if (matchLength > 1 << 12 || ip + matchLength == iLimit) { - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState) - nbCompares = 0; break; } } + if (dictMatchIndex <= dmsBtLow) + { + break; + } + if (match[matchLength] < ip[matchLength]) { - *smallerPtr = matchIndex; commonLengthSmaller = matchLength; - if (matchIndex <= btLow) - { - smallerPtr = &dummy32; - break; - } - - smallerPtr = nextPtr + 1; - matchIndex = nextPtr[1]; + dictMatchIndex = nextPtr[1]; } else { - *largerPtr = matchIndex; commonLengthLarger = matchLength; - if (matchIndex <= btLow) - { - largerPtr = &dummy32; - break; - } - - largerPtr = nextPtr; - matchIndex = nextPtr[0]; + dictMatchIndex = nextPtr[0]; } } + } - *smallerPtr = *largerPtr = 0; - assert(nbCompares <= 1U << (sizeof(nuint) == 4 ? 30 : 31) - 1); - if (dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState && nbCompares != 0) - { - nuint dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); - uint dictMatchIndex = dms->hashTable[dmsH]; - uint* dmsBt = dms->chainTable; - commonLengthSmaller = commonLengthLarger = 0; - for (; nbCompares != 0 && dictMatchIndex > dmsLowLimit; --nbCompares) - { - uint* nextPtr = dmsBt + 2 * (dictMatchIndex & dmsBtMask); - /* guaranteed minimum nb of common bytes */ - nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? 
commonLengthSmaller - : commonLengthLarger; - byte* match = dmsBase + dictMatchIndex; - matchLength += ZSTD_count_2segments( - ip + matchLength, - match + matchLength, - iLimit, - dmsEnd, - prefixStart - ); - if (dictMatchIndex + matchLength >= dmsHighLimit) - match = @base + dictMatchIndex + dmsIndexDelta; - if (matchLength > bestLength) - { - matchIndex = dictMatchIndex + dmsIndexDelta; - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (uint)matchLength; - bestLength = matchLength; - assert(curr - matchIndex > 0); - matches[mnum].off = curr - matchIndex + 3; - matches[mnum].len = (uint)matchLength; - mnum++; - if (matchLength > 1 << 12 || ip + matchLength == iLimit) - { - break; - } - } + assert(matchEndIdx > curr + 8); + ms->nextToUpdate = matchEndIdx - 8; + return mnum; + } - if (dictMatchIndex <= dmsBtLow) - { - break; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint ZSTD_btGetAllMatches_internal( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat, + ZSTD_dictMode_e dictMode, + uint mls + ) + { + assert( + ( + ms->cParams.minMatch <= 3 ? 3 + : ms->cParams.minMatch <= 6 ? 
ms->cParams.minMatch + : 6 + ) == mls + ); + if (ip < ms->window.@base + ms->nextToUpdate) + return 0; + ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); + return ZSTD_insertBtAndGetAllMatches( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + dictMode, + rep, + ll0, + lengthToBeat, + mls + ); + } - if (match[matchLength] < ip[matchLength]) - { - commonLengthSmaller = matchLength; - dictMatchIndex = nextPtr[1]; - } - else - { - commonLengthLarger = matchLength; - dictMatchIndex = nextPtr[0]; - } - } - } + private static uint ZSTD_btGetAllMatches_noDict_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 3 + ); + } - assert(matchEndIdx > curr + 8); - ms->nextToUpdate = matchEndIdx - 8; - return mnum; - } + private static uint ZSTD_btGetAllMatches_noDict_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 4 + ); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_btGetAllMatches_internal( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat, - ZSTD_dictMode_e dictMode, - uint mls - ) - { - assert( - ( - ms->cParams.minMatch <= 3 ? 3 - : ms->cParams.minMatch <= 6 ? 
ms->cParams.minMatch - : 6 - ) == mls - ); - if (ip < ms->window.@base + ms->nextToUpdate) - return 0; - ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); - return ZSTD_insertBtAndGetAllMatches( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - dictMode, - rep, - ll0, - lengthToBeat, - mls - ); - } + private static uint ZSTD_btGetAllMatches_noDict_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 5 + ); + } - private static uint ZSTD_btGetAllMatches_noDict_3( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_noDict, - 3 - ); - } + private static uint ZSTD_btGetAllMatches_noDict_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_noDict, + 6 + ); + } - private static uint ZSTD_btGetAllMatches_noDict_4( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); - } + private static uint ZSTD_btGetAllMatches_extDict_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* 
rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 3 + ); + } - private static uint ZSTD_btGetAllMatches_noDict_5( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); - } + private static uint ZSTD_btGetAllMatches_extDict_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 4 + ); + } - private static uint ZSTD_btGetAllMatches_noDict_6( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); - } + private static uint ZSTD_btGetAllMatches_extDict_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 5 + ); + } - private static uint ZSTD_btGetAllMatches_extDict_3( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - 
ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_extDict, - 3 - ); - } + private static uint ZSTD_btGetAllMatches_extDict_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_extDict, + 6 + ); + } - private static uint ZSTD_btGetAllMatches_extDict_4( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) - { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); - } + private static uint ZSTD_btGetAllMatches_dictMatchState_3( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 3 + ); + } - private static uint ZSTD_btGetAllMatches_extDict_5( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + private static uint ZSTD_btGetAllMatches_dictMatchState_4( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 4 + ); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_5( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* 
nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 5 + ); + } + + private static uint ZSTD_btGetAllMatches_dictMatchState_6( + ZSTD_match_t* matches, + ZSTD_MatchState_t* ms, + uint* nextToUpdate3, + byte* ip, + byte* iHighLimit, + uint* rep, + uint ll0, + uint lengthToBeat + ) + { + return ZSTD_btGetAllMatches_internal( + matches, + ms, + nextToUpdate3, + ip, + iHighLimit, + rep, + ll0, + lengthToBeat, + ZSTD_dictMode_e.ZSTD_dictMatchState, + 6 + ); + } + + private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = + new ZSTD_getAllMatchesFn[3][] { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); - } + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_noDict_3, + ZSTD_btGetAllMatches_noDict_4, + ZSTD_btGetAllMatches_noDict_5, + ZSTD_btGetAllMatches_noDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_extDict_3, + ZSTD_btGetAllMatches_extDict_4, + ZSTD_btGetAllMatches_extDict_5, + ZSTD_btGetAllMatches_extDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_dictMatchState_3, + ZSTD_btGetAllMatches_dictMatchState_4, + ZSTD_btGetAllMatches_dictMatchState_5, + ZSTD_btGetAllMatches_dictMatchState_6, + }, + }; + + private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches( + ZSTD_MatchState_t* ms, + ZSTD_dictMode_e dictMode + ) + { + uint mls = + ms->cParams.minMatch <= 3 ? 3 + : ms->cParams.minMatch <= 6 ? 
ms->cParams.minMatch + : 6; + assert((uint)dictMode < 3); + assert(mls - 3 < 4); + return getAllMatchesFns[(int)dictMode][mls - 3]; + } - private static uint ZSTD_btGetAllMatches_extDict_6( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + /* ZSTD_optLdm_skipRawSeqStoreBytes(): + * Moves forward in @rawSeqStore by @nbBytes, + * which will update the fields 'pos' and 'posInSequence'. + */ + private static void ZSTD_optLdm_skipRawSeqStoreBytes( + RawSeqStore_t* rawSeqStore, + nuint nbBytes + ) + { + uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); + while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; + if (currPos >= currSeq.litLength + currSeq.matchLength) + { + currPos -= currSeq.litLength + currSeq.matchLength; + rawSeqStore->pos++; + } + else + { + rawSeqStore->posInSequence = currPos; + break; + } } - private static uint ZSTD_btGetAllMatches_dictMatchState_3( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 3 - ); + rawSeqStore->posInSequence = 0; } + } - private static uint ZSTD_btGetAllMatches_dictMatchState_4( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + /* ZSTD_opt_getNextMatchAndUpdateSeqStore(): + * Calculates the beginning and end of the next match in the current block. 
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore. + */ + private static void ZSTD_opt_getNextMatchAndUpdateSeqStore( + ZSTD_optLdm_t* optLdm, + uint currPosInBlock, + uint blockBytesRemaining + ) + { + rawSeq currSeq; + uint currBlockEndPos; + uint literalsBytesRemaining; + uint matchBytesRemaining; + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 4 - ); + optLdm->startPosInBlock = 0xffffffff; + optLdm->endPosInBlock = 0xffffffff; + return; } - private static uint ZSTD_btGetAllMatches_dictMatchState_5( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; + assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); + currBlockEndPos = currPosInBlock + blockBytesRemaining; + literalsBytesRemaining = + optLdm->seqStore.posInSequence < currSeq.litLength + ? currSeq.litLength - (uint)optLdm->seqStore.posInSequence + : 0; + matchBytesRemaining = + literalsBytesRemaining == 0 + ? 
currSeq.matchLength + - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) + : currSeq.matchLength; + if (literalsBytesRemaining >= blockBytesRemaining) { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 5 - ); + optLdm->startPosInBlock = 0xffffffff; + optLdm->endPosInBlock = 0xffffffff; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); + return; } - private static uint ZSTD_btGetAllMatches_dictMatchState_6( - ZSTD_match_t* matches, - ZSTD_MatchState_t* ms, - uint* nextToUpdate3, - byte* ip, - byte* iHighLimit, - uint* rep, - uint ll0, - uint lengthToBeat - ) + optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; + optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; + optLdm->offset = currSeq.offset; + if (optLdm->endPosInBlock > currBlockEndPos) { - return ZSTD_btGetAllMatches_internal( - matches, - ms, - nextToUpdate3, - ip, - iHighLimit, - rep, - ll0, - lengthToBeat, - ZSTD_dictMode_e.ZSTD_dictMatchState, - 6 + optLdm->endPosInBlock = currBlockEndPos; + ZSTD_optLdm_skipRawSeqStoreBytes( + &optLdm->seqStore, + currBlockEndPos - currPosInBlock ); } - - private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = - new ZSTD_getAllMatchesFn[3][] - { - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_noDict_3, - ZSTD_btGetAllMatches_noDict_4, - ZSTD_btGetAllMatches_noDict_5, - ZSTD_btGetAllMatches_noDict_6, - }, - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_extDict_3, - ZSTD_btGetAllMatches_extDict_4, - ZSTD_btGetAllMatches_extDict_5, - ZSTD_btGetAllMatches_extDict_6, - }, - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_dictMatchState_3, - ZSTD_btGetAllMatches_dictMatchState_4, - ZSTD_btGetAllMatches_dictMatchState_5, - ZSTD_btGetAllMatches_dictMatchState_6, - }, - }; - - private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches( - ZSTD_MatchState_t* 
ms, - ZSTD_dictMode_e dictMode - ) + else { - uint mls = - ms->cParams.minMatch <= 3 ? 3 - : ms->cParams.minMatch <= 6 ? ms->cParams.minMatch - : 6; - assert((uint)dictMode < 3); - assert(mls - 3 < 4); - return getAllMatchesFns[(int)dictMode][mls - 3]; + ZSTD_optLdm_skipRawSeqStoreBytes( + &optLdm->seqStore, + literalsBytesRemaining + matchBytesRemaining + ); } + } - /* ZSTD_optLdm_skipRawSeqStoreBytes(): - * Moves forward in @rawSeqStore by @nbBytes, - * which will update the fields 'pos' and 'posInSequence'. - */ - private static void ZSTD_optLdm_skipRawSeqStoreBytes( - RawSeqStore_t* rawSeqStore, - nuint nbBytes - ) - { - uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); - while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) - { - rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; - if (currPos >= currSeq.litLength + currSeq.matchLength) - { - currPos -= currSeq.litLength + currSeq.matchLength; - rawSeqStore->pos++; - } - else - { - rawSeqStore->posInSequence = currPos; - break; - } - } - - if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) - { - rawSeqStore->posInSequence = 0; - } + /* ZSTD_optLdm_maybeAddMatch(): + * Adds a match if it's long enough, + * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock', + * into 'matches'. Maintains the correct ordering of 'matches'. 
+ */ + private static void ZSTD_optLdm_maybeAddMatch( + ZSTD_match_t* matches, + uint* nbMatches, + ZSTD_optLdm_t* optLdm, + uint currPosInBlock, + uint minMatch + ) + { + uint posDiff = currPosInBlock - optLdm->startPosInBlock; + /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ + uint candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; + if ( + currPosInBlock < optLdm->startPosInBlock + || currPosInBlock >= optLdm->endPosInBlock + || candidateMatchLength < minMatch + ) + { + return; } - /* ZSTD_opt_getNextMatchAndUpdateSeqStore(): - * Calculates the beginning and end of the next match in the current block. - * Updates 'pos' and 'posInSequence' of the ldmSeqStore. - */ - private static void ZSTD_opt_getNextMatchAndUpdateSeqStore( - ZSTD_optLdm_t* optLdm, - uint currPosInBlock, - uint blockBytesRemaining + if ( + *nbMatches == 0 + || candidateMatchLength > matches[*nbMatches - 1].len && *nbMatches < 1 << 12 ) { - rawSeq currSeq; - uint currBlockEndPos; - uint literalsBytesRemaining; - uint matchBytesRemaining; - if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) - { - optLdm->startPosInBlock = 0xffffffff; - optLdm->endPosInBlock = 0xffffffff; - return; - } - - currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; - assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); - currBlockEndPos = currPosInBlock + blockBytesRemaining; - literalsBytesRemaining = - optLdm->seqStore.posInSequence < currSeq.litLength - ? currSeq.litLength - (uint)optLdm->seqStore.posInSequence - : 0; - matchBytesRemaining = - literalsBytesRemaining == 0 - ? 
currSeq.matchLength - - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) - : currSeq.matchLength; - if (literalsBytesRemaining >= blockBytesRemaining) - { - optLdm->startPosInBlock = 0xffffffff; - optLdm->endPosInBlock = 0xffffffff; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining); - return; - } + assert(optLdm->offset > 0); + uint candidateOffBase = optLdm->offset + 3; + matches[*nbMatches].len = candidateMatchLength; + matches[*nbMatches].off = candidateOffBase; + (*nbMatches)++; + } + } - optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; - optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; - optLdm->offset = currSeq.offset; - if (optLdm->endPosInBlock > currBlockEndPos) - { - optLdm->endPosInBlock = currBlockEndPos; - ZSTD_optLdm_skipRawSeqStoreBytes( - &optLdm->seqStore, - currBlockEndPos - currPosInBlock - ); - } - else - { - ZSTD_optLdm_skipRawSeqStoreBytes( - &optLdm->seqStore, - literalsBytesRemaining + matchBytesRemaining - ); - } + /* ZSTD_optLdm_processMatchCandidate(): + * Wrapper function to update ldm seq store and call ldm functions as necessary. + */ + private static void ZSTD_optLdm_processMatchCandidate( + ZSTD_optLdm_t* optLdm, + ZSTD_match_t* matches, + uint* nbMatches, + uint currPosInBlock, + uint remainingBytes, + uint minMatch + ) + { + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) + { + return; } - /* ZSTD_optLdm_maybeAddMatch(): - * Adds a match if it's long enough, - * based on it's 'matchStartPosInBlock' and 'matchEndPosInBlock', - * into 'matches'. Maintains the correct ordering of 'matches'. 
- */ - private static void ZSTD_optLdm_maybeAddMatch( - ZSTD_match_t* matches, - uint* nbMatches, - ZSTD_optLdm_t* optLdm, - uint currPosInBlock, - uint minMatch - ) + if (currPosInBlock >= optLdm->endPosInBlock) { - uint posDiff = currPosInBlock - optLdm->startPosInBlock; - /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ - uint candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; - if ( - currPosInBlock < optLdm->startPosInBlock - || currPosInBlock >= optLdm->endPosInBlock - || candidateMatchLength < minMatch - ) + if (currPosInBlock > optLdm->endPosInBlock) { - return; + /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily + * at the end of a match from the ldm seq store, and will often be some bytes + * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots" + */ + uint posOvershoot = currPosInBlock - optLdm->endPosInBlock; + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); } - if ( - *nbMatches == 0 - || candidateMatchLength > matches[*nbMatches - 1].len && *nbMatches < 1 << 12 - ) - { - assert(optLdm->offset > 0); - uint candidateOffBase = optLdm->offset + 3; - matches[*nbMatches].len = candidateMatchLength; - matches[*nbMatches].off = candidateOffBase; - (*nbMatches)++; - } + ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); } - /* ZSTD_optLdm_processMatchCandidate(): - * Wrapper function to update ldm seq store and call ldm functions as necessary. 
- */ - private static void ZSTD_optLdm_processMatchCandidate( - ZSTD_optLdm_t* optLdm, - ZSTD_match_t* matches, - uint* nbMatches, - uint currPosInBlock, - uint remainingBytes, - uint minMatch - ) + ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static nuint ZSTD_compressBlock_opt_generic( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + int optLevel, + ZSTD_dictMode_e dictMode + ) + { + optState_t* optStatePtr = &ms->opt; + byte* istart = (byte*)src; + byte* ip = istart; + byte* anchor = istart; + byte* iend = istart + srcSize; + byte* ilimit = iend - 8; + byte* @base = ms->window.@base; + byte* prefixStart = @base + ms->window.dictLimit; + ZSTD_compressionParameters* cParams = &ms->cParams; + ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); + uint sufficient_len = + cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; + uint minMatch = (uint)(cParams->minMatch == 3 ? 3 : 4); + uint nextToUpdate3 = ms->nextToUpdate; + ZSTD_optimal_t* opt = optStatePtr->priceTable; + ZSTD_match_t* matches = optStatePtr->matchTable; + ZSTD_optimal_t lastStretch; + ZSTD_optLdm_t optLdm; + lastStretch = new ZSTD_optimal_t(); + optLdm.seqStore = ms->ldmSeqStore != null ? *ms->ldmSeqStore : kNullRawSeqStore; + optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; + ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (uint)(ip - istart), (uint)(iend - ip)); + assert(optLevel <= 2); + ZSTD_rescaleFreqs(optStatePtr, (byte*)src, srcSize, optLevel); + ip += ip == prefixStart ? 1 : 0; + while (ip < ilimit) { - if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) + uint cur, + last_pos = 0; { - return; + uint litlen = (uint)(ip - anchor); + uint ll0 = litlen == 0 ? 
1U : 0U; + uint nbMatches = getAllMatches( + matches, + ms, + &nextToUpdate3, + ip, + iend, + rep, + ll0, + minMatch + ); + ZSTD_optLdm_processMatchCandidate( + &optLdm, + matches, + &nbMatches, + (uint)(ip - istart), + (uint)(iend - ip), + minMatch + ); + if (nbMatches == 0) + { + ip++; + continue; + } + + opt[0].mlen = 0; + opt[0].litlen = litlen; + opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); + memcpy(&opt[0].rep[0], rep, sizeof(uint) * 3); + { + uint maxML = matches[nbMatches - 1].len; + uint maxOffBase = matches[nbMatches - 1].off; + if (maxML > sufficient_len) + { + lastStretch.litlen = 0; + lastStretch.mlen = maxML; + lastStretch.off = maxOffBase; + cur = 0; + last_pos = maxML; + goto _shortestPath; + } + } + + assert(opt[0].price >= 0); + { + uint pos; + uint matchNb; + for (pos = 1; pos < minMatch; pos++) + { + opt[pos].price = 1 << 30; + opt[pos].mlen = 0; + opt[pos].litlen = litlen + pos; + } + + for (matchNb = 0; matchNb < nbMatches; matchNb++) + { + uint offBase = matches[matchNb].off; + uint end = matches[matchNb].len; + for (; pos <= end; pos++) + { + int matchPrice = (int)ZSTD_getMatchPrice( + offBase, + pos, + optStatePtr, + optLevel + ); + int sequencePrice = opt[0].price + matchPrice; + opt[pos].mlen = pos; + opt[pos].off = offBase; + opt[pos].litlen = 0; + opt[pos].price = + sequencePrice + + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + } + } + + last_pos = pos - 1; + opt[pos].price = 1 << 30; + } } - if (currPosInBlock >= optLdm->endPosInBlock) + for (cur = 1; cur <= last_pos; cur++) { - if (currPosInBlock > optLdm->endPosInBlock) + byte* inr = ip + cur; + assert(cur <= 1 << 12); { - /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily - * at the end of a match from the ldm seq store, and will often be some bytes - * over beyond matchEndPosInBlock. 
As such, we need to correct for these "overshoots" - */ - uint posOvershoot = currPosInBlock - optLdm->endPosInBlock; - ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); + uint litlen = opt[cur - 1].litlen + 1; + int price = + opt[cur - 1].price + + (int)ZSTD_rawLiteralsCost(ip + cur - 1, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(litlen - 1, optStatePtr, optLevel) + ); + assert(price < 1000000000); + if (price <= opt[cur].price) + { + ZSTD_optimal_t prevMatch = opt[cur]; + opt[cur] = opt[cur - 1]; + opt[cur].litlen = litlen; + opt[cur].price = price; + if ( + optLevel >= 1 + && prevMatch.litlen == 0 + && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) + < 0 + && ip + cur < iend + ) + { + /* check next position, in case it would be cheaper */ + int with1literal = + prevMatch.price + + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) + ); + int withMoreLiterals = + price + + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) + + ( + (int)ZSTD_litLengthPrice(litlen + 1, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice( + litlen + 1 - 1, + optStatePtr, + optLevel + ) + ); + if ( + with1literal < withMoreLiterals + && with1literal < opt[cur + 1].price + ) + { + /* update offset history - before it disappears */ + uint prev = cur - prevMatch.mlen; + repcodes_s newReps = ZSTD_newRep( + opt[prev].rep, + prevMatch.off, + opt[prev].litlen == 0 ? 
1U : 0U + ); + assert(cur >= prevMatch.mlen); + opt[cur + 1] = prevMatch; + memcpy(opt[cur + 1].rep, &newReps, (uint)sizeof(repcodes_s)); + opt[cur + 1].litlen = 1; + opt[cur + 1].price = with1literal; + if (last_pos < cur + 1) + last_pos = cur + 1; + } + } + } } - ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); - } + assert(cur >= opt[cur].mlen); + if (opt[cur].litlen == 0) + { + /* just finished a match => alter offset history */ + uint prev = cur - opt[cur].mlen; + repcodes_s newReps = ZSTD_newRep( + opt[prev].rep, + opt[cur].off, + opt[prev].litlen == 0 ? 1U : 0U + ); + memcpy(opt[cur].rep, &newReps, (uint)sizeof(repcodes_s)); + } - ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); - } + if (inr > ilimit) + continue; + if (cur == last_pos) + break; + if (optLevel == 0 && opt[cur + 1].price <= opt[cur].price + (1 << 8) / 2) + { + continue; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint ZSTD_compressBlock_opt_generic( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - int optLevel, - ZSTD_dictMode_e dictMode - ) - { - optState_t* optStatePtr = &ms->opt; - byte* istart = (byte*)src; - byte* ip = istart; - byte* anchor = istart; - byte* iend = istart + srcSize; - byte* ilimit = iend - 8; - byte* @base = ms->window.@base; - byte* prefixStart = @base + ms->window.dictLimit; - ZSTD_compressionParameters* cParams = &ms->cParams; - ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); - uint sufficient_len = - cParams->targetLength < (1 << 12) - 1 ? cParams->targetLength : (1 << 12) - 1; - uint minMatch = (uint)(cParams->minMatch == 3 ? 
3 : 4); - uint nextToUpdate3 = ms->nextToUpdate; - ZSTD_optimal_t* opt = optStatePtr->priceTable; - ZSTD_match_t* matches = optStatePtr->matchTable; - ZSTD_optimal_t lastStretch; - ZSTD_optLdm_t optLdm; - lastStretch = new ZSTD_optimal_t(); - optLdm.seqStore = ms->ldmSeqStore != null ? *ms->ldmSeqStore : kNullRawSeqStore; - optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; - ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (uint)(ip - istart), (uint)(iend - ip)); - assert(optLevel <= 2); - ZSTD_rescaleFreqs(optStatePtr, (byte*)src, srcSize, optLevel); - ip += ip == prefixStart ? 1 : 0; - while (ip < ilimit) - { - uint cur, - last_pos = 0; + assert(opt[cur].price >= 0); { - uint litlen = (uint)(ip - anchor); - uint ll0 = litlen == 0 ? 1U : 0U; + uint ll0 = opt[cur].litlen == 0 ? 1U : 0U; + int previousPrice = opt[cur].price; + int basePrice = + previousPrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); uint nbMatches = getAllMatches( matches, ms, &nextToUpdate3, - ip, + inr, iend, - rep, + opt[cur].rep, ll0, minMatch ); + uint matchNb; ZSTD_optLdm_processMatchCandidate( &optLdm, matches, &nbMatches, - (uint)(ip - istart), - (uint)(iend - ip), + (uint)(inr - istart), + (uint)(iend - inr), minMatch ); if (nbMatches == 0) { - ip++; continue; } - opt[0].mlen = 0; - opt[0].litlen = litlen; - opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); - memcpy(&opt[0].rep[0], rep, sizeof(uint) * 3); { - uint maxML = matches[nbMatches - 1].len; - uint maxOffBase = matches[nbMatches - 1].off; - if (maxML > sufficient_len) + uint longestML = matches[nbMatches - 1].len; + if ( + longestML > sufficient_len + || cur + longestML >= 1 << 12 + || ip + cur + longestML >= iend + ) { + lastStretch.mlen = longestML; + lastStretch.off = matches[nbMatches - 1].off; lastStretch.litlen = 0; - lastStretch.mlen = maxML; - lastStretch.off = maxOffBase; - cur = 0; - last_pos = maxML; + last_pos = cur + longestML; goto _shortestPath; } } - 
assert(opt[0].price >= 0); - { - uint pos; - uint matchNb; - for (pos = 1; pos < minMatch; pos++) - { - opt[pos].price = 1 << 30; - opt[pos].mlen = 0; - opt[pos].litlen = litlen + pos; - } - - for (matchNb = 0; matchNb < nbMatches; matchNb++) - { - uint offBase = matches[matchNb].off; - uint end = matches[matchNb].len; - for (; pos <= end; pos++) - { - int matchPrice = (int)ZSTD_getMatchPrice( - offBase, - pos, - optStatePtr, - optLevel - ); - int sequencePrice = opt[0].price + matchPrice; - opt[pos].mlen = pos; - opt[pos].off = offBase; - opt[pos].litlen = 0; - opt[pos].price = - sequencePrice - + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); - } - } - - last_pos = pos - 1; - opt[pos].price = 1 << 30; - } - } - - for (cur = 1; cur <= last_pos; cur++) - { - byte* inr = ip + cur; - assert(cur <= 1 << 12); + for (matchNb = 0; matchNb < nbMatches; matchNb++) { - uint litlen = opt[cur - 1].litlen + 1; - int price = - opt[cur - 1].price - + (int)ZSTD_rawLiteralsCost(ip + cur - 1, 1, optStatePtr, optLevel) - + ( - (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice(litlen - 1, optStatePtr, optLevel) - ); - assert(price < 1000000000); - if (price <= opt[cur].price) + uint offset = matches[matchNb].off; + uint lastML = matches[matchNb].len; + uint startML = matchNb > 0 ? 
matches[matchNb - 1].len + 1 : minMatch; + uint mlen; + for (mlen = lastML; mlen >= startML; mlen--) { - ZSTD_optimal_t prevMatch = opt[cur]; - opt[cur] = opt[cur - 1]; - opt[cur].litlen = litlen; - opt[cur].price = price; - if ( - optLevel >= 1 - && prevMatch.litlen == 0 - && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) - < 0 - && ip + cur < iend - ) + uint pos = cur + mlen; + int price = + basePrice + + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + if (pos > last_pos || price < opt[pos].price) { - /* check next position, in case it would be cheaper */ - int with1literal = - prevMatch.price - + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) - + ( - (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) - ); - int withMoreLiterals = - price - + (int)ZSTD_rawLiteralsCost(ip + cur, 1, optStatePtr, optLevel) - + ( - (int)ZSTD_litLengthPrice(litlen + 1, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice( - litlen + 1 - 1, - optStatePtr, - optLevel - ) - ); - if ( - with1literal < withMoreLiterals - && with1literal < opt[cur + 1].price - ) + while (last_pos < pos) { - /* update offset history - before it disappears */ - uint prev = cur - prevMatch.mlen; - repcodes_s newReps = ZSTD_newRep( - opt[prev].rep, - prevMatch.off, - opt[prev].litlen == 0 ? 1U : 0U - ); - assert(cur >= prevMatch.mlen); - opt[cur + 1] = prevMatch; - memcpy(opt[cur + 1].rep, &newReps, (uint)sizeof(repcodes_s)); - opt[cur + 1].litlen = 1; - opt[cur + 1].price = with1literal; - if (last_pos < cur + 1) - last_pos = cur + 1; + last_pos++; + opt[last_pos].price = 1 << 30; + opt[last_pos].litlen = 0 == 0 ? 
1U : 0U; } - } - } - } - - assert(cur >= opt[cur].mlen); - if (opt[cur].litlen == 0) - { - /* just finished a match => alter offset history */ - uint prev = cur - opt[cur].mlen; - repcodes_s newReps = ZSTD_newRep( - opt[prev].rep, - opt[cur].off, - opt[prev].litlen == 0 ? 1U : 0U - ); - memcpy(opt[cur].rep, &newReps, (uint)sizeof(repcodes_s)); - } - - if (inr > ilimit) - continue; - if (cur == last_pos) - break; - if (optLevel == 0 && opt[cur + 1].price <= opt[cur].price + (1 << 8) / 2) - { - continue; - } - - assert(opt[cur].price >= 0); - { - uint ll0 = opt[cur].litlen == 0 ? 1U : 0U; - int previousPrice = opt[cur].price; - int basePrice = - previousPrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); - uint nbMatches = getAllMatches( - matches, - ms, - &nextToUpdate3, - inr, - iend, - opt[cur].rep, - ll0, - minMatch - ); - uint matchNb; - ZSTD_optLdm_processMatchCandidate( - &optLdm, - matches, - &nbMatches, - (uint)(inr - istart), - (uint)(iend - inr), - minMatch - ); - if (nbMatches == 0) - { - continue; - } - { - uint longestML = matches[nbMatches - 1].len; - if ( - longestML > sufficient_len - || cur + longestML >= 1 << 12 - || ip + cur + longestML >= iend - ) - { - lastStretch.mlen = longestML; - lastStretch.off = matches[nbMatches - 1].off; - lastStretch.litlen = 0; - last_pos = cur + longestML; - goto _shortestPath; + opt[pos].mlen = mlen; + opt[pos].off = offset; + opt[pos].litlen = 0; + opt[pos].price = price; } - } - - for (matchNb = 0; matchNb < nbMatches; matchNb++) - { - uint offset = matches[matchNb].off; - uint lastML = matches[matchNb].len; - uint startML = matchNb > 0 ? 
matches[matchNb - 1].len + 1 : minMatch; - uint mlen; - for (mlen = lastML; mlen >= startML; mlen--) + else { - uint pos = cur + mlen; - int price = - basePrice - + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); - if (pos > last_pos || price < opt[pos].price) - { - while (last_pos < pos) - { - last_pos++; - opt[last_pos].price = 1 << 30; - opt[last_pos].litlen = 0 == 0 ? 1U : 0U; - } - - opt[pos].mlen = mlen; - opt[pos].off = offset; - opt[pos].litlen = 0; - opt[pos].price = price; - } - else - { - if (optLevel == 0) - break; - } + if (optLevel == 0) + break; } } } - - opt[last_pos + 1].price = 1 << 30; } - lastStretch = opt[last_pos]; - assert(cur >= lastStretch.mlen); - cur = last_pos - lastStretch.mlen; - _shortestPath: - assert(opt[0].mlen == 0); - assert(last_pos >= lastStretch.mlen); - assert(cur == last_pos - lastStretch.mlen); - if (lastStretch.mlen == 0) - { - assert(lastStretch.litlen == (uint)(ip - anchor) + last_pos); - ip += last_pos; - continue; - } + opt[last_pos + 1].price = 1 << 30; + } + + lastStretch = opt[last_pos]; + assert(cur >= lastStretch.mlen); + cur = last_pos - lastStretch.mlen; + _shortestPath: + assert(opt[0].mlen == 0); + assert(last_pos >= lastStretch.mlen); + assert(cur == last_pos - lastStretch.mlen); + if (lastStretch.mlen == 0) + { + assert(lastStretch.litlen == (uint)(ip - anchor) + last_pos); + ip += last_pos; + continue; + } + + assert(lastStretch.off > 0); + if (lastStretch.litlen == 0) + { + /* finishing on a match : update offset history */ + repcodes_s reps = ZSTD_newRep( + opt[cur].rep, + lastStretch.off, + opt[cur].litlen == 0 ? 
1U : 0U + ); + memcpy(rep, &reps, (uint)sizeof(repcodes_s)); + } + else + { + memcpy(rep, lastStretch.rep, (uint)sizeof(repcodes_s)); + assert(cur >= lastStretch.litlen); + cur -= lastStretch.litlen; + } - assert(lastStretch.off > 0); - if (lastStretch.litlen == 0) + { + uint storeEnd = cur + 2; + uint storeStart = storeEnd; + uint stretchPos = cur; + assert(storeEnd < (1 << 12) + 3); + if (lastStretch.litlen > 0) { - /* finishing on a match : update offset history */ - repcodes_s reps = ZSTD_newRep( - opt[cur].rep, - lastStretch.off, - opt[cur].litlen == 0 ? 1U : 0U - ); - memcpy(rep, &reps, (uint)sizeof(repcodes_s)); + opt[storeEnd].litlen = lastStretch.litlen; + opt[storeEnd].mlen = 0; + storeStart = storeEnd - 1; + opt[storeStart] = lastStretch; } - else + { - memcpy(rep, lastStretch.rep, (uint)sizeof(repcodes_s)); - assert(cur >= lastStretch.litlen); - cur -= lastStretch.litlen; + opt[storeEnd] = lastStretch; + storeStart = storeEnd; } + while (true) { - uint storeEnd = cur + 2; - uint storeStart = storeEnd; - uint stretchPos = cur; - assert(storeEnd < (1 << 12) + 3); - if (lastStretch.litlen > 0) + ZSTD_optimal_t nextStretch = opt[stretchPos]; + opt[storeStart].litlen = nextStretch.litlen; + if (nextStretch.mlen == 0) { - opt[storeEnd].litlen = lastStretch.litlen; - opt[storeEnd].mlen = 0; - storeStart = storeEnd - 1; - opt[storeStart] = lastStretch; + break; } - { - opt[storeEnd] = lastStretch; - storeStart = storeEnd; - } + storeStart--; + opt[storeStart] = nextStretch; + assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); + stretchPos -= nextStretch.litlen + nextStretch.mlen; + } - while (true) + { + uint storePos; + for (storePos = storeStart; storePos <= storeEnd; storePos++) { - ZSTD_optimal_t nextStretch = opt[stretchPos]; - opt[storeStart].litlen = nextStretch.litlen; - if (nextStretch.mlen == 0) + uint llen = opt[storePos].litlen; + uint mlen = opt[storePos].mlen; + uint offBase = opt[storePos].off; + uint advance = llen + mlen; + if (mlen 
== 0) { - break; + assert(storePos == storeEnd); + ip = anchor + llen; + continue; } - storeStart--; - opt[storeStart] = nextStretch; - assert(nextStretch.litlen + nextStretch.mlen <= stretchPos); - stretchPos -= nextStretch.litlen + nextStretch.mlen; - } - - { - uint storePos; - for (storePos = storeStart; storePos <= storeEnd; storePos++) - { - uint llen = opt[storePos].litlen; - uint mlen = opt[storePos].mlen; - uint offBase = opt[storePos].off; - uint advance = llen + mlen; - if (mlen == 0) - { - assert(storePos == storeEnd); - ip = anchor + llen; - continue; - } - - assert(anchor + llen <= iend); - ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); - anchor += advance; - ip = anchor; - } + assert(anchor + llen <= iend); + ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); + anchor += advance; + ip = anchor; } - - ZSTD_setBasePrices(optStatePtr, optLevel); } - } - return (nuint)(iend - anchor); - } - - private static nuint ZSTD_compressBlock_opt0( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - ZSTD_dictMode_e dictMode - ) - { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0, dictMode); + ZSTD_setBasePrices(optStatePtr, optLevel); + } } - private static nuint ZSTD_compressBlock_opt2( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize, - ZSTD_dictMode_e dictMode - ) - { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2, dictMode); - } + return (nuint)(iend - anchor); + } - private static nuint ZSTD_compressBlock_btopt( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_opt0( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_opt0( + 
ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + ZSTD_dictMode_e dictMode + ) + { + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0, dictMode); + } - /* ZSTD_initStats_ultra(): - * make a first compression pass, just to seed stats with more accurate starting values. - * only works on first block, with no dictionary and no ldm. - * this function cannot error out, its narrow contract must be respected. - */ - private static void ZSTD_initStats_ultra( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - /* updated rep codes will sink here */ - uint* tmpRep = stackalloc uint[3]; - memcpy(tmpRep, rep, sizeof(uint) * 3); - assert(ms->opt.litLengthSum == 0); - assert(seqStore->sequences == seqStore->sequencesStart); - assert(ms->window.dictLimit == ms->window.lowLimit); - assert(ms->window.dictLimit - ms->nextToUpdate <= 1); - ZSTD_compressBlock_opt2( - ms, - seqStore, - tmpRep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_noDict - ); - ZSTD_resetSeqStore(seqStore); - ms->window.@base -= srcSize; - ms->window.dictLimit += (uint)srcSize; - ms->window.lowLimit = ms->window.dictLimit; - ms->nextToUpdate = ms->window.dictLimit; - } + private static nuint ZSTD_compressBlock_opt2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize, + ZSTD_dictMode_e dictMode + ) + { + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2, dictMode); + } - private static nuint ZSTD_compressBlock_btultra( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_opt2( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_btopt( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + 
srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - /* note : no btultra2 variant for extDict nor dictMatchState, - * because btultra2 is not meant to work with dictionaries - * and is only specific for the first block (no prefix) */ - private static nuint ZSTD_compressBlock_btultra2( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - uint curr = (uint)((byte*)src - ms->window.@base); - assert(srcSize <= 1 << 17); - if ( - ms->opt.litLengthSum == 0 - && seqStore->sequences == seqStore->sequencesStart - && ms->window.dictLimit == ms->window.lowLimit - && curr == ms->window.dictLimit - && srcSize > 8 - ) - { - ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); - } + /* ZSTD_initStats_ultra(): + * make a first compression pass, just to seed stats with more accurate starting values. + * only works on first block, with no dictionary and no ldm. + * this function cannot error out, its narrow contract must be respected. + */ + private static void ZSTD_initStats_ultra( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + /* updated rep codes will sink here */ + uint* tmpRep = stackalloc uint[3]; + memcpy(tmpRep, rep, sizeof(uint) * 3); + assert(ms->opt.litLengthSum == 0); + assert(seqStore->sequences == seqStore->sequencesStart); + assert(ms->window.dictLimit == ms->window.lowLimit); + assert(ms->window.dictLimit - ms->nextToUpdate <= 1); + ZSTD_compressBlock_opt2( + ms, + seqStore, + tmpRep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); + ZSTD_resetSeqStore(seqStore); + ms->window.@base -= srcSize; + ms->window.dictLimit += (uint)srcSize; + ms->window.lowLimit = ms->window.dictLimit; + ms->nextToUpdate = ms->window.dictLimit; + } - return ZSTD_compressBlock_opt2( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_noDict - ); - } + private static nuint ZSTD_compressBlock_btultra( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, 
+ nuint srcSize + ) + { + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize + /* note : no btultra2 variant for extDict nor dictMatchState, + * because btultra2 is not meant to work with dictionaries + * and is only specific for the first block (no prefix) */ + private static nuint ZSTD_compressBlock_btultra2( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + uint curr = (uint)((byte*)src - ms->window.@base); + assert(srcSize <= 1 << 17); + if ( + ms->opt.litLengthSum == 0 + && seqStore->sequences == seqStore->sequencesStart + && ms->window.dictLimit == ms->window.lowLimit + && curr == ms->window.dictLimit + && srcSize > 8 ) { - return ZSTD_compressBlock_opt0( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); + ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } - private static nuint ZSTD_compressBlock_btopt_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_opt0( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_extDict - ); - } + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_noDict + ); + } - private static nuint ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_opt2( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_dictMatchState - ); - } + private static nuint ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + 
srcSize, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } - private static nuint ZSTD_compressBlock_btultra_extDict( - ZSTD_MatchState_t* ms, - SeqStore_t* seqStore, - uint* rep, - void* src, - nuint srcSize - ) - { - return ZSTD_compressBlock_opt2( - ms, - seqStore, - rep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_extDict - ); - } + private static nuint ZSTD_compressBlock_btopt_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_opt0( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_extDict + ); + } + + private static nuint ZSTD_compressBlock_btultra_dictMatchState( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_dictMatchState + ); + } + + private static nuint ZSTD_compressBlock_btultra_extDict( + ZSTD_MatchState_t* ms, + SeqStore_t* seqStore, + uint* rep, + void* src, + nuint srcSize + ) + { + return ZSTD_compressBlock_opt2( + ms, + seqStore, + rep, + src, + srcSize, + ZSTD_dictMode_e.ZSTD_extDict + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs index 96ba2eab0..51f0fd229 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs @@ -1,167 +1,167 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public static unsafe partial class Methods { - public static unsafe partial class Methods + /* for hashLog > 8, hash 2 bytes. + * for hashLog == 8, just take the byte, no hashing. 
+ * The speed of this method relies on compile-time constant propagation */ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static uint hash2(void* p, uint hashLog) { - /* for hashLog > 8, hash 2 bytes. - * for hashLog == 8, just take the byte, no hashing. - * The speed of this method relies on compile-time constant propagation */ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint hash2(void* p, uint hashLog) - { - assert(hashLog >= 8); - if (hashLog == 8) - return ((byte*)p)[0]; - assert(hashLog <= 10); - return MEM_read16(p) * 0x9e3779b9 >> (int)(32 - hashLog); - } + assert(hashLog >= 8); + if (hashLog == 8) + return ((byte*)p)[0]; + assert(hashLog <= 10); + return MEM_read16(p) * 0x9e3779b9 >> (int)(32 - hashLog); + } - private static void initStats(FPStats* fpstats) + private static void initStats(FPStats* fpstats) + { + *fpstats = new FPStats(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void addEvents_generic( + Fingerprint* fp, + void* src, + nuint srcSize, + nuint samplingRate, + uint hashLog + ) + { + sbyte* p = (sbyte*)src; + nuint limit = srcSize - 2 + 1; + nuint n; + assert(srcSize >= 2); + for (n = 0; n < limit; n += samplingRate) { - *fpstats = new FPStats(); + fp->events[hash2(p + n, hashLog)]++; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void addEvents_generic( - Fingerprint* fp, - void* src, - nuint srcSize, - nuint samplingRate, - uint hashLog - ) - { - sbyte* p = (sbyte*)src; - nuint limit = srcSize - 2 + 1; - nuint n; - assert(srcSize >= 2); - for (n = 0; n < limit; n += samplingRate) - { - fp->events[hash2(p + n, hashLog)]++; - } + fp->nbEvents += limit / samplingRate; + } - fp->nbEvents += limit / samplingRate; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void recordFingerprint_generic( + Fingerprint* fp, + void* src, + nuint srcSize, + nuint samplingRate, + uint hashLog + ) + { + memset(fp, 0, (uint)(sizeof(uint) * 
((nuint)1 << (int)hashLog))); + fp->nbEvents = 0; + addEvents_generic(fp, src, srcSize, samplingRate, hashLog); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void recordFingerprint_generic( - Fingerprint* fp, - void* src, - nuint srcSize, - nuint samplingRate, - uint hashLog - ) - { - memset(fp, 0, (uint)(sizeof(uint) * ((nuint)1 << (int)hashLog))); - fp->nbEvents = 0; - addEvents_generic(fp, src, srcSize, samplingRate, hashLog); - } + private static void ZSTD_recordFingerprint_1(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 1, 10); + } - private static void ZSTD_recordFingerprint_1(Fingerprint* fp, void* src, nuint srcSize) - { - recordFingerprint_generic(fp, src, srcSize, 1, 10); - } + private static void ZSTD_recordFingerprint_5(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 5, 10); + } - private static void ZSTD_recordFingerprint_5(Fingerprint* fp, void* src, nuint srcSize) - { - recordFingerprint_generic(fp, src, srcSize, 5, 10); - } + private static void ZSTD_recordFingerprint_11(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 11, 9); + } - private static void ZSTD_recordFingerprint_11(Fingerprint* fp, void* src, nuint srcSize) - { - recordFingerprint_generic(fp, src, srcSize, 11, 9); - } + private static void ZSTD_recordFingerprint_43(Fingerprint* fp, void* src, nuint srcSize) + { + recordFingerprint_generic(fp, src, srcSize, 43, 8); + } - private static void ZSTD_recordFingerprint_43(Fingerprint* fp, void* src, nuint srcSize) - { - recordFingerprint_generic(fp, src, srcSize, 43, 8); - } + private static ulong abs64(long s64) + { + return (ulong)(s64 < 0 ? 
-s64 : s64); + } - private static ulong abs64(long s64) + private static ulong fpDistance(Fingerprint* fp1, Fingerprint* fp2, uint hashLog) + { + ulong distance = 0; + nuint n; + assert(hashLog <= 10); + for (n = 0; n < (nuint)1 << (int)hashLog; n++) { - return (ulong)(s64 < 0 ? -s64 : s64); + distance += abs64( + fp1->events[n] * (long)fp2->nbEvents - fp2->events[n] * (long)fp1->nbEvents + ); } - private static ulong fpDistance(Fingerprint* fp1, Fingerprint* fp2, uint hashLog) - { - ulong distance = 0; - nuint n; - assert(hashLog <= 10); - for (n = 0; n < (nuint)1 << (int)hashLog; n++) - { - distance += abs64( - fp1->events[n] * (long)fp2->nbEvents - fp2->events[n] * (long)fp1->nbEvents - ); - } - - return distance; - } + return distance; + } - /* Compare newEvents with pastEvents - * return 1 when considered "too different" - */ - private static int compareFingerprints( - Fingerprint* @ref, - Fingerprint* newfp, - int penalty, - uint hashLog - ) + /* Compare newEvents with pastEvents + * return 1 when considered "too different" + */ + private static int compareFingerprints( + Fingerprint* @ref, + Fingerprint* newfp, + int penalty, + uint hashLog + ) + { + assert(@ref->nbEvents > 0); + assert(newfp->nbEvents > 0); { - assert(@ref->nbEvents > 0); - assert(newfp->nbEvents > 0); - { - ulong p50 = @ref->nbEvents * (ulong)newfp->nbEvents; - ulong deviation = fpDistance(@ref, newfp, hashLog); - ulong threshold = p50 * (ulong)(16 - 2 + penalty) / 16; - return deviation >= threshold ? 1 : 0; - } + ulong p50 = @ref->nbEvents * (ulong)newfp->nbEvents; + ulong deviation = fpDistance(@ref, newfp, hashLog); + ulong threshold = p50 * (ulong)(16 - 2 + penalty) / 16; + return deviation >= threshold ? 
1 : 0; } + } - private static void mergeEvents(Fingerprint* acc, Fingerprint* newfp) + private static void mergeEvents(Fingerprint* acc, Fingerprint* newfp) + { + nuint n; + for (n = 0; n < 1 << 10; n++) { - nuint n; - for (n = 0; n < 1 << 10; n++) - { - acc->events[n] += newfp->events[n]; - } - - acc->nbEvents += newfp->nbEvents; + acc->events[n] += newfp->events[n]; } - private static void flushEvents(FPStats* fpstats) - { - nuint n; - for (n = 0; n < 1 << 10; n++) - { - fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; - } + acc->nbEvents += newfp->nbEvents; + } - fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; - fpstats->newEvents = new Fingerprint(); + private static void flushEvents(FPStats* fpstats) + { + nuint n; + for (n = 0; n < 1 << 10; n++) + { + fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; } - private static void removeEvents(Fingerprint* acc, Fingerprint* slice) - { - nuint n; - for (n = 0; n < 1 << 10; n++) - { - assert(acc->events[n] >= slice->events[n]); - acc->events[n] -= slice->events[n]; - } + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; + fpstats->newEvents = new Fingerprint(); + } - acc->nbEvents -= slice->nbEvents; + private static void removeEvents(Fingerprint* acc, Fingerprint* slice) + { + nuint n; + for (n = 0; n < 1 << 10; n++) + { + assert(acc->events[n] >= slice->events[n]); + acc->events[n] -= slice->events[n]; } - private static readonly void*[] records_fs = new void*[4] - { - (delegate* managed)(&ZSTD_recordFingerprint_43), - (delegate* managed)(&ZSTD_recordFingerprint_11), - (delegate* managed)(&ZSTD_recordFingerprint_5), - (delegate* managed)(&ZSTD_recordFingerprint_1), - }; + acc->nbEvents -= slice->nbEvents; + } + + private static readonly void*[] records_fs = new void*[4] + { + (delegate* managed)(&ZSTD_recordFingerprint_43), + (delegate* managed)(&ZSTD_recordFingerprint_11), + (delegate* managed)(&ZSTD_recordFingerprint_5), + (delegate* 
managed)(&ZSTD_recordFingerprint_1), + }; #if NET7_0_OR_GREATER private static ReadOnlySpan Span_hashParams => new uint[4] { 8, 9, 10, 10 }; private static uint* hashParams => @@ -171,127 +171,126 @@ ref MemoryMarshal.GetReference(Span_hashParams) ); #else - private static readonly uint* hashParams = GetArrayPointer(new uint[4] { 8, 9, 10, 10 }); + private static readonly uint* hashParams = GetArrayPointer(new uint[4] { 8, 9, 10, 10 }); #endif - private static nuint ZSTD_splitBlock_byChunks( - void* blockStart, - nuint blockSize, - int level, - void* workspace, - nuint wkspSize - ) + private static nuint ZSTD_splitBlock_byChunks( + void* blockStart, + nuint blockSize, + int level, + void* workspace, + nuint wkspSize + ) + { + assert(0 <= level && level <= 3); + void* record_f = records_fs[level]; + FPStats* fpstats = (FPStats*)workspace; + sbyte* p = (sbyte*)blockStart; + int penalty = 3; + nuint pos = 0; + assert(blockSize == 128 << 10); + assert(workspace != null); + assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); + assert(wkspSize >= (nuint)sizeof(FPStats)); + initStats(fpstats); + ((delegate* managed)record_f)( + &fpstats->pastEvents, + p, + 8 << 10 + ); + for (pos = 8 << 10; pos <= blockSize - (8 << 10); pos += 8 << 10) { - assert(0 <= level && level <= 3); - void* record_f = records_fs[level]; - FPStats* fpstats = (FPStats*)workspace; - sbyte* p = (sbyte*)blockStart; - int penalty = 3; - nuint pos = 0; - assert(blockSize == 128 << 10); - assert(workspace != null); - assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); - assert(wkspSize >= (nuint)sizeof(FPStats)); - initStats(fpstats); ((delegate* managed)record_f)( - &fpstats->pastEvents, - p, + &fpstats->newEvents, + p + pos, 8 << 10 ); - for (pos = 8 << 10; pos <= blockSize - (8 << 10); pos += 8 << 10) - { - ((delegate* managed)record_f)( + if ( + compareFingerprints( + &fpstats->pastEvents, &fpstats->newEvents, - p + pos, - 8 << 10 - ); - if ( - 
compareFingerprints( - &fpstats->pastEvents, - &fpstats->newEvents, - penalty, - hashParams[level] - ) != 0 - ) - { - return pos; - } - else - { - mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); - if (penalty > 0) - penalty--; - } + penalty, + hashParams[level] + ) != 0 + ) + { + return pos; } - - assert(pos == blockSize); - return blockSize; - } - - /* ZSTD_splitBlock_fromBorders(): very fast strategy : - * compare fingerprint from beginning and end of the block, - * derive from their difference if it's preferable to split in the middle, - * repeat the process a second time, for finer grained decision. - * 3 times did not brought improvements, so I stopped at 2. - * Benefits are good enough for a cheap heuristic. - * More accurate splitting saves more, but speed impact is also more perceptible. - * For better accuracy, use more elaborate variant *_byChunks. - */ - private static nuint ZSTD_splitBlock_fromBorders( - void* blockStart, - nuint blockSize, - void* workspace, - nuint wkspSize - ) - { - FPStats* fpstats = (FPStats*)workspace; - Fingerprint* middleEvents = (Fingerprint*) - (void*)((sbyte*)workspace + 512 * sizeof(uint)); - assert(blockSize == 128 << 10); - assert(workspace != null); - assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); - assert(wkspSize >= (nuint)sizeof(FPStats)); - initStats(fpstats); - HIST_add(fpstats->pastEvents.events, blockStart, 512); - HIST_add(fpstats->newEvents.events, (sbyte*)blockStart + blockSize - 512, 512); - fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = 512; - if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8) == 0) - return blockSize; - HIST_add(middleEvents->events, (sbyte*)blockStart + blockSize / 2 - 512 / 2, 512); - middleEvents->nbEvents = 512; + else { - ulong distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); - ulong distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); - const ulong minDistance = 512 * 512 / 3; - if 
(abs64((long)distFromBegin - (long)distFromEnd) < minDistance) - return 64 * (1 << 10); - return (nuint)(distFromBegin > distFromEnd ? 32 * (1 << 10) : 96 * (1 << 10)); + mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); + if (penalty > 0) + penalty--; } } - /* ZSTD_splitBlock(): - * @level must be a value between 0 and 4. - * higher levels spend more energy to detect block boundaries. - * @workspace must be aligned for size_t. - * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE - * note: - * For the time being, this function only accepts full 128 KB blocks. - * Therefore, @blockSize must be == 128 KB. - * While this could be extended to smaller sizes in the future, - * it is not yet clear if this would be useful. TBD. - */ - private static nuint ZSTD_splitBlock( - void* blockStart, - nuint blockSize, - int level, - void* workspace, - nuint wkspSize - ) + assert(pos == blockSize); + return blockSize; + } + + /* ZSTD_splitBlock_fromBorders(): very fast strategy : + * compare fingerprint from beginning and end of the block, + * derive from their difference if it's preferable to split in the middle, + * repeat the process a second time, for finer grained decision. + * 3 times did not brought improvements, so I stopped at 2. + * Benefits are good enough for a cheap heuristic. + * More accurate splitting saves more, but speed impact is also more perceptible. + * For better accuracy, use more elaborate variant *_byChunks. 
+ */ + private static nuint ZSTD_splitBlock_fromBorders( + void* blockStart, + nuint blockSize, + void* workspace, + nuint wkspSize + ) + { + FPStats* fpstats = (FPStats*)workspace; + Fingerprint* middleEvents = (Fingerprint*) + (void*)((sbyte*)workspace + 512 * sizeof(uint)); + assert(blockSize == 128 << 10); + assert(workspace != null); + assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); + assert(wkspSize >= (nuint)sizeof(FPStats)); + initStats(fpstats); + HIST_add(fpstats->pastEvents.events, blockStart, 512); + HIST_add(fpstats->newEvents.events, (sbyte*)blockStart + blockSize - 512, 512); + fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = 512; + if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8) == 0) + return blockSize; + HIST_add(middleEvents->events, (sbyte*)blockStart + blockSize / 2 - 512 / 2, 512); + middleEvents->nbEvents = 512; { - assert(0 <= level && level <= 4); - if (level == 0) - return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); - return ZSTD_splitBlock_byChunks(blockStart, blockSize, level - 1, workspace, wkspSize); + ulong distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); + ulong distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); + const ulong minDistance = 512 * 512 / 3; + if (abs64((long)distFromBegin - (long)distFromEnd) < minDistance) + return 64 * (1 << 10); + return (nuint)(distFromBegin > distFromEnd ? 32 * (1 << 10) : 96 * (1 << 10)); } } + + /* ZSTD_splitBlock(): + * @level must be a value between 0 and 4. + * higher levels spend more energy to detect block boundaries. + * @workspace must be aligned for size_t. + * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE + * note: + * For the time being, this function only accepts full 128 KB blocks. + * Therefore, @blockSize must be == 128 KB. + * While this could be extended to smaller sizes in the future, + * it is not yet clear if this would be useful. TBD. 
+ */ + private static nuint ZSTD_splitBlock( + void* blockStart, + nuint blockSize, + int level, + void* workspace, + nuint wkspSize + ) + { + assert(0 <= level && level <= 4); + if (level == 0) + return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); + return ZSTD_splitBlock_byChunks(blockStart, blockSize, level - 1, workspace, wkspSize); + } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs index bcf2da7bd..7d955bb50 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs @@ -1,609 +1,662 @@ using System.Runtime.CompilerServices; -using static ZstdSharp.UnsafeHelper; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; -namespace ZstdSharp.Unsafe -{ - public static unsafe partial class Methods - { - private static readonly buffer_s g_nullBuffer = new buffer_s(start: null, capacity: 0); - - private static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool_s* bufPool) - { - if (bufPool == null) - return; - if (bufPool->buffers != null) - { - uint u; - for (u = 0; u < bufPool->totalBuffers; u++) - { - ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); - } - - ZSTD_customFree(bufPool->buffers, bufPool->cMem); - } +namespace SharpCompress.Compressors.ZStandard.Unsafe; - SynchronizationWrapper.Free(&bufPool->poolMutex); - ZSTD_customFree(bufPool, bufPool->cMem); - } +public static unsafe partial class Methods +{ + private static readonly buffer_s g_nullBuffer = new buffer_s(start: null, capacity: 0); - private static ZSTDMT_bufferPool_s* ZSTDMT_createBufferPool( - uint maxNbBuffers, - ZSTD_customMem cMem - ) + private static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool_s* bufPool) + { + if (bufPool == null) + return; + if (bufPool->buffers != null) { - ZSTDMT_bufferPool_s* bufPool = (ZSTDMT_bufferPool_s*)ZSTD_customCalloc( - 
(nuint)sizeof(ZSTDMT_bufferPool_s), - cMem - ); - if (bufPool == null) - return null; - SynchronizationWrapper.Init(&bufPool->poolMutex); - bufPool->buffers = (buffer_s*)ZSTD_customCalloc( - maxNbBuffers * (uint)sizeof(buffer_s), - cMem - ); - if (bufPool->buffers == null) + uint u; + for (u = 0; u < bufPool->totalBuffers; u++) { - ZSTDMT_freeBufferPool(bufPool); - return null; + ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); } - bufPool->bufferSize = 64 * (1 << 10); - bufPool->totalBuffers = maxNbBuffers; - bufPool->nbBuffers = 0; - bufPool->cMem = cMem; - return bufPool; + ZSTD_customFree(bufPool->buffers, bufPool->cMem); } - /* only works at initialization, not during compression */ - private static nuint ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool_s* bufPool) - { - nuint poolSize = (nuint)sizeof(ZSTDMT_bufferPool_s); - nuint arraySize = bufPool->totalBuffers * (uint)sizeof(buffer_s); - uint u; - nuint totalBufferSize = 0; - SynchronizationWrapper.Enter(&bufPool->poolMutex); - for (u = 0; u < bufPool->totalBuffers; u++) - totalBufferSize += bufPool->buffers[u].capacity; - SynchronizationWrapper.Exit(&bufPool->poolMutex); - return poolSize + arraySize + totalBufferSize; - } + SynchronizationWrapper.Free(&bufPool->poolMutex); + ZSTD_customFree(bufPool, bufPool->cMem); + } - /* ZSTDMT_setBufferSize() : - * all future buffers provided by this buffer pool will have _at least_ this size - * note : it's better for all buffers to have same size, - * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ - private static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool_s* bufPool, nuint bSize) + private static ZSTDMT_bufferPool_s* ZSTDMT_createBufferPool( + uint maxNbBuffers, + ZSTD_customMem cMem + ) + { + ZSTDMT_bufferPool_s* bufPool = (ZSTDMT_bufferPool_s*)ZSTD_customCalloc( + (nuint)sizeof(ZSTDMT_bufferPool_s), + cMem + ); + if (bufPool == null) + return null; + SynchronizationWrapper.Init(&bufPool->poolMutex); + 
bufPool->buffers = (buffer_s*)ZSTD_customCalloc( + maxNbBuffers * (uint)sizeof(buffer_s), + cMem + ); + if (bufPool->buffers == null) { - SynchronizationWrapper.Enter(&bufPool->poolMutex); - bufPool->bufferSize = bSize; - SynchronizationWrapper.Exit(&bufPool->poolMutex); + ZSTDMT_freeBufferPool(bufPool); + return null; } - private static ZSTDMT_bufferPool_s* ZSTDMT_expandBufferPool( - ZSTDMT_bufferPool_s* srcBufPool, - uint maxNbBuffers - ) - { - if (srcBufPool == null) - return null; - if (srcBufPool->totalBuffers >= maxNbBuffers) - return srcBufPool; - { - ZSTD_customMem cMem = srcBufPool->cMem; - /* forward parameters */ - nuint bSize = srcBufPool->bufferSize; - ZSTDMT_bufferPool_s* newBufPool; - ZSTDMT_freeBufferPool(srcBufPool); - newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); - if (newBufPool == null) - return newBufPool; - ZSTDMT_setBufferSize(newBufPool, bSize); - return newBufPool; - } - } + bufPool->bufferSize = 64 * (1 << 10); + bufPool->totalBuffers = maxNbBuffers; + bufPool->nbBuffers = 0; + bufPool->cMem = cMem; + return bufPool; + } - /** ZSTDMT_getBuffer() : - * assumption : bufPool must be valid - * @return : a buffer, with start pointer and size - * note: allocation may fail, in this case, start==NULL and size==0 */ - private static buffer_s ZSTDMT_getBuffer(ZSTDMT_bufferPool_s* bufPool) - { - nuint bSize = bufPool->bufferSize; - SynchronizationWrapper.Enter(&bufPool->poolMutex); - if (bufPool->nbBuffers != 0) - { - buffer_s buf = bufPool->buffers[--bufPool->nbBuffers]; - nuint availBufferSize = buf.capacity; - bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; - if (availBufferSize >= bSize && availBufferSize >> 3 <= bSize) - { - SynchronizationWrapper.Exit(&bufPool->poolMutex); - return buf; - } + /* only works at initialization, not during compression */ + private static nuint ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool_s* bufPool) + { + nuint poolSize = (nuint)sizeof(ZSTDMT_bufferPool_s); + nuint arraySize = bufPool->totalBuffers * 
(uint)sizeof(buffer_s); + uint u; + nuint totalBufferSize = 0; + SynchronizationWrapper.Enter(&bufPool->poolMutex); + for (u = 0; u < bufPool->totalBuffers; u++) + totalBufferSize += bufPool->buffers[u].capacity; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + return poolSize + arraySize + totalBufferSize; + } - ZSTD_customFree(buf.start, bufPool->cMem); - } + /* ZSTDMT_setBufferSize() : + * all future buffers provided by this buffer pool will have _at least_ this size + * note : it's better for all buffers to have same size, + * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ + private static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool_s* bufPool, nuint bSize) + { + SynchronizationWrapper.Enter(&bufPool->poolMutex); + bufPool->bufferSize = bSize; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + } - SynchronizationWrapper.Exit(&bufPool->poolMutex); - { - buffer_s buffer; - void* start = ZSTD_customMalloc(bSize, bufPool->cMem); - buffer.start = start; - buffer.capacity = start == null ? 
0 : bSize; - return buffer; - } + private static ZSTDMT_bufferPool_s* ZSTDMT_expandBufferPool( + ZSTDMT_bufferPool_s* srcBufPool, + uint maxNbBuffers + ) + { + if (srcBufPool == null) + return null; + if (srcBufPool->totalBuffers >= maxNbBuffers) + return srcBufPool; + { + ZSTD_customMem cMem = srcBufPool->cMem; + /* forward parameters */ + nuint bSize = srcBufPool->bufferSize; + ZSTDMT_bufferPool_s* newBufPool; + ZSTDMT_freeBufferPool(srcBufPool); + newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); + if (newBufPool == null) + return newBufPool; + ZSTDMT_setBufferSize(newBufPool, bSize); + return newBufPool; } + } - /* store buffer for later re-use, up to pool capacity */ - private static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool_s* bufPool, buffer_s buf) + /** ZSTDMT_getBuffer() : + * assumption : bufPool must be valid + * @return : a buffer, with start pointer and size + * note: allocation may fail, in this case, start==NULL and size==0 */ + private static buffer_s ZSTDMT_getBuffer(ZSTDMT_bufferPool_s* bufPool) + { + nuint bSize = bufPool->bufferSize; + SynchronizationWrapper.Enter(&bufPool->poolMutex); + if (bufPool->nbBuffers != 0) { - if (buf.start == null) - return; - SynchronizationWrapper.Enter(&bufPool->poolMutex); - if (bufPool->nbBuffers < bufPool->totalBuffers) + buffer_s buf = bufPool->buffers[--bufPool->nbBuffers]; + nuint availBufferSize = buf.capacity; + bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; + if (availBufferSize >= bSize && availBufferSize >> 3 <= bSize) { - bufPool->buffers[bufPool->nbBuffers++] = buf; SynchronizationWrapper.Exit(&bufPool->poolMutex); - return; + return buf; } - SynchronizationWrapper.Exit(&bufPool->poolMutex); ZSTD_customFree(buf.start, bufPool->cMem); } - private static nuint ZSTDMT_sizeof_seqPool(ZSTDMT_bufferPool_s* seqPool) + SynchronizationWrapper.Exit(&bufPool->poolMutex); { - return ZSTDMT_sizeof_bufferPool(seqPool); + buffer_s buffer; + void* start = ZSTD_customMalloc(bSize, bufPool->cMem); + 
buffer.start = start; + buffer.capacity = start == null ? 0 : bSize; + return buffer; } + } - private static RawSeqStore_t bufferToSeq(buffer_s buffer) + /* store buffer for later re-use, up to pool capacity */ + private static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool_s* bufPool, buffer_s buf) + { + if (buf.start == null) + return; + SynchronizationWrapper.Enter(&bufPool->poolMutex); + if (bufPool->nbBuffers < bufPool->totalBuffers) { - RawSeqStore_t seq = kNullRawSeqStore; - seq.seq = (rawSeq*)buffer.start; - seq.capacity = buffer.capacity / (nuint)sizeof(rawSeq); - return seq; + bufPool->buffers[bufPool->nbBuffers++] = buf; + SynchronizationWrapper.Exit(&bufPool->poolMutex); + return; } - private static buffer_s seqToBuffer(RawSeqStore_t seq) - { - buffer_s buffer; - buffer.start = seq.seq; - buffer.capacity = seq.capacity * (nuint)sizeof(rawSeq); - return buffer; - } + SynchronizationWrapper.Exit(&bufPool->poolMutex); + ZSTD_customFree(buf.start, bufPool->cMem); + } - private static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_bufferPool_s* seqPool) - { - if (seqPool->bufferSize == 0) - { - return kNullRawSeqStore; - } + private static nuint ZSTDMT_sizeof_seqPool(ZSTDMT_bufferPool_s* seqPool) + { + return ZSTDMT_sizeof_bufferPool(seqPool); + } - return bufferToSeq(ZSTDMT_getBuffer(seqPool)); - } + private static RawSeqStore_t bufferToSeq(buffer_s buffer) + { + RawSeqStore_t seq = kNullRawSeqStore; + seq.seq = (rawSeq*)buffer.start; + seq.capacity = buffer.capacity / (nuint)sizeof(rawSeq); + return seq; + } - private static void ZSTDMT_releaseSeq(ZSTDMT_bufferPool_s* seqPool, RawSeqStore_t seq) - { - ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); - } + private static buffer_s seqToBuffer(RawSeqStore_t seq) + { + buffer_s buffer; + buffer.start = seq.seq; + buffer.capacity = seq.capacity * (nuint)sizeof(rawSeq); + return buffer; + } - private static void ZSTDMT_setNbSeq(ZSTDMT_bufferPool_s* seqPool, nuint nbSeq) + private static RawSeqStore_t 
ZSTDMT_getSeq(ZSTDMT_bufferPool_s* seqPool) + { + if (seqPool->bufferSize == 0) { - ZSTDMT_setBufferSize(seqPool, nbSeq * (nuint)sizeof(rawSeq)); + return kNullRawSeqStore; } - private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool( - uint nbWorkers, - ZSTD_customMem cMem - ) + return bufferToSeq(ZSTDMT_getBuffer(seqPool)); + } + + private static void ZSTDMT_releaseSeq(ZSTDMT_bufferPool_s* seqPool, RawSeqStore_t seq) + { + ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); + } + + private static void ZSTDMT_setNbSeq(ZSTDMT_bufferPool_s* seqPool, nuint nbSeq) + { + ZSTDMT_setBufferSize(seqPool, nbSeq * (nuint)sizeof(rawSeq)); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool( + uint nbWorkers, + ZSTD_customMem cMem + ) + { + ZSTDMT_bufferPool_s* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + if (seqPool == null) + return null; + ZSTDMT_setNbSeq(seqPool, 0); + return seqPool; + } + + private static void ZSTDMT_freeSeqPool(ZSTDMT_bufferPool_s* seqPool) + { + ZSTDMT_freeBufferPool(seqPool); + } + + private static ZSTDMT_bufferPool_s* ZSTDMT_expandSeqPool( + ZSTDMT_bufferPool_s* pool, + uint nbWorkers + ) + { + return ZSTDMT_expandBufferPool(pool, nbWorkers); + } + + /* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */ + private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) + { + if (pool == null) + return; + SynchronizationWrapper.Free(&pool->poolMutex); + if (pool->cctxs != null) { - ZSTDMT_bufferPool_s* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); - if (seqPool == null) - return null; - ZSTDMT_setNbSeq(seqPool, 0); - return seqPool; + int cid; + for (cid = 0; cid < pool->totalCCtx; cid++) + ZSTD_freeCCtx(pool->cctxs[cid]); + ZSTD_customFree(pool->cctxs, pool->cMem); } - private static void ZSTDMT_freeSeqPool(ZSTDMT_bufferPool_s* seqPool) + ZSTD_customFree(pool, pool->cMem); + } + + /* ZSTDMT_createCCtxPool() : + * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ 
+ private static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) + { + ZSTDMT_CCtxPool* cctxPool = (ZSTDMT_CCtxPool*)ZSTD_customCalloc( + (nuint)sizeof(ZSTDMT_CCtxPool), + cMem + ); + assert(nbWorkers > 0); + if (cctxPool == null) + return null; + SynchronizationWrapper.Init(&cctxPool->poolMutex); + cctxPool->totalCCtx = nbWorkers; + cctxPool->cctxs = (ZSTD_CCtx_s**)ZSTD_customCalloc( + (nuint)(nbWorkers * sizeof(ZSTD_CCtx_s*)), + cMem + ); + if (cctxPool->cctxs == null) { - ZSTDMT_freeBufferPool(seqPool); + ZSTDMT_freeCCtxPool(cctxPool); + return null; } - private static ZSTDMT_bufferPool_s* ZSTDMT_expandSeqPool( - ZSTDMT_bufferPool_s* pool, - uint nbWorkers - ) + cctxPool->cMem = cMem; + cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); + if (cctxPool->cctxs[0] == null) { - return ZSTDMT_expandBufferPool(pool, nbWorkers); + ZSTDMT_freeCCtxPool(cctxPool); + return null; } - /* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */ - private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) - { - if (pool == null) - return; - SynchronizationWrapper.Free(&pool->poolMutex); - if (pool->cctxs != null) - { - int cid; - for (cid = 0; cid < pool->totalCCtx; cid++) - ZSTD_freeCCtx(pool->cctxs[cid]); - ZSTD_customFree(pool->cctxs, pool->cMem); - } + cctxPool->availCCtx = 1; + return cctxPool; + } - ZSTD_customFree(pool, pool->cMem); + private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool( + ZSTDMT_CCtxPool* srcPool, + int nbWorkers + ) + { + if (srcPool == null) + return null; + if (nbWorkers <= srcPool->totalCCtx) + return srcPool; + { + ZSTD_customMem cMem = srcPool->cMem; + ZSTDMT_freeCCtxPool(srcPool); + return ZSTDMT_createCCtxPool(nbWorkers, cMem); } + } - /* ZSTDMT_createCCtxPool() : - * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ - private static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, ZSTD_customMem cMem) + /* only works during initialization 
phase, not during compression */ + private static nuint ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) + { + SynchronizationWrapper.Enter(&cctxPool->poolMutex); { - ZSTDMT_CCtxPool* cctxPool = (ZSTDMT_CCtxPool*)ZSTD_customCalloc( - (nuint)sizeof(ZSTDMT_CCtxPool), - cMem - ); - assert(nbWorkers > 0); - if (cctxPool == null) - return null; - SynchronizationWrapper.Init(&cctxPool->poolMutex); - cctxPool->totalCCtx = nbWorkers; - cctxPool->cctxs = (ZSTD_CCtx_s**)ZSTD_customCalloc( - (nuint)(nbWorkers * sizeof(ZSTD_CCtx_s*)), - cMem - ); - if (cctxPool->cctxs == null) - { - ZSTDMT_freeCCtxPool(cctxPool); - return null; - } - - cctxPool->cMem = cMem; - cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); - if (cctxPool->cctxs[0] == null) + uint nbWorkers = (uint)cctxPool->totalCCtx; + nuint poolSize = (nuint)sizeof(ZSTDMT_CCtxPool); + nuint arraySize = (nuint)(cctxPool->totalCCtx * sizeof(ZSTD_CCtx_s*)); + nuint totalCCtxSize = 0; + uint u; + for (u = 0; u < nbWorkers; u++) { - ZSTDMT_freeCCtxPool(cctxPool); - return null; + totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); } - cctxPool->availCCtx = 1; - return cctxPool; + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + assert(nbWorkers > 0); + return poolSize + arraySize + totalCCtxSize; } + } - private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool( - ZSTDMT_CCtxPool* srcPool, - int nbWorkers - ) + private static ZSTD_CCtx_s* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) + { + SynchronizationWrapper.Enter(&cctxPool->poolMutex); + if (cctxPool->availCCtx != 0) { - if (srcPool == null) - return null; - if (nbWorkers <= srcPool->totalCCtx) - return srcPool; + cctxPool->availCCtx--; { - ZSTD_customMem cMem = srcPool->cMem; - ZSTDMT_freeCCtxPool(srcPool); - return ZSTDMT_createCCtxPool(nbWorkers, cMem); + ZSTD_CCtx_s* cctx = cctxPool->cctxs[cctxPool->availCCtx]; + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + return cctx; } } - /* only works during initialization phase, not during compression */ - private 
static nuint ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) - { - SynchronizationWrapper.Enter(&cctxPool->poolMutex); - { - uint nbWorkers = (uint)cctxPool->totalCCtx; - nuint poolSize = (nuint)sizeof(ZSTDMT_CCtxPool); - nuint arraySize = (nuint)(cctxPool->totalCCtx * sizeof(ZSTD_CCtx_s*)); - nuint totalCCtxSize = 0; - uint u; - for (u = 0; u < nbWorkers; u++) - { - totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); - } + SynchronizationWrapper.Exit(&cctxPool->poolMutex); + return ZSTD_createCCtx_advanced(cctxPool->cMem); + } - SynchronizationWrapper.Exit(&cctxPool->poolMutex); - assert(nbWorkers > 0); - return poolSize + arraySize + totalCCtxSize; - } + private static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx_s* cctx) + { + if (cctx == null) + return; + SynchronizationWrapper.Enter(&pool->poolMutex); + if (pool->availCCtx < pool->totalCCtx) + pool->cctxs[pool->availCCtx++] = cctx; + else + { + ZSTD_freeCCtx(cctx); } - private static ZSTD_CCtx_s* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) - { - SynchronizationWrapper.Enter(&cctxPool->poolMutex); - if (cctxPool->availCCtx != 0) - { - cctxPool->availCCtx--; - { - ZSTD_CCtx_s* cctx = cctxPool->cctxs[cctxPool->availCCtx]; - SynchronizationWrapper.Exit(&cctxPool->poolMutex); - return cctx; - } - } + SynchronizationWrapper.Exit(&pool->poolMutex); + } - SynchronizationWrapper.Exit(&cctxPool->poolMutex); - return ZSTD_createCCtx_advanced(cctxPool->cMem); + private static int ZSTDMT_serialState_reset( + SerialState* serialState, + ZSTDMT_bufferPool_s* seqPool, + ZSTD_CCtx_params_s @params, + nuint jobSize, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType + ) + { + if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ZSTD_ldm_adjustParameters(&@params.ldmParams, &@params.cParams); + assert(@params.ldmParams.hashLog >= @params.ldmParams.bucketSizeLog); + assert(@params.ldmParams.hashRateLog < 32); } - - private static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* 
pool, ZSTD_CCtx_s* cctx) + else { - if (cctx == null) - return; - SynchronizationWrapper.Enter(&pool->poolMutex); - if (pool->availCCtx < pool->totalCCtx) - pool->cctxs[pool->availCCtx++] = cctx; - else - { - ZSTD_freeCCtx(cctx); - } - - SynchronizationWrapper.Exit(&pool->poolMutex); + @params.ldmParams = new ldmParams_t(); } - private static int ZSTDMT_serialState_reset( - SerialState* serialState, - ZSTDMT_bufferPool_s* seqPool, - ZSTD_CCtx_params_s @params, - nuint jobSize, - void* dict, - nuint dictSize, - ZSTD_dictContentType_e dictContentType - ) + serialState->nextJobID = 0; + if (@params.fParams.checksumFlag != 0) + ZSTD_XXH64_reset(&serialState->xxhState, 0); + if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + ZSTD_customMem cMem = @params.customMem; + uint hashLog = @params.ldmParams.hashLog; + nuint hashSize = ((nuint)1 << (int)hashLog) * (nuint)sizeof(ldmEntry_t); + uint bucketLog = @params.ldmParams.hashLog - @params.ldmParams.bucketSizeLog; + uint prevBucketLog = + serialState->@params.ldmParams.hashLog + - serialState->@params.ldmParams.bucketSizeLog; + nuint numBuckets = (nuint)1 << (int)bucketLog; + ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(@params.ldmParams, jobSize)); + ZSTD_window_init(&serialState->ldmState.window); + if ( + serialState->ldmState.hashTable == null + || serialState->@params.ldmParams.hashLog < hashLog + ) { - ZSTD_ldm_adjustParameters(&@params.ldmParams, &@params.cParams); - assert(@params.ldmParams.hashLog >= @params.ldmParams.bucketSizeLog); - assert(@params.ldmParams.hashRateLog < 32); + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc( + hashSize, + cMem + ); } - else + + if (serialState->ldmState.bucketOffsets == null || prevBucketLog < bucketLog) { - @params.ldmParams = new ldmParams_t(); + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + 
serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc( + numBuckets, + cMem + ); } - serialState->nextJobID = 0; - if (@params.fParams.checksumFlag != 0) - ZSTD_XXH64_reset(&serialState->xxhState, 0); - if (@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + if ( + serialState->ldmState.hashTable == null + || serialState->ldmState.bucketOffsets == null + ) + return 1; + memset(serialState->ldmState.hashTable, 0, (uint)hashSize); + memset(serialState->ldmState.bucketOffsets, 0, (uint)numBuckets); + serialState->ldmState.loadedDictEnd = 0; + if (dictSize > 0) { - ZSTD_customMem cMem = @params.customMem; - uint hashLog = @params.ldmParams.hashLog; - nuint hashSize = ((nuint)1 << (int)hashLog) * (nuint)sizeof(ldmEntry_t); - uint bucketLog = @params.ldmParams.hashLog - @params.ldmParams.bucketSizeLog; - uint prevBucketLog = - serialState->@params.ldmParams.hashLog - - serialState->@params.ldmParams.bucketSizeLog; - nuint numBuckets = (nuint)1 << (int)bucketLog; - ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(@params.ldmParams, jobSize)); - ZSTD_window_init(&serialState->ldmState.window); - if ( - serialState->ldmState.hashTable == null - || serialState->@params.ldmParams.hashLog < hashLog - ) + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) { - ZSTD_customFree(serialState->ldmState.hashTable, cMem); - serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc( - hashSize, - cMem + byte* dictEnd = (byte*)dict + dictSize; + ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, 0); + ZSTD_ldm_fillHashTable( + &serialState->ldmState, + (byte*)dict, + dictEnd, + &@params.ldmParams ); + serialState->ldmState.loadedDictEnd = + @params.forceWindow != 0 + ? 
0 + : (uint)(dictEnd - serialState->ldmState.window.@base); } + } - if (serialState->ldmState.bucketOffsets == null || prevBucketLog < bucketLog) - { - ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); - serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc( - numBuckets, - cMem - ); - } + serialState->ldmWindow = serialState->ldmState.window; + } - if ( - serialState->ldmState.hashTable == null - || serialState->ldmState.bucketOffsets == null - ) - return 1; - memset(serialState->ldmState.hashTable, 0, (uint)hashSize); - memset(serialState->ldmState.bucketOffsets, 0, (uint)numBuckets); - serialState->ldmState.loadedDictEnd = 0; - if (dictSize > 0) - { - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) - { - byte* dictEnd = (byte*)dict + dictSize; - ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, 0); - ZSTD_ldm_fillHashTable( - &serialState->ldmState, - (byte*)dict, - dictEnd, - &@params.ldmParams - ); - serialState->ldmState.loadedDictEnd = - @params.forceWindow != 0 - ? 
0 - : (uint)(dictEnd - serialState->ldmState.window.@base); - } - } + serialState->@params = @params; + serialState->@params.jobSize = (uint)jobSize; + return 0; + } - serialState->ldmWindow = serialState->ldmState.window; - } + private static int ZSTDMT_serialState_init(SerialState* serialState) + { + int initError = 0; + *serialState = new SerialState(); + SynchronizationWrapper.Init(&serialState->mutex); + initError |= 0; + initError |= 0; + SynchronizationWrapper.Init(&serialState->ldmWindowMutex); + initError |= 0; + initError |= 0; + return initError; + } - serialState->@params = @params; - serialState->@params.jobSize = (uint)jobSize; - return 0; + private static void ZSTDMT_serialState_free(SerialState* serialState) + { + ZSTD_customMem cMem = serialState->@params.customMem; + SynchronizationWrapper.Free(&serialState->mutex); + SynchronizationWrapper.Free(&serialState->ldmWindowMutex); + ZSTD_customFree(serialState->ldmState.hashTable, cMem); + ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + } + + private static void ZSTDMT_serialState_genSequences( + SerialState* serialState, + RawSeqStore_t* seqStore, + Range src, + uint jobID + ) + { + SynchronizationWrapper.Enter(&serialState->mutex); + while (serialState->nextJobID < jobID) + { + SynchronizationWrapper.Wait(&serialState->mutex); } - private static int ZSTDMT_serialState_init(SerialState* serialState) + if (serialState->nextJobID == jobID) { - int initError = 0; - *serialState = new SerialState(); - SynchronizationWrapper.Init(&serialState->mutex); - initError |= 0; - initError |= 0; - SynchronizationWrapper.Init(&serialState->ldmWindowMutex); - initError |= 0; - initError |= 0; - return initError; + if (serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + nuint error; + assert( + seqStore->seq != null + && seqStore->pos == 0 + && seqStore->size == 0 + && seqStore->capacity > 0 + ); + assert(src.size <= serialState->@params.jobSize); + 
ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, 0); + error = ZSTD_ldm_generateSequences( + &serialState->ldmState, + seqStore, + &serialState->@params.ldmParams, + src.start, + src.size + ); + assert(!ERR_isError(error)); + SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); + serialState->ldmWindow = serialState->ldmState.window; + SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); + SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); + } + + if (serialState->@params.fParams.checksumFlag != 0 && src.size > 0) + ZSTD_XXH64_update(&serialState->xxhState, src.start, src.size); } - private static void ZSTDMT_serialState_free(SerialState* serialState) + serialState->nextJobID++; + SynchronizationWrapper.PulseAll(&serialState->mutex); + SynchronizationWrapper.Exit(&serialState->mutex); + } + + private static void ZSTDMT_serialState_applySequences( + SerialState* serialState, + ZSTD_CCtx_s* jobCCtx, + RawSeqStore_t* seqStore + ) + { + if (seqStore->size > 0) { - ZSTD_customMem cMem = serialState->@params.customMem; - SynchronizationWrapper.Free(&serialState->mutex); - SynchronizationWrapper.Free(&serialState->ldmWindowMutex); - ZSTD_customFree(serialState->ldmState.hashTable, cMem); - ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); + assert( + serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ); + assert(jobCCtx != null); + ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); } + } - private static void ZSTDMT_serialState_genSequences( - SerialState* serialState, - RawSeqStore_t* seqStore, - Range src, - uint jobID - ) + private static void ZSTDMT_serialState_ensureFinished( + SerialState* serialState, + uint jobID, + nuint cSize + ) + { + SynchronizationWrapper.Enter(&serialState->mutex); + if (serialState->nextJobID <= jobID) { - SynchronizationWrapper.Enter(&serialState->mutex); - while (serialState->nextJobID < jobID) - { - 
SynchronizationWrapper.Wait(&serialState->mutex); - } + assert(ERR_isError(cSize)); + serialState->nextJobID = jobID + 1; + SynchronizationWrapper.PulseAll(&serialState->mutex); + SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); + ZSTD_window_clear(&serialState->ldmWindow); + SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); + SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); + } - if (serialState->nextJobID == jobID) - { - if (serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - nuint error; - assert( - seqStore->seq != null - && seqStore->pos == 0 - && seqStore->size == 0 - && seqStore->capacity > 0 - ); - assert(src.size <= serialState->@params.jobSize); - ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, 0); - error = ZSTD_ldm_generateSequences( - &serialState->ldmState, - seqStore, - &serialState->@params.ldmParams, - src.start, - src.size - ); - assert(!ERR_isError(error)); - SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); - serialState->ldmWindow = serialState->ldmState.window; - SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); - SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); - } + SynchronizationWrapper.Exit(&serialState->mutex); + } - if (serialState->@params.fParams.checksumFlag != 0 && src.size > 0) - ZSTD_XXH64_update(&serialState->xxhState, src.start, src.size); - } + private static readonly Range kNullRange = new Range(start: null, size: 0); - serialState->nextJobID++; - SynchronizationWrapper.PulseAll(&serialState->mutex); - SynchronizationWrapper.Exit(&serialState->mutex); + /* ZSTDMT_compressionJob() is a POOL_function type */ + private static void ZSTDMT_compressionJob(void* jobDescription) + { + ZSTDMT_jobDescription* job = (ZSTDMT_jobDescription*)jobDescription; + /* do not modify job->params ! 
copy it, modify the copy */ + ZSTD_CCtx_params_s jobParams = job->@params; + ZSTD_CCtx_s* cctx = ZSTDMT_getCCtx(job->cctxPool); + RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); + buffer_s dstBuff = job->dstBuff; + nuint lastCBlockSize = 0; + if (cctx == null) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; } - private static void ZSTDMT_serialState_applySequences( - SerialState* serialState, - ZSTD_CCtx_s* jobCCtx, - RawSeqStore_t* seqStore - ) + if (dstBuff.start == null) { - if (seqStore->size > 0) + dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (dstBuff.start == null) { - assert( - serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) ); - assert(jobCCtx != null); - ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; } + + job->dstBuff = dstBuff; } - private static void ZSTDMT_serialState_ensureFinished( - SerialState* serialState, - uint jobID, - nuint cSize + if ( + jobParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + && rawSeqStore.seq == null ) { - SynchronizationWrapper.Enter(&serialState->mutex); - if (serialState->nextJobID <= jobID) - { - assert(ERR_isError(cSize)); - serialState->nextJobID = jobID + 1; - SynchronizationWrapper.PulseAll(&serialState->mutex); - SynchronizationWrapper.Enter(&serialState->ldmWindowMutex); - ZSTD_window_clear(&serialState->ldmWindow); - SynchronizationWrapper.Pulse(&serialState->ldmWindowMutex); - SynchronizationWrapper.Exit(&serialState->ldmWindowMutex); - } - - SynchronizationWrapper.Exit(&serialState->mutex); + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; } - private static readonly Range kNullRange = new Range(start: null, size: 0); - - /* ZSTDMT_compressionJob() is a POOL_function type */ - private static void ZSTDMT_compressionJob(void* jobDescription) + if (job->jobID != 0) + jobParams.fParams.checksumFlag = 0; + jobParams.ldmParams.enableLdm = ZSTD_paramSwitch_e.ZSTD_ps_disable; + jobParams.nbWorkers = 0; + ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); + if (job->cdict != null) { - ZSTDMT_jobDescription* job = (ZSTDMT_jobDescription*)jobDescription; - /* do not modify job->params ! copy it, modify the copy */ - ZSTD_CCtx_params_s jobParams = job->@params; - ZSTD_CCtx_s* cctx = ZSTDMT_getCCtx(job->cctxPool); - RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); - buffer_s dstBuff = job->dstBuff; - nuint lastCBlockSize = 0; - if (cctx == null) + nuint initError = ZSTD_compressBegin_advanced_internal( + cctx, + null, + 0, + ZSTD_dictContentType_e.ZSTD_dct_auto, + ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, + job->cdict, + &jobParams, + job->fullFrameSize + ); + assert(job->firstJob != 0); + if (ERR_isError(initError)) { SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + job->cSize = initError; SynchronizationWrapper.Exit(&job->job_mutex); goto _endJob; } - - if (dstBuff.start == null) + } + else + { + ulong pledgedSrcSize = job->firstJob != 0 ? job->fullFrameSize : job->src.size; { - dstBuff = ZSTDMT_getBuffer(job->bufPool); - if (dstBuff.start == null) + nuint forceWindowError = ZSTD_CCtxParams_setParameter( + &jobParams, + ZSTD_cParameter.ZSTD_c_experimentalParam3, + job->firstJob == 0 ? 
1 : 0 + ); + if (ERR_isError(forceWindowError)) { SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + job->cSize = forceWindowError; SynchronizationWrapper.Exit(&job->job_mutex); goto _endJob; } - - job->dstBuff = dstBuff; } - if ( - jobParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable - && rawSeqStore.seq == null - ) + if (job->firstJob == 0) { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; + nuint err = ZSTD_CCtxParams_setParameter( + &jobParams, + ZSTD_cParameter.ZSTD_c_experimentalParam15, + 0 + ); + if (ERR_isError(err)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = err; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; + } } - if (job->jobID != 0) - jobParams.fParams.checksumFlag = 0; - jobParams.ldmParams.enableLdm = ZSTD_paramSwitch_e.ZSTD_ps_disable; - jobParams.nbWorkers = 0; - ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); - if (job->cdict != null) { nuint initError = ZSTD_compressBegin_advanced_internal( cctx, - null, - 0, - ZSTD_dictContentType_e.ZSTD_dct_auto, + job->prefix.start, + job->prefix.size, + ZSTD_dictContentType_e.ZSTD_dct_rawContent, ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - job->cdict, + null, &jobParams, - job->fullFrameSize + pledgedSrcSize ); - assert(job->firstJob != 0); if (ERR_isError(initError)) { SynchronizationWrapper.Enter(&job->job_mutex); @@ -612,522 +665,433 @@ private static void ZSTDMT_compressionJob(void* jobDescription) goto _endJob; } } - else - { - ulong pledgedSrcSize = job->firstJob != 0 ? job->fullFrameSize : job->src.size; - { - nuint forceWindowError = ZSTD_CCtxParams_setParameter( - &jobParams, - ZSTD_cParameter.ZSTD_c_experimentalParam3, - job->firstJob == 0 ? 
1 : 0 - ); - if (ERR_isError(forceWindowError)) - { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = forceWindowError; - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; - } - } - - if (job->firstJob == 0) - { - nuint err = ZSTD_CCtxParams_setParameter( - &jobParams, - ZSTD_cParameter.ZSTD_c_experimentalParam15, - 0 - ); - if (ERR_isError(err)) - { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = err; - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; - } - } + } - { - nuint initError = ZSTD_compressBegin_advanced_internal( - cctx, - job->prefix.start, - job->prefix.size, - ZSTD_dictContentType_e.ZSTD_dct_rawContent, - ZSTD_dictTableLoadMethod_e.ZSTD_dtlm_fast, - null, - &jobParams, - pledgedSrcSize - ); - if (ERR_isError(initError)) - { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = initError; - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; - } - } + ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); + if (job->firstJob == 0) + { + nuint hSize = ZSTD_compressContinue_public( + cctx, + dstBuff.start, + dstBuff.capacity, + job->src.start, + 0 + ); + if (ERR_isError(hSize)) + { + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize = hSize; + SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; } - ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); - if (job->firstJob == 0) + ZSTD_invalidateRepCodes(cctx); + } + + { + const nuint chunkSize = 4 * (1 << 17); + int nbChunks = (int)((job->src.size + (chunkSize - 1)) / chunkSize); + byte* ip = (byte*)job->src.start; + byte* ostart = (byte*)dstBuff.start; + byte* op = ostart; + byte* oend = op + dstBuff.capacity; + int chunkNb; +#if DEBUG + if (sizeof(nuint) > sizeof(int)) + assert(job->src.size < unchecked(2147483647 * chunkSize)); +#endif + assert(job->cSize == 0); + for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - nuint hSize = ZSTD_compressContinue_public( + nuint cSize = 
ZSTD_compressContinue_public( cctx, - dstBuff.start, - dstBuff.capacity, - job->src.start, - 0 + op, + (nuint)(oend - op), + ip, + chunkSize ); - if (ERR_isError(hSize)) + if (ERR_isError(cSize)) { SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = hSize; + job->cSize = cSize; SynchronizationWrapper.Exit(&job->job_mutex); goto _endJob; } - ZSTD_invalidateRepCodes(cctx); + ip += chunkSize; + op += cSize; + assert(op < oend); + SynchronizationWrapper.Enter(&job->job_mutex); + job->cSize += cSize; + job->consumed = chunkSize * (nuint)chunkNb; + SynchronizationWrapper.Pulse(&job->job_mutex); + SynchronizationWrapper.Exit(&job->job_mutex); } - { - const nuint chunkSize = 4 * (1 << 17); - int nbChunks = (int)((job->src.size + (chunkSize - 1)) / chunkSize); - byte* ip = (byte*)job->src.start; - byte* ostart = (byte*)dstBuff.start; - byte* op = ostart; - byte* oend = op + dstBuff.capacity; - int chunkNb; -#if DEBUG - if (sizeof(nuint) > sizeof(int)) - assert(job->src.size < unchecked(2147483647 * chunkSize)); -#endif - assert(job->cSize == 0); - for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) + assert(chunkSize > 0); + assert((chunkSize & chunkSize - 1) == 0); + if (((uint)(nbChunks > 0 ? 1 : 0) | job->lastJob) != 0) + { + nuint lastBlockSize1 = job->src.size & chunkSize - 1; + nuint lastBlockSize = + lastBlockSize1 == 0 && job->src.size >= chunkSize + ? chunkSize + : lastBlockSize1; + nuint cSize = + job->lastJob != 0 + ? 
ZSTD_compressEnd_public( + cctx, + op, + (nuint)(oend - op), + ip, + lastBlockSize + ) + : ZSTD_compressContinue_public( + cctx, + op, + (nuint)(oend - op), + ip, + lastBlockSize + ); + if (ERR_isError(cSize)) { - nuint cSize = ZSTD_compressContinue_public( - cctx, - op, - (nuint)(oend - op), - ip, - chunkSize - ); - if (ERR_isError(cSize)) - { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = cSize; - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; - } - - ip += chunkSize; - op += cSize; - assert(op < oend); SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize += cSize; - job->consumed = chunkSize * (nuint)chunkNb; - SynchronizationWrapper.Pulse(&job->job_mutex); + job->cSize = cSize; SynchronizationWrapper.Exit(&job->job_mutex); + goto _endJob; } - assert(chunkSize > 0); - assert((chunkSize & chunkSize - 1) == 0); - if (((uint)(nbChunks > 0 ? 1 : 0) | job->lastJob) != 0) - { - nuint lastBlockSize1 = job->src.size & chunkSize - 1; - nuint lastBlockSize = - lastBlockSize1 == 0 && job->src.size >= chunkSize - ? chunkSize - : lastBlockSize1; - nuint cSize = - job->lastJob != 0 - ? 
ZSTD_compressEnd_public( - cctx, - op, - (nuint)(oend - op), - ip, - lastBlockSize - ) - : ZSTD_compressContinue_public( - cctx, - op, - (nuint)(oend - op), - ip, - lastBlockSize - ); - if (ERR_isError(cSize)) - { - SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = cSize; - SynchronizationWrapper.Exit(&job->job_mutex); - goto _endJob; - } - - lastCBlockSize = cSize; - } + lastCBlockSize = cSize; } + } #if DEBUG - if (job->firstJob == 0) - { - assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); - } -#endif - - ZSTD_CCtx_trace(cctx, 0); - _endJob: - ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); - ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); - ZSTDMT_releaseCCtx(job->cctxPool, cctx); - SynchronizationWrapper.Enter(&job->job_mutex); - if (ERR_isError(job->cSize)) - assert(lastCBlockSize == 0); - job->cSize += lastCBlockSize; - job->consumed = job->src.size; - SynchronizationWrapper.Pulse(&job->job_mutex); - SynchronizationWrapper.Exit(&job->job_mutex); + if (job->firstJob == 0) + { + assert(ZSTD_window_hasExtDict(cctx->blockState.matchState.window) == 0); } +#endif - private static readonly RoundBuff_t kNullRoundBuff = new RoundBuff_t( - buffer: null, - capacity: 0, - pos: 0 - ); + ZSTD_CCtx_trace(cctx, 0); + _endJob: + ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); + ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); + ZSTDMT_releaseCCtx(job->cctxPool, cctx); + SynchronizationWrapper.Enter(&job->job_mutex); + if (ERR_isError(job->cSize)) + assert(lastCBlockSize == 0); + job->cSize += lastCBlockSize; + job->consumed = job->src.size; + SynchronizationWrapper.Pulse(&job->job_mutex); + SynchronizationWrapper.Exit(&job->job_mutex); + } - private static void ZSTDMT_freeJobsTable( - ZSTDMT_jobDescription* jobTable, - uint nbJobs, - ZSTD_customMem cMem - ) + private static readonly RoundBuff_t kNullRoundBuff = new RoundBuff_t( + buffer: null, + capacity: 0, + pos: 0 + ); + + private static void 
ZSTDMT_freeJobsTable( + ZSTDMT_jobDescription* jobTable, + uint nbJobs, + ZSTD_customMem cMem + ) + { + uint jobNb; + if (jobTable == null) + return; + for (jobNb = 0; jobNb < nbJobs; jobNb++) { - uint jobNb; - if (jobTable == null) - return; - for (jobNb = 0; jobNb < nbJobs; jobNb++) - { - SynchronizationWrapper.Free(&jobTable[jobNb].job_mutex); - } - - ZSTD_customFree(jobTable, cMem); + SynchronizationWrapper.Free(&jobTable[jobNb].job_mutex); } - /* ZSTDMT_allocJobsTable() - * allocate and init a job table. - * update *nbJobsPtr to next power of 2 value, as size of table */ - private static ZSTDMT_jobDescription* ZSTDMT_createJobsTable( - uint* nbJobsPtr, - ZSTD_customMem cMem - ) - { - uint nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; - uint nbJobs = (uint)(1 << (int)nbJobsLog2); - uint jobNb; - ZSTDMT_jobDescription* jobTable = (ZSTDMT_jobDescription*)ZSTD_customCalloc( - nbJobs * (uint)sizeof(ZSTDMT_jobDescription), - cMem - ); - int initError = 0; - if (jobTable == null) - return null; - *nbJobsPtr = nbJobs; - for (jobNb = 0; jobNb < nbJobs; jobNb++) - { - SynchronizationWrapper.Init(&jobTable[jobNb].job_mutex); - initError |= 0; - initError |= 0; - } - - if (initError != 0) - { - ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); - return null; - } + ZSTD_customFree(jobTable, cMem); + } - return jobTable; + /* ZSTDMT_allocJobsTable() + * allocate and init a job table. 
+ * update *nbJobsPtr to next power of 2 value, as size of table */ + private static ZSTDMT_jobDescription* ZSTDMT_createJobsTable( + uint* nbJobsPtr, + ZSTD_customMem cMem + ) + { + uint nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; + uint nbJobs = (uint)(1 << (int)nbJobsLog2); + uint jobNb; + ZSTDMT_jobDescription* jobTable = (ZSTDMT_jobDescription*)ZSTD_customCalloc( + nbJobs * (uint)sizeof(ZSTDMT_jobDescription), + cMem + ); + int initError = 0; + if (jobTable == null) + return null; + *nbJobsPtr = nbJobs; + for (jobNb = 0; jobNb < nbJobs; jobNb++) + { + SynchronizationWrapper.Init(&jobTable[jobNb].job_mutex); + initError |= 0; + initError |= 0; } - private static nuint ZSTDMT_expandJobsTable(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + if (initError != 0) { - uint nbJobs = nbWorkers + 2; - if (nbJobs > mtctx->jobIDMask + 1) - { - ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); - mtctx->jobIDMask = 0; - mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); - if (mtctx->jobs == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - assert(nbJobs != 0 && (nbJobs & nbJobs - 1) == 0); - mtctx->jobIDMask = nbJobs - 1; - } - - return 0; + ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); + return null; } - /* ZSTDMT_CCtxParam_setNbWorkers(): - * Internal use only */ - private static nuint ZSTDMT_CCtxParam_setNbWorkers( - ZSTD_CCtx_params_s* @params, - uint nbWorkers - ) + return jobTable; + } + + private static nuint ZSTDMT_expandJobsTable(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + { + uint nbJobs = nbWorkers + 2; + if (nbJobs > mtctx->jobIDMask + 1) { - return ZSTD_CCtxParams_setParameter( - @params, - ZSTD_cParameter.ZSTD_c_nbWorkers, - (int)nbWorkers - ); + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); + mtctx->jobIDMask = 0; + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); + if (mtctx->jobs == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + 
assert(nbJobs != 0 && (nbJobs & nbJobs - 1) == 0); + mtctx->jobIDMask = nbJobs - 1; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced_internal( - uint nbWorkers, - ZSTD_customMem cMem, - void* pool - ) - { - ZSTDMT_CCtx_s* mtctx; - uint nbJobs = nbWorkers + 2; - int initError; - if (nbWorkers < 1) - return null; - nbWorkers = - nbWorkers < (uint)(sizeof(void*) == 4 ? 64 : 256) - ? nbWorkers - : (uint)(sizeof(void*) == 4 ? 64 : 256); - if (((cMem.customAlloc != null ? 1 : 0) ^ (cMem.customFree != null ? 1 : 0)) != 0) - return null; - mtctx = (ZSTDMT_CCtx_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtx_s), cMem); - if (mtctx == null) - return null; - ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); - mtctx->cMem = cMem; - mtctx->allJobsCompleted = 1; - if (pool != null) - { - mtctx->factory = pool; - mtctx->providedFactory = 1; - } - else - { - mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); - mtctx->providedFactory = 0; - } + return 0; + } - mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); - assert(nbJobs > 0); - assert((nbJobs & nbJobs - 1) == 0); - mtctx->jobIDMask = nbJobs - 1; - mtctx->bufPool = ZSTDMT_createBufferPool(2 * nbWorkers + 3, cMem); - mtctx->cctxPool = ZSTDMT_createCCtxPool((int)nbWorkers, cMem); - mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); - initError = ZSTDMT_serialState_init(&mtctx->serial); - mtctx->roundBuff = kNullRoundBuff; - if ( - ( - ( - mtctx->factory == null - || mtctx->jobs == null - || mtctx->bufPool == null - || mtctx->cctxPool == null - || mtctx->seqPool == null - ? 
1 - : 0 - ) | initError - ) != 0 - ) - { - ZSTDMT_freeCCtx(mtctx); - return null; - } + /* ZSTDMT_CCtxParam_setNbWorkers(): + * Internal use only */ + private static nuint ZSTDMT_CCtxParam_setNbWorkers( + ZSTD_CCtx_params_s* @params, + uint nbWorkers + ) + { + return ZSTD_CCtxParams_setParameter( + @params, + ZSTD_cParameter.ZSTD_c_nbWorkers, + (int)nbWorkers + ); + } - return mtctx; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced_internal( + uint nbWorkers, + ZSTD_customMem cMem, + void* pool + ) + { + ZSTDMT_CCtx_s* mtctx; + uint nbJobs = nbWorkers + 2; + int initError; + if (nbWorkers < 1) + return null; + nbWorkers = + nbWorkers < (uint)(sizeof(void*) == 4 ? 64 : 256) + ? nbWorkers + : (uint)(sizeof(void*) == 4 ? 64 : 256); + if (((cMem.customAlloc != null ? 1 : 0) ^ (cMem.customFree != null ? 1 : 0)) != 0) + return null; + mtctx = (ZSTDMT_CCtx_s*)ZSTD_customCalloc((nuint)sizeof(ZSTDMT_CCtx_s), cMem); + if (mtctx == null) + return null; + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); + mtctx->cMem = cMem; + mtctx->allJobsCompleted = 1; + if (pool != null) + { + mtctx->factory = pool; + mtctx->providedFactory = 1; } - - /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ - private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced( - uint nbWorkers, - ZSTD_customMem cMem, - void* pool - ) + else { - return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); + mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); + mtctx->providedFactory = 0; } - /* ZSTDMT_releaseAllJobResources() : - * note : ensure all workers are killed first ! 
*/ - private static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx_s* mtctx) + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); + assert(nbJobs > 0); + assert((nbJobs & nbJobs - 1) == 0); + mtctx->jobIDMask = nbJobs - 1; + mtctx->bufPool = ZSTDMT_createBufferPool(2 * nbWorkers + 3, cMem); + mtctx->cctxPool = ZSTDMT_createCCtxPool((int)nbWorkers, cMem); + mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); + initError = ZSTDMT_serialState_init(&mtctx->serial); + mtctx->roundBuff = kNullRoundBuff; + if ( + ( + ( + mtctx->factory == null + || mtctx->jobs == null + || mtctx->bufPool == null + || mtctx->cctxPool == null + || mtctx->seqPool == null + ? 1 + : 0 + ) | initError + ) != 0 + ) { - uint jobID; - for (jobID = 0; jobID <= mtctx->jobIDMask; jobID++) - { - /* Copy the mutex/cond out */ - void* mutex = mtctx->jobs[jobID].job_mutex; - void* cond = mtctx->jobs[jobID].job_cond; - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - mtctx->jobs[jobID] = new ZSTDMT_jobDescription - { - job_mutex = mutex, - job_cond = cond, - }; - } - - mtctx->inBuff.buffer = g_nullBuffer; - mtctx->inBuff.filled = 0; - mtctx->allJobsCompleted = 1; + ZSTDMT_freeCCtx(mtctx); + return null; } - private static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx_s* mtctx) - { - while (mtctx->doneJobID < mtctx->nextJobID) - { - uint jobID = mtctx->doneJobID & mtctx->jobIDMask; - SynchronizationWrapper.Enter(&mtctx->jobs[jobID].job_mutex); - while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) - { - SynchronizationWrapper.Wait(&mtctx->jobs[jobID].job_mutex); - } + return mtctx; + } - SynchronizationWrapper.Exit(&mtctx->jobs[jobID].job_mutex); - mtctx->doneJobID++; - } - } + /* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. 
*/ + private static ZSTDMT_CCtx_s* ZSTDMT_createCCtx_advanced( + uint nbWorkers, + ZSTD_customMem cMem, + void* pool + ) + { + return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); + } - private static nuint ZSTDMT_freeCCtx(ZSTDMT_CCtx_s* mtctx) + /* ZSTDMT_releaseAllJobResources() : + * note : ensure all workers are killed first ! */ + private static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx_s* mtctx) + { + uint jobID; + for (jobID = 0; jobID <= mtctx->jobIDMask; jobID++) { - if (mtctx == null) - return 0; - if (mtctx->providedFactory == 0) - POOL_free(mtctx->factory); - ZSTDMT_releaseAllJobResources(mtctx); - ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); - ZSTDMT_freeBufferPool(mtctx->bufPool); - ZSTDMT_freeCCtxPool(mtctx->cctxPool); - ZSTDMT_freeSeqPool(mtctx->seqPool); - ZSTDMT_serialState_free(&mtctx->serial); - ZSTD_freeCDict(mtctx->cdictLocal); - if (mtctx->roundBuff.buffer != null) - ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); - ZSTD_customFree(mtctx, mtctx->cMem); - return 0; + /* Copy the mutex/cond out */ + void* mutex = mtctx->jobs[jobID].job_mutex; + void* cond = mtctx->jobs[jobID].job_cond; + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); + mtctx->jobs[jobID] = new ZSTDMT_jobDescription + { + job_mutex = mutex, + job_cond = cond, + }; } - private static nuint ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx_s* mtctx) - { - if (mtctx == null) - return 0; - return (nuint)sizeof(ZSTDMT_CCtx_s) - + POOL_sizeof(mtctx->factory) - + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) - + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) - + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) - + ZSTDMT_sizeof_seqPool(mtctx->seqPool) - + ZSTD_sizeof_CDict(mtctx->cdictLocal) - + mtctx->roundBuff.capacity; - } + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->allJobsCompleted = 1; + } - /* ZSTDMT_resize() : - * @return : error code if fails, 0 on success */ - private static nuint 
ZSTDMT_resize(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + private static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx_s* mtctx) + { + while (mtctx->doneJobID < mtctx->nextJobID) { - if (POOL_resize(mtctx->factory, nbWorkers) != 0) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + uint jobID = mtctx->doneJobID & mtctx->jobIDMask; + SynchronizationWrapper.Enter(&mtctx->jobs[jobID].job_mutex); + while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { - nuint err_code = ZSTDMT_expandJobsTable(mtctx, nbWorkers); - if (ERR_isError(err_code)) - { - return err_code; - } + SynchronizationWrapper.Wait(&mtctx->jobs[jobID].job_mutex); } - mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, 2 * nbWorkers + 3); - if (mtctx->bufPool == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, (int)nbWorkers); - if (mtctx->cctxPool == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); - if (mtctx->seqPool == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); - return 0; + SynchronizationWrapper.Exit(&mtctx->jobs[jobID].job_mutex); + mtctx->doneJobID++; } + } - /*! ZSTDMT_updateCParams_whileCompressing() : - * Updates a selected set of compression parameters, remaining compatible with currently active frame. - * New parameters will be applied to next compression job. 
*/ - private static void ZSTDMT_updateCParams_whileCompressing( - ZSTDMT_CCtx_s* mtctx, - ZSTD_CCtx_params_s* cctxParams - ) + private static nuint ZSTDMT_freeCCtx(ZSTDMT_CCtx_s* mtctx) + { + if (mtctx == null) + return 0; + if (mtctx->providedFactory == 0) + POOL_free(mtctx->factory); + ZSTDMT_releaseAllJobResources(mtctx); + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask + 1, mtctx->cMem); + ZSTDMT_freeBufferPool(mtctx->bufPool); + ZSTDMT_freeCCtxPool(mtctx->cctxPool); + ZSTDMT_freeSeqPool(mtctx->seqPool); + ZSTDMT_serialState_free(&mtctx->serial); + ZSTD_freeCDict(mtctx->cdictLocal); + if (mtctx->roundBuff.buffer != null) + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + ZSTD_customFree(mtctx, mtctx->cMem); + return 0; + } + + private static nuint ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx_s* mtctx) + { + if (mtctx == null) + return 0; + return (nuint)sizeof(ZSTDMT_CCtx_s) + + POOL_sizeof(mtctx->factory) + + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) + + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + + ZSTD_sizeof_CDict(mtctx->cdictLocal) + + mtctx->roundBuff.capacity; + } + + /* ZSTDMT_resize() : + * @return : error code if fails, 0 on success */ + private static nuint ZSTDMT_resize(ZSTDMT_CCtx_s* mtctx, uint nbWorkers) + { + if (POOL_resize(mtctx->factory, nbWorkers) != 0) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); { - /* Do not modify windowLog while compressing */ - uint saved_wlog = mtctx->@params.cParams.windowLog; - int compressionLevel = cctxParams->compressionLevel; - mtctx->@params.compressionLevel = compressionLevel; + nuint err_code = ZSTDMT_expandJobsTable(mtctx, nbWorkers); + if (ERR_isError(err_code)) { - ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( - cctxParams, - unchecked(0UL - 1), - 0, - ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict - ); - cParams.windowLog = saved_wlog; - 
mtctx->@params.cParams = cParams; + return err_code; } } - /* ZSTDMT_getFrameProgression(): - * tells how much data has been consumed (input) and produced (output) for current frame. - * able to count progression inside worker threads. - * Note : mutex will be acquired during statistics collection inside workers. */ - private static ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx_s* mtctx) - { - ZSTD_frameProgression fps; - fps.ingested = mtctx->consumed + mtctx->inBuff.filled; - fps.consumed = mtctx->consumed; - fps.produced = fps.flushed = mtctx->produced; - fps.currentJobID = mtctx->nextJobID; - fps.nbActiveWorkers = 0; - { - uint jobNb; - uint lastJobNb = mtctx->nextJobID + (uint)mtctx->jobReady; - assert(mtctx->jobReady <= 1); - for (jobNb = mtctx->doneJobID; jobNb < lastJobNb; jobNb++) - { - uint wJobID = jobNb & mtctx->jobIDMask; - ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; - SynchronizationWrapper.Enter(&jobPtr->job_mutex); - { - nuint cResult = jobPtr->cSize; - nuint produced = ERR_isError(cResult) ? 0 : cResult; - nuint flushed = ERR_isError(cResult) ? 0 : jobPtr->dstFlushed; - assert(flushed <= produced); - fps.ingested += jobPtr->src.size; - fps.consumed += jobPtr->consumed; - fps.produced += produced; - fps.flushed += flushed; - fps.nbActiveWorkers += jobPtr->consumed < jobPtr->src.size ? 
1U : 0U; - } - - SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); - } - } + mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, 2 * nbWorkers + 3); + if (mtctx->bufPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, (int)nbWorkers); + if (mtctx->cctxPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); + if (mtctx->seqPool == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->@params, nbWorkers); + return 0; + } - return fps; + /*! ZSTDMT_updateCParams_whileCompressing() : + * Updates a selected set of compression parameters, remaining compatible with currently active frame. + * New parameters will be applied to next compression job. */ + private static void ZSTDMT_updateCParams_whileCompressing( + ZSTDMT_CCtx_s* mtctx, + ZSTD_CCtx_params_s* cctxParams + ) + { + /* Do not modify windowLog while compressing */ + uint saved_wlog = mtctx->@params.cParams.windowLog; + int compressionLevel = cctxParams->compressionLevel; + mtctx->@params.compressionLevel = compressionLevel; + { + ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams( + cctxParams, + unchecked(0UL - 1), + 0, + ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict + ); + cParams.windowLog = saved_wlog; + mtctx->@params.cParams = cParams; } + } - /*! ZSTDMT_toFlushNow() - * Tell how many bytes are ready to be flushed immediately. - * Probe the oldest active job (not yet entirely flushed) and check its output buffer. - * If return 0, it means there is no active job, - * or, it means oldest job is still active, but everything produced has been flushed so far, - * therefore flushing is limited by speed of oldest job. 
*/ - private static nuint ZSTDMT_toFlushNow(ZSTDMT_CCtx_s* mtctx) + /* ZSTDMT_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads. + * Note : mutex will be acquired during statistics collection inside workers. */ + private static ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx_s* mtctx) + { + ZSTD_frameProgression fps; + fps.ingested = mtctx->consumed + mtctx->inBuff.filled; + fps.consumed = mtctx->consumed; + fps.produced = fps.flushed = mtctx->produced; + fps.currentJobID = mtctx->nextJobID; + fps.nbActiveWorkers = 0; { - nuint toFlush; - uint jobID = mtctx->doneJobID; - assert(jobID <= mtctx->nextJobID); - if (jobID == mtctx->nextJobID) - return 0; + uint jobNb; + uint lastJobNb = mtctx->nextJobID + (uint)mtctx->jobReady; + assert(mtctx->jobReady <= 1); + for (jobNb = mtctx->doneJobID; jobNb < lastJobNb; jobNb++) { - uint wJobID = jobID & mtctx->jobIDMask; + uint wJobID = jobNb & mtctx->jobIDMask; ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; SynchronizationWrapper.Enter(&jobPtr->job_mutex); { @@ -1135,813 +1099,848 @@ private static nuint ZSTDMT_toFlushNow(ZSTDMT_CCtx_s* mtctx) nuint produced = ERR_isError(cResult) ? 0 : cResult; nuint flushed = ERR_isError(cResult) ? 0 : jobPtr->dstFlushed; assert(flushed <= produced); - assert(jobPtr->consumed <= jobPtr->src.size); - toFlush = produced - flushed; -#if DEBUG - if (toFlush == 0) - { - assert(jobPtr->consumed < jobPtr->src.size); - } -#endif + fps.ingested += jobPtr->src.size; + fps.consumed += jobPtr->consumed; + fps.produced += produced; + fps.flushed += flushed; + fps.nbActiveWorkers += jobPtr->consumed < jobPtr->src.size ? 
1U : 0U; } SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); } - - return toFlush; } - /* ------------------------------------------ */ - /* ===== Multi-threaded compression ===== */ - /* ------------------------------------------ */ - private static uint ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params_s* @params) + return fps; + } + + /*! ZSTDMT_toFlushNow() + * Tell how many bytes are ready to be flushed immediately. + * Probe the oldest active job (not yet entirely flushed) and check its output buffer. + * If return 0, it means there is no active job, + * or, it means oldest job is still active, but everything produced has been flushed so far, + * therefore flushing is limited by speed of oldest job. */ + private static nuint ZSTDMT_toFlushNow(ZSTDMT_CCtx_s* mtctx) + { + nuint toFlush; + uint jobID = mtctx->doneJobID; + assert(jobID <= mtctx->nextJobID); + if (jobID == mtctx->nextJobID) + return 0; { - uint jobLog; - if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - jobLog = - 21 > ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3 - ? 21 - : ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3; - } - else - { - jobLog = 20 > @params->cParams.windowLog + 2 ? 20 : @params->cParams.windowLog + 2; + uint wJobID = jobID & mtctx->jobIDMask; + ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; + SynchronizationWrapper.Enter(&jobPtr->job_mutex); + { + nuint cResult = jobPtr->cSize; + nuint produced = ERR_isError(cResult) ? 0 : cResult; + nuint flushed = ERR_isError(cResult) ? 0 : jobPtr->dstFlushed; + assert(flushed <= produced); + assert(jobPtr->consumed <= jobPtr->src.size); + toFlush = produced - flushed; +#if DEBUG + if (toFlush == 0) + { + assert(jobPtr->consumed < jobPtr->src.size); + } +#endif } - return jobLog < (uint)(MEM_32bits ? 29 : 30) ? jobLog : (uint)(MEM_32bits ? 
29 : 30); + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); } - private static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) + return toFlush; + } + + /* ------------------------------------------ */ + /* ===== Multi-threaded compression ===== */ + /* ------------------------------------------ */ + private static uint ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params_s* @params) + { + uint jobLog; + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - switch (strat) + jobLog = + 21 > ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3 + ? 21 + : ZSTD_cycleLog(@params->cParams.chainLog, @params->cParams.strategy) + 3; + } + else + { + jobLog = 20 > @params->cParams.windowLog + 2 ? 20 : @params->cParams.windowLog + 2; + } + + return jobLog < (uint)(MEM_32bits ? 29 : 30) ? jobLog : (uint)(MEM_32bits ? 29 : 30); + } + + private static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) + { + switch (strat) + { + case ZSTD_strategy.ZSTD_btultra2: + return 9; + case ZSTD_strategy.ZSTD_btultra: + case ZSTD_strategy.ZSTD_btopt: + return 8; + case ZSTD_strategy.ZSTD_btlazy2: + case ZSTD_strategy.ZSTD_lazy2: + return 7; + case ZSTD_strategy.ZSTD_lazy: + case ZSTD_strategy.ZSTD_greedy: + case ZSTD_strategy.ZSTD_dfast: + case ZSTD_strategy.ZSTD_fast: + default: + break; + } + + return 6; + } + + private static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) + { + assert(0 <= ovlog && ovlog <= 9); + if (ovlog == 0) + return ZSTDMT_overlapLog_default(strat); + return ovlog; + } + + private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) + { + int overlapRLog = 9 - ZSTDMT_overlapLog(@params->overlapLog, @params->cParams.strategy); + int ovLog = (int)( + overlapRLog >= 8 ? 
0 : @params->cParams.windowLog - (uint)overlapRLog + ); + assert(0 <= overlapRLog && overlapRLog <= 8); + if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + { + ovLog = (int)( + ( + @params->cParams.windowLog < ZSTDMT_computeTargetJobLog(@params) - 2 + ? @params->cParams.windowLog + : ZSTDMT_computeTargetJobLog(@params) - 2 + ) - (uint)overlapRLog + ); + } + + assert(0 <= ovLog && ovLog <= (sizeof(nuint) == 4 ? 30 : 31)); + return ovLog == 0 ? 0 : (nuint)1 << ovLog; + } + + /* ====================================== */ + /* ======= Streaming API ======= */ + /* ====================================== */ + private static nuint ZSTDMT_initCStream_internal( + ZSTDMT_CCtx_s* mtctx, + void* dict, + nuint dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_CDict_s* cdict, + ZSTD_CCtx_params_s @params, + ulong pledgedSrcSize + ) + { + assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); + assert(!(dict != null && cdict != null)); + if (@params.nbWorkers != mtctx->@params.nbWorkers) + { + /* init */ + nuint err_code = ZSTDMT_resize(mtctx, (uint)@params.nbWorkers); + if (ERR_isError(err_code)) { - case ZSTD_strategy.ZSTD_btultra2: - return 9; - case ZSTD_strategy.ZSTD_btultra: - case ZSTD_strategy.ZSTD_btopt: - return 8; - case ZSTD_strategy.ZSTD_btlazy2: - case ZSTD_strategy.ZSTD_lazy2: - return 7; - case ZSTD_strategy.ZSTD_lazy: - case ZSTD_strategy.ZSTD_greedy: - case ZSTD_strategy.ZSTD_dfast: - case ZSTD_strategy.ZSTD_fast: - default: - break; + return err_code; } + } - return 6; + if (@params.jobSize != 0 && @params.jobSize < 512 * (1 << 10)) + @params.jobSize = 512 * (1 << 10); + if (@params.jobSize > (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20))) + @params.jobSize = (nuint)(MEM_32bits ? 
512 * (1 << 20) : 1024 * (1 << 20)); + if (mtctx->allJobsCompleted == 0) + { + ZSTDMT_waitForAllJobsCompleted(mtctx); + ZSTDMT_releaseAllJobResources(mtctx); + mtctx->allJobsCompleted = 1; } - private static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) + mtctx->@params = @params; + mtctx->frameContentSize = pledgedSrcSize; + ZSTD_freeCDict(mtctx->cdictLocal); + if (dict != null) { - assert(0 <= ovlog && ovlog <= 9); - if (ovlog == 0) - return ZSTDMT_overlapLog_default(strat); - return ovlog; + mtctx->cdictLocal = ZSTD_createCDict_advanced( + dict, + dictSize, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + dictContentType, + @params.cParams, + mtctx->cMem + ); + mtctx->cdict = mtctx->cdictLocal; + if (mtctx->cdictLocal == null) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + } + else + { + mtctx->cdictLocal = null; + mtctx->cdict = cdict; } - private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) + mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&@params); + mtctx->targetSectionSize = @params.jobSize; + if (mtctx->targetSectionSize == 0) { - int overlapRLog = 9 - ZSTDMT_overlapLog(@params->overlapLog, @params->cParams.strategy); - int ovLog = (int)( - overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog + mtctx->targetSectionSize = (nuint)( + 1UL << (int)ZSTDMT_computeTargetJobLog(&@params) ); - assert(0 <= overlapRLog && overlapRLog <= 8); - if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) - { - ovLog = (int)( - ( - @params->cParams.windowLog < ZSTDMT_computeTargetJobLog(@params) - 2 - ? @params->cParams.windowLog - : ZSTDMT_computeTargetJobLog(@params) - 2 - ) - (uint)overlapRLog - ); - } + } - assert(0 <= ovLog && ovLog <= (sizeof(nuint) == 4 ? 30 : 31)); - return ovLog == 0 ? 
0 : (nuint)1 << ovLog; - } - - /* ====================================== */ - /* ======= Streaming API ======= */ - /* ====================================== */ - private static nuint ZSTDMT_initCStream_internal( - ZSTDMT_CCtx_s* mtctx, - void* dict, - nuint dictSize, - ZSTD_dictContentType_e dictContentType, - ZSTD_CDict_s* cdict, - ZSTD_CCtx_params_s @params, - ulong pledgedSrcSize - ) + assert( + mtctx->targetSectionSize <= (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20)) + ); + if (@params.rsyncable != 0) { - assert(!ERR_isError(ZSTD_checkCParams(@params.cParams))); - assert(!(dict != null && cdict != null)); - if (@params.nbWorkers != mtctx->@params.nbWorkers) - { - /* init */ - nuint err_code = ZSTDMT_resize(mtctx, (uint)@params.nbWorkers); - if (ERR_isError(err_code)) + /* Aim for the targetsectionSize as the average job size. */ + uint jobSizeKB = (uint)(mtctx->targetSectionSize >> 10); + assert(jobSizeKB >= 1); + uint rsyncBits = ZSTD_highbit32(jobSizeKB) + 10; + assert(rsyncBits >= 17 + 2); + mtctx->rsync.hash = 0; + mtctx->rsync.hitMask = (1UL << (int)rsyncBits) - 1; + mtctx->rsync.primePower = ZSTD_rollingHash_primePower(32); + } + + if (mtctx->targetSectionSize < mtctx->targetPrefixSize) + mtctx->targetSectionSize = mtctx->targetPrefixSize; + ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); + { + /* If ldm is enabled we need windowSize space. */ + nuint windowSize = + mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 1U << (int)mtctx->@params.cParams.windowLog + : 0; + /* Two buffers of slack, plus extra space for the overlap + * This is the minimum slack that LDM works with. One extra because + * flush might waste up to targetSectionSize-1 bytes. Another extra + * for the overlap (if > 0), then one to fill which doesn't overlap + * with the LDM window. + */ + nuint nbSlackBuffers = (nuint)(2 + (mtctx->targetPrefixSize > 0 ? 
1 : 0)); + nuint slackSize = mtctx->targetSectionSize * nbSlackBuffers; + /* Compute the total size, and always have enough slack */ + nuint nbWorkers = (nuint)( + mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1 + ); + nuint sectionsSize = mtctx->targetSectionSize * nbWorkers; + nuint capacity = + (windowSize > sectionsSize ? windowSize : sectionsSize) + slackSize; + if (mtctx->roundBuff.capacity < capacity) + { + if (mtctx->roundBuff.buffer != null) + ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); + mtctx->roundBuff.buffer = (byte*)ZSTD_customMalloc(capacity, mtctx->cMem); + if (mtctx->roundBuff.buffer == null) { - return err_code; + mtctx->roundBuff.capacity = 0; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } + + mtctx->roundBuff.capacity = capacity; } + } - if (@params.jobSize != 0 && @params.jobSize < 512 * (1 << 10)) - @params.jobSize = 512 * (1 << 10); - if (@params.jobSize > (nuint)(MEM_32bits ? 512 * (1 << 20) : 1024 * (1 << 20))) - @params.jobSize = (nuint)(MEM_32bits ? 
512 * (1 << 20) : 1024 * (1 << 20)); - if (mtctx->allJobsCompleted == 0) + mtctx->roundBuff.pos = 0; + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->inBuff.prefix = kNullRange; + mtctx->doneJobID = 0; + mtctx->nextJobID = 0; + mtctx->frameEnded = 0; + mtctx->allJobsCompleted = 0; + mtctx->consumed = 0; + mtctx->produced = 0; + ZSTD_freeCDict(mtctx->cdictLocal); + mtctx->cdictLocal = null; + mtctx->cdict = null; + if (dict != null) + { + if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) { - ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - mtctx->allJobsCompleted = 1; + mtctx->inBuff.prefix.start = (byte*)dict; + mtctx->inBuff.prefix.size = dictSize; } - - mtctx->@params = @params; - mtctx->frameContentSize = pledgedSrcSize; - ZSTD_freeCDict(mtctx->cdictLocal); - if (dict != null) + else { mtctx->cdictLocal = ZSTD_createCDict_advanced( dict, dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byCopy, + ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, dictContentType, @params.cParams, mtctx->cMem ); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == null) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - } - else - { - mtctx->cdictLocal = null; - mtctx->cdict = cdict; + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) + ); } + } + else + { + mtctx->cdict = cdict; + } - mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&@params); - mtctx->targetSectionSize = @params.jobSize; - if (mtctx->targetSectionSize == 0) - { - mtctx->targetSectionSize = (nuint)( - 1UL << (int)ZSTDMT_computeTargetJobLog(&@params) - ); - } + if ( + ZSTDMT_serialState_reset( + &mtctx->serial, + mtctx->seqPool, + @params, + mtctx->targetSectionSize, + dict, + dictSize, + dictContentType + ) != 0 + ) + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return 0; + } - assert( - mtctx->targetSectionSize <= (nuint)(MEM_32bits ? 
512 * (1 << 20) : 1024 * (1 << 20)) - ); - if (@params.rsyncable != 0) - { - /* Aim for the targetsectionSize as the average job size. */ - uint jobSizeKB = (uint)(mtctx->targetSectionSize >> 10); - assert(jobSizeKB >= 1); - uint rsyncBits = ZSTD_highbit32(jobSizeKB) + 10; - assert(rsyncBits >= 17 + 2); - mtctx->rsync.hash = 0; - mtctx->rsync.hitMask = (1UL << (int)rsyncBits) - 1; - mtctx->rsync.primePower = ZSTD_rollingHash_primePower(32); - } + /* ZSTDMT_writeLastEmptyBlock() + * Write a single empty block with an end-of-frame to finish a frame. + * Job must be created from streaming variant. + * This function is always successful if expected conditions are fulfilled. + */ + private static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) + { + assert(job->lastJob == 1); + assert(job->src.size == 0); + assert(job->firstJob == 0); + assert(job->dstBuff.start == null); + job->dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (job->dstBuff.start == null) + { + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); + return; + } - if (mtctx->targetSectionSize < mtctx->targetPrefixSize) - mtctx->targetSectionSize = mtctx->targetPrefixSize; - ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); - { - /* If ldm is enabled we need windowSize space. */ - nuint windowSize = - mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable - ? 1U << (int)mtctx->@params.cParams.windowLog - : 0; - /* Two buffers of slack, plus extra space for the overlap - * This is the minimum slack that LDM works with. One extra because - * flush might waste up to targetSectionSize-1 bytes. Another extra - * for the overlap (if > 0), then one to fill which doesn't overlap - * with the LDM window. - */ - nuint nbSlackBuffers = (nuint)(2 + (mtctx->targetPrefixSize > 0 ? 
1 : 0)); - nuint slackSize = mtctx->targetSectionSize * nbSlackBuffers; - /* Compute the total size, and always have enough slack */ - nuint nbWorkers = (nuint)( - mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1 - ); - nuint sectionsSize = mtctx->targetSectionSize * nbWorkers; - nuint capacity = - (windowSize > sectionsSize ? windowSize : sectionsSize) + slackSize; - if (mtctx->roundBuff.capacity < capacity) - { - if (mtctx->roundBuff.buffer != null) - ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); - mtctx->roundBuff.buffer = (byte*)ZSTD_customMalloc(capacity, mtctx->cMem); - if (mtctx->roundBuff.buffer == null) - { - mtctx->roundBuff.capacity = 0; - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } + assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); + job->src = kNullRange; + job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); + assert(!ERR_isError(job->cSize)); + assert(job->consumed == 0); + } - mtctx->roundBuff.capacity = capacity; - } - } + private static nuint ZSTDMT_createCompressionJob( + ZSTDMT_CCtx_s* mtctx, + nuint srcSize, + ZSTD_EndDirective endOp + ) + { + uint jobID = mtctx->nextJobID & mtctx->jobIDMask; + int endFrame = endOp == ZSTD_EndDirective.ZSTD_e_end ? 1 : 0; + if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) + { + assert( + (mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask) + ); + return 0; + } - mtctx->roundBuff.pos = 0; + if (mtctx->jobReady == 0) + { + byte* src = (byte*)mtctx->inBuff.buffer.start; + mtctx->jobs[jobID].src.start = src; + mtctx->jobs[jobID].src.size = srcSize; + assert(mtctx->inBuff.filled >= srcSize); + mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; + mtctx->jobs[jobID].consumed = 0; + mtctx->jobs[jobID].cSize = 0; + mtctx->jobs[jobID].@params = mtctx->@params; + mtctx->jobs[jobID].cdict = mtctx->nextJobID == 0 ? 
mtctx->cdict : null; + mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; + mtctx->jobs[jobID].dstBuff = g_nullBuffer; + mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; + mtctx->jobs[jobID].bufPool = mtctx->bufPool; + mtctx->jobs[jobID].seqPool = mtctx->seqPool; + mtctx->jobs[jobID].serial = &mtctx->serial; + mtctx->jobs[jobID].jobID = mtctx->nextJobID; + mtctx->jobs[jobID].firstJob = mtctx->nextJobID == 0 ? 1U : 0U; + mtctx->jobs[jobID].lastJob = (uint)endFrame; + mtctx->jobs[jobID].frameChecksumNeeded = + mtctx->@params.fParams.checksumFlag != 0 + && endFrame != 0 + && mtctx->nextJobID > 0 + ? 1U + : 0U; + mtctx->jobs[jobID].dstFlushed = 0; + mtctx->roundBuff.pos += srcSize; mtctx->inBuff.buffer = g_nullBuffer; mtctx->inBuff.filled = 0; - mtctx->inBuff.prefix = kNullRange; - mtctx->doneJobID = 0; - mtctx->nextJobID = 0; - mtctx->frameEnded = 0; - mtctx->allJobsCompleted = 0; - mtctx->consumed = 0; - mtctx->produced = 0; - ZSTD_freeCDict(mtctx->cdictLocal); - mtctx->cdictLocal = null; - mtctx->cdict = null; - if (dict != null) + if (endFrame == 0) { - if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_rawContent) - { - mtctx->inBuff.prefix.start = (byte*)dict; - mtctx->inBuff.prefix.size = dictSize; - } - else - { - mtctx->cdictLocal = ZSTD_createCDict_advanced( - dict, - dictSize, - ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, - dictContentType, - @params.cParams, - mtctx->cMem - ); - mtctx->cdict = mtctx->cdictLocal; - if (mtctx->cdictLocal == null) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); - } + nuint newPrefixSize = + srcSize < mtctx->targetPrefixSize ? 
srcSize : mtctx->targetPrefixSize; + mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; + mtctx->inBuff.prefix.size = newPrefixSize; } else { - mtctx->cdict = cdict; + mtctx->inBuff.prefix = kNullRange; + mtctx->frameEnded = (uint)endFrame; + if (mtctx->nextJobID == 0) + { + mtctx->@params.fParams.checksumFlag = 0; + } } - if ( - ZSTDMT_serialState_reset( - &mtctx->serial, - mtctx->seqPool, - @params, - mtctx->targetSectionSize, - dict, - dictSize, - dictContentType - ) != 0 - ) - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - return 0; - } - - /* ZSTDMT_writeLastEmptyBlock() - * Write a single empty block with an end-of-frame to finish a frame. - * Job must be created from streaming variant. - * This function is always successful if expected conditions are fulfilled. - */ - private static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) - { - assert(job->lastJob == 1); - assert(job->src.size == 0); - assert(job->firstJob == 0); - assert(job->dstBuff.start == null); - job->dstBuff = ZSTDMT_getBuffer(job->bufPool); - if (job->dstBuff.start == null) + if (srcSize == 0 && mtctx->nextJobID > 0) { - job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); - return; + assert(endOp == ZSTD_EndDirective.ZSTD_e_end); + ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); + mtctx->nextJobID++; + return 0; } - - assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); - job->src = kNullRange; - job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); - assert(!ERR_isError(job->cSize)); - assert(job->consumed == 0); } - private static nuint ZSTDMT_createCompressionJob( - ZSTDMT_CCtx_s* mtctx, - nuint srcSize, - ZSTD_EndDirective endOp + if ( + POOL_tryAdd( + mtctx->factory, + (delegate* managed)(&ZSTDMT_compressionJob), + &mtctx->jobs[jobID] + ) != 0 ) { - uint jobID = mtctx->nextJobID & mtctx->jobIDMask; - int endFrame = endOp == ZSTD_EndDirective.ZSTD_e_end ? 
1 : 0; - if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) - { - assert( - (mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask) - ); - return 0; - } + mtctx->nextJobID++; + mtctx->jobReady = 0; + } + else + { + mtctx->jobReady = 1; + } - if (mtctx->jobReady == 0) - { - byte* src = (byte*)mtctx->inBuff.buffer.start; - mtctx->jobs[jobID].src.start = src; - mtctx->jobs[jobID].src.size = srcSize; - assert(mtctx->inBuff.filled >= srcSize); - mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; - mtctx->jobs[jobID].consumed = 0; - mtctx->jobs[jobID].cSize = 0; - mtctx->jobs[jobID].@params = mtctx->@params; - mtctx->jobs[jobID].cdict = mtctx->nextJobID == 0 ? mtctx->cdict : null; - mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; - mtctx->jobs[jobID].dstBuff = g_nullBuffer; - mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; - mtctx->jobs[jobID].bufPool = mtctx->bufPool; - mtctx->jobs[jobID].seqPool = mtctx->seqPool; - mtctx->jobs[jobID].serial = &mtctx->serial; - mtctx->jobs[jobID].jobID = mtctx->nextJobID; - mtctx->jobs[jobID].firstJob = mtctx->nextJobID == 0 ? 1U : 0U; - mtctx->jobs[jobID].lastJob = (uint)endFrame; - mtctx->jobs[jobID].frameChecksumNeeded = - mtctx->@params.fParams.checksumFlag != 0 - && endFrame != 0 - && mtctx->nextJobID > 0 - ? 1U - : 0U; - mtctx->jobs[jobID].dstFlushed = 0; - mtctx->roundBuff.pos += srcSize; - mtctx->inBuff.buffer = g_nullBuffer; - mtctx->inBuff.filled = 0; - if (endFrame == 0) - { - nuint newPrefixSize = - srcSize < mtctx->targetPrefixSize ? srcSize : mtctx->targetPrefixSize; - mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; - mtctx->inBuff.prefix.size = newPrefixSize; - } - else - { - mtctx->inBuff.prefix = kNullRange; - mtctx->frameEnded = (uint)endFrame; - if (mtctx->nextJobID == 0) - { - mtctx->@params.fParams.checksumFlag = 0; - } - } + return 0; + } - if (srcSize == 0 && mtctx->nextJobID > 0) + /*! 
ZSTDMT_flushProduced() : + * flush whatever data has been produced but not yet flushed in current job. + * move to next job if current one is fully flushed. + * `output` : `pos` will be updated with amount of data flushed . + * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . + * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ + private static nuint ZSTDMT_flushProduced( + ZSTDMT_CCtx_s* mtctx, + ZSTD_outBuffer_s* output, + uint blockToFlush, + ZSTD_EndDirective end + ) + { + uint wJobID = mtctx->doneJobID & mtctx->jobIDMask; + assert(output->size >= output->pos); + SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); + if (blockToFlush != 0 && mtctx->doneJobID < mtctx->nextJobID) + { + assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); + while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) + { + if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { - assert(endOp == ZSTD_EndDirective.ZSTD_e_end); - ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); - mtctx->nextJobID++; - return 0; + break; } - } - if ( - POOL_tryAdd( - mtctx->factory, - (delegate* managed)(&ZSTDMT_compressionJob), - &mtctx->jobs[jobID] - ) != 0 - ) - { - mtctx->nextJobID++; - mtctx->jobReady = 0; - } - else - { - mtctx->jobReady = 1; + SynchronizationWrapper.Wait(&mtctx->jobs[wJobID].job_mutex); } - - return 0; } - /*! ZSTDMT_flushProduced() : - * flush whatever data has been produced but not yet flushed in current job. - * move to next job if current one is fully flushed. - * `output` : `pos` will be updated with amount of data flushed . - * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 
- * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ - private static nuint ZSTDMT_flushProduced( - ZSTDMT_CCtx_s* mtctx, - ZSTD_outBuffer_s* output, - uint blockToFlush, - ZSTD_EndDirective end - ) { - uint wJobID = mtctx->doneJobID & mtctx->jobIDMask; - assert(output->size >= output->pos); - SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); - if (blockToFlush != 0 && mtctx->doneJobID < mtctx->nextJobID) + /* shared */ + nuint cSize = mtctx->jobs[wJobID].cSize; + /* shared */ + nuint srcConsumed = mtctx->jobs[wJobID].consumed; + /* read-only, could be done after mutex lock, but no-declaration-after-statement */ + nuint srcSize = mtctx->jobs[wJobID].src.size; + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + if (ERR_isError(cSize)) { - assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); - while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) - { - if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) - { - break; - } - - SynchronizationWrapper.Wait(&mtctx->jobs[wJobID].job_mutex); - } + ZSTDMT_waitForAllJobsCompleted(mtctx); + ZSTDMT_releaseAllJobResources(mtctx); + return cSize; } + assert(srcConsumed <= srcSize); + if (srcConsumed == srcSize && mtctx->jobs[wJobID].frameChecksumNeeded != 0) { - /* shared */ - nuint cSize = mtctx->jobs[wJobID].cSize; - /* shared */ - nuint srcConsumed = mtctx->jobs[wJobID].consumed; - /* read-only, could be done after mutex lock, but no-declaration-after-statement */ - nuint srcSize = mtctx->jobs[wJobID].src.size; - SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); - if (ERR_isError(cSize)) - { - ZSTDMT_waitForAllJobsCompleted(mtctx); - ZSTDMT_releaseAllJobResources(mtctx); - return cSize; - } + uint checksum = (uint)ZSTD_XXH64_digest(&mtctx->serial.xxhState); + MEM_writeLE32( + (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, + checksum + ); + cSize += 4; + 
mtctx->jobs[wJobID].cSize += 4; + mtctx->jobs[wJobID].frameChecksumNeeded = 0; + } - assert(srcConsumed <= srcSize); - if (srcConsumed == srcSize && mtctx->jobs[wJobID].frameChecksumNeeded != 0) + if (cSize > 0) + { + nuint toFlush = + cSize - mtctx->jobs[wJobID].dstFlushed < output->size - output->pos + ? cSize - mtctx->jobs[wJobID].dstFlushed + : output->size - output->pos; + assert(mtctx->doneJobID < mtctx->nextJobID); + assert(cSize >= mtctx->jobs[wJobID].dstFlushed); + assert(mtctx->jobs[wJobID].dstBuff.start != null); + if (toFlush > 0) { - uint checksum = (uint)ZSTD_XXH64_digest(&mtctx->serial.xxhState); - MEM_writeLE32( - (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, - checksum + memcpy( + (sbyte*)output->dst + output->pos, + (sbyte*)mtctx->jobs[wJobID].dstBuff.start + + mtctx->jobs[wJobID].dstFlushed, + (uint)toFlush ); - cSize += 4; - mtctx->jobs[wJobID].cSize += 4; - mtctx->jobs[wJobID].frameChecksumNeeded = 0; } - if (cSize > 0) + output->pos += toFlush; + mtctx->jobs[wJobID].dstFlushed += toFlush; + if (srcConsumed == srcSize && mtctx->jobs[wJobID].dstFlushed == cSize) { - nuint toFlush = - cSize - mtctx->jobs[wJobID].dstFlushed < output->size - output->pos - ? 
cSize - mtctx->jobs[wJobID].dstFlushed - : output->size - output->pos; - assert(mtctx->doneJobID < mtctx->nextJobID); - assert(cSize >= mtctx->jobs[wJobID].dstFlushed); - assert(mtctx->jobs[wJobID].dstBuff.start != null); - if (toFlush > 0) - { - memcpy( - (sbyte*)output->dst + output->pos, - (sbyte*)mtctx->jobs[wJobID].dstBuff.start - + mtctx->jobs[wJobID].dstFlushed, - (uint)toFlush - ); - } - - output->pos += toFlush; - mtctx->jobs[wJobID].dstFlushed += toFlush; - if (srcConsumed == srcSize && mtctx->jobs[wJobID].dstFlushed == cSize) - { - ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); - mtctx->jobs[wJobID].dstBuff = g_nullBuffer; - mtctx->jobs[wJobID].cSize = 0; - mtctx->consumed += srcSize; - mtctx->produced += cSize; - mtctx->doneJobID++; - } + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); + mtctx->jobs[wJobID].dstBuff = g_nullBuffer; + mtctx->jobs[wJobID].cSize = 0; + mtctx->consumed += srcSize; + mtctx->produced += cSize; + mtctx->doneJobID++; } - - if (cSize > mtctx->jobs[wJobID].dstFlushed) - return cSize - mtctx->jobs[wJobID].dstFlushed; - if (srcSize > srcConsumed) - return 1; } - if (mtctx->doneJobID < mtctx->nextJobID) - return 1; - if (mtctx->jobReady != 0) - return 1; - if (mtctx->inBuff.filled > 0) + if (cSize > mtctx->jobs[wJobID].dstFlushed) + return cSize - mtctx->jobs[wJobID].dstFlushed; + if (srcSize > srcConsumed) return 1; - mtctx->allJobsCompleted = mtctx->frameEnded; - if (end == ZSTD_EndDirective.ZSTD_e_end) - return mtctx->frameEnded == 0 ? 1U : 0U; - return 0; } - /** - * Returns the range of data used by the earliest job that is not yet complete. - * If the data of the first job is broken up into two segments, we cover both - * sections. 
- */ - private static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx_s* mtctx) - { - uint firstJobID = mtctx->doneJobID; - uint lastJobID = mtctx->nextJobID; - uint jobID; - /* no need to check during first round */ - nuint roundBuffCapacity = mtctx->roundBuff.capacity; - nuint nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; - if (lastJobID < nbJobs1stRoundMin) - return kNullRange; - for (jobID = firstJobID; jobID < lastJobID; ++jobID) + if (mtctx->doneJobID < mtctx->nextJobID) + return 1; + if (mtctx->jobReady != 0) + return 1; + if (mtctx->inBuff.filled > 0) + return 1; + mtctx->allJobsCompleted = mtctx->frameEnded; + if (end == ZSTD_EndDirective.ZSTD_e_end) + return mtctx->frameEnded == 0 ? 1U : 0U; + return 0; + } + + /** + * Returns the range of data used by the earliest job that is not yet complete. + * If the data of the first job is broken up into two segments, we cover both + * sections. + */ + private static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx_s* mtctx) + { + uint firstJobID = mtctx->doneJobID; + uint lastJobID = mtctx->nextJobID; + uint jobID; + /* no need to check during first round */ + nuint roundBuffCapacity = mtctx->roundBuff.capacity; + nuint nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; + if (lastJobID < nbJobs1stRoundMin) + return kNullRange; + for (jobID = firstJobID; jobID < lastJobID; ++jobID) + { + uint wJobID = jobID & mtctx->jobIDMask; + nuint consumed; + SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); + consumed = mtctx->jobs[wJobID].consumed; + SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); + if (consumed < mtctx->jobs[wJobID].src.size) { - uint wJobID = jobID & mtctx->jobIDMask; - nuint consumed; - SynchronizationWrapper.Enter(&mtctx->jobs[wJobID].job_mutex); - consumed = mtctx->jobs[wJobID].consumed; - SynchronizationWrapper.Exit(&mtctx->jobs[wJobID].job_mutex); - if (consumed < mtctx->jobs[wJobID].src.size) + Range range = mtctx->jobs[wJobID].prefix; + if (range.size 
== 0) { - Range range = mtctx->jobs[wJobID].prefix; - if (range.size == 0) - { - range = mtctx->jobs[wJobID].src; - } - - assert(range.start <= mtctx->jobs[wJobID].src.start); - return range; + range = mtctx->jobs[wJobID].src; } - } - return kNullRange; + assert(range.start <= mtctx->jobs[wJobID].src.start); + return range; + } } - /** - * Returns non-zero iff buffer and range overlap. - */ - private static int ZSTDMT_isOverlapped(buffer_s buffer, Range range) + return kNullRange; + } + + /** + * Returns non-zero iff buffer and range overlap. + */ + private static int ZSTDMT_isOverlapped(buffer_s buffer, Range range) + { + byte* bufferStart = (byte*)buffer.start; + byte* rangeStart = (byte*)range.start; + if (rangeStart == null || bufferStart == null) + return 0; { - byte* bufferStart = (byte*)buffer.start; - byte* rangeStart = (byte*)range.start; - if (rangeStart == null || bufferStart == null) + byte* bufferEnd = bufferStart + buffer.capacity; + byte* rangeEnd = rangeStart + range.size; + if (bufferStart == bufferEnd || rangeStart == rangeEnd) return 0; - { - byte* bufferEnd = bufferStart + buffer.capacity; - byte* rangeEnd = rangeStart + range.size; - if (bufferStart == bufferEnd || rangeStart == rangeEnd) - return 0; - return bufferStart < rangeEnd && rangeStart < bufferEnd ? 1 : 0; - } + return bufferStart < rangeEnd && rangeStart < bufferEnd ? 
1 : 0; } + } - private static int ZSTDMT_doesOverlapWindow(buffer_s buffer, ZSTD_window_t window) - { - Range extDict; - Range prefix; - extDict.start = window.dictBase + window.lowLimit; - extDict.size = window.dictLimit - window.lowLimit; - prefix.start = window.@base + window.dictLimit; - prefix.size = (nuint)(window.nextSrc - (window.@base + window.dictLimit)); - return - ZSTDMT_isOverlapped(buffer, extDict) != 0 - || ZSTDMT_isOverlapped(buffer, prefix) != 0 + private static int ZSTDMT_doesOverlapWindow(buffer_s buffer, ZSTD_window_t window) + { + Range extDict; + Range prefix; + extDict.start = window.dictBase + window.lowLimit; + extDict.size = window.dictLimit - window.lowLimit; + prefix.start = window.@base + window.dictLimit; + prefix.size = (nuint)(window.nextSrc - (window.@base + window.dictLimit)); + return + ZSTDMT_isOverlapped(buffer, extDict) != 0 + || ZSTDMT_isOverlapped(buffer, prefix) != 0 ? 1 : 0; - } + } - private static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx_s* mtctx, buffer_s buffer) + private static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx_s* mtctx, buffer_s buffer) + { + if (mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { - if (mtctx->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) + void** mutex = &mtctx->serial.ldmWindowMutex; + SynchronizationWrapper.Enter(mutex); + while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow) != 0) { - void** mutex = &mtctx->serial.ldmWindowMutex; - SynchronizationWrapper.Enter(mutex); - while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow) != 0) - { - SynchronizationWrapper.Wait(mutex); - } - - SynchronizationWrapper.Exit(mutex); + SynchronizationWrapper.Wait(mutex); } + + SynchronizationWrapper.Exit(mutex); } + } - /** - * Attempts to set the inBuff to the next section to fill. - * If any part of the new section is still in use we give up. - * Returns non-zero if the buffer is filled. 
- */ - private static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx_s* mtctx) + /** + * Attempts to set the inBuff to the next section to fill. + * If any part of the new section is still in use we give up. + * Returns non-zero if the buffer is filled. + */ + private static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx_s* mtctx) + { + Range inUse = ZSTDMT_getInputDataInUse(mtctx); + nuint spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; + nuint spaceNeeded = mtctx->targetSectionSize; + buffer_s buffer; + assert(mtctx->inBuff.buffer.start == null); + assert(mtctx->roundBuff.capacity >= spaceNeeded); + if (spaceLeft < spaceNeeded) { - Range inUse = ZSTDMT_getInputDataInUse(mtctx); - nuint spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; - nuint spaceNeeded = mtctx->targetSectionSize; - buffer_s buffer; - assert(mtctx->inBuff.buffer.start == null); - assert(mtctx->roundBuff.capacity >= spaceNeeded); - if (spaceLeft < spaceNeeded) - { - /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. - * Simply copy the prefix to the beginning in that case. - */ - byte* start = mtctx->roundBuff.buffer; - nuint prefixSize = mtctx->inBuff.prefix.size; - buffer.start = start; - buffer.capacity = prefixSize; - if (ZSTDMT_isOverlapped(buffer, inUse) != 0) - { - return 0; - } - - ZSTDMT_waitForLdmComplete(mtctx, buffer); - memmove(start, mtctx->inBuff.prefix.start, prefixSize); - mtctx->inBuff.prefix.start = start; - mtctx->roundBuff.pos = prefixSize; - } - - buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; - buffer.capacity = spaceNeeded; + /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. + * Simply copy the prefix to the beginning in that case. 
+ */ + byte* start = mtctx->roundBuff.buffer; + nuint prefixSize = mtctx->inBuff.prefix.size; + buffer.start = start; + buffer.capacity = prefixSize; if (ZSTDMT_isOverlapped(buffer, inUse) != 0) { return 0; } - assert(ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix) == 0); ZSTDMT_waitForLdmComplete(mtctx, buffer); - mtctx->inBuff.buffer = buffer; - mtctx->inBuff.filled = 0; - assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); - return 1; + memmove(start, mtctx->inBuff.prefix.start, prefixSize); + mtctx->inBuff.prefix.start = start; + mtctx->roundBuff.pos = prefixSize; } - /** - * Searches through the input for a synchronization point. If one is found, we - * will instruct the caller to flush, and return the number of bytes to load. - * Otherwise, we will load as many bytes as possible and instruct the caller - * to continue as normal. - */ - private static SyncPoint findSynchronizationPoint( - ZSTDMT_CCtx_s* mtctx, - ZSTD_inBuffer_s input - ) + buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; + buffer.capacity = spaceNeeded; + if (ZSTDMT_isOverlapped(buffer, inUse) != 0) { - byte* istart = (byte*)input.src + input.pos; - ulong primePower = mtctx->rsync.primePower; - ulong hitMask = mtctx->rsync.hitMask; - SyncPoint syncPoint; - ulong hash; - byte* prev; - nuint pos; - syncPoint.toLoad = - input.size - input.pos < mtctx->targetSectionSize - mtctx->inBuff.filled - ? 
input.size - input.pos - : mtctx->targetSectionSize - mtctx->inBuff.filled; - syncPoint.flush = 0; - if (mtctx->@params.rsyncable == 0) - return syncPoint; - if (mtctx->inBuff.filled + input.size - input.pos < 1 << 17) - return syncPoint; - if (mtctx->inBuff.filled + syncPoint.toLoad < 32) - return syncPoint; - if (mtctx->inBuff.filled < 1 << 17) + return 0; + } + + assert(ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix) == 0); + ZSTDMT_waitForLdmComplete(mtctx, buffer); + mtctx->inBuff.buffer = buffer; + mtctx->inBuff.filled = 0; + assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); + return 1; + } + + /** + * Searches through the input for a synchronization point. If one is found, we + * will instruct the caller to flush, and return the number of bytes to load. + * Otherwise, we will load as many bytes as possible and instruct the caller + * to continue as normal. + */ + private static SyncPoint findSynchronizationPoint( + ZSTDMT_CCtx_s* mtctx, + ZSTD_inBuffer_s input + ) + { + byte* istart = (byte*)input.src + input.pos; + ulong primePower = mtctx->rsync.primePower; + ulong hitMask = mtctx->rsync.hitMask; + SyncPoint syncPoint; + ulong hash; + byte* prev; + nuint pos; + syncPoint.toLoad = + input.size - input.pos < mtctx->targetSectionSize - mtctx->inBuff.filled + ? 
input.size - input.pos + : mtctx->targetSectionSize - mtctx->inBuff.filled; + syncPoint.flush = 0; + if (mtctx->@params.rsyncable == 0) + return syncPoint; + if (mtctx->inBuff.filled + input.size - input.pos < 1 << 17) + return syncPoint; + if (mtctx->inBuff.filled + syncPoint.toLoad < 32) + return syncPoint; + if (mtctx->inBuff.filled < 1 << 17) + { + pos = (1 << 17) - mtctx->inBuff.filled; + if (pos >= 32) { - pos = (1 << 17) - mtctx->inBuff.filled; - if (pos >= 32) - { - prev = istart + pos - 32; - hash = ZSTD_rollingHash_compute(prev, 32); - } - else - { - assert(mtctx->inBuff.filled >= 32); - prev = (byte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - 32; - hash = ZSTD_rollingHash_compute(prev + pos, 32 - pos); - hash = ZSTD_rollingHash_append(hash, istart, pos); - } + prev = istart + pos - 32; + hash = ZSTD_rollingHash_compute(prev, 32); } else { - assert(mtctx->inBuff.filled >= 1 << 17); - assert(1 << 17 >= 32); - pos = 0; + assert(mtctx->inBuff.filled >= 32); prev = (byte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - 32; - hash = ZSTD_rollingHash_compute(prev, 32); - if ((hash & hitMask) == hitMask) - { - syncPoint.toLoad = 0; - syncPoint.flush = 1; - return syncPoint; - } + hash = ZSTD_rollingHash_compute(prev + pos, 32 - pos); + hash = ZSTD_rollingHash_append(hash, istart, pos); } + } + else + { + assert(mtctx->inBuff.filled >= 1 << 17); + assert(1 << 17 >= 32); + pos = 0; + prev = (byte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - 32; + hash = ZSTD_rollingHash_compute(prev, 32); + if ((hash & hitMask) == hitMask) + { + syncPoint.toLoad = 0; + syncPoint.flush = 1; + return syncPoint; + } + } - assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); - for (; pos < syncPoint.toLoad; ++pos) + assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); + for (; pos < syncPoint.toLoad; ++pos) + { + byte toRemove = pos < 32 ? 
prev[pos] : istart[pos - 32]; + hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + assert(mtctx->inBuff.filled + pos >= 1 << 17); + if ((hash & hitMask) == hitMask) { - byte toRemove = pos < 32 ? prev[pos] : istart[pos - 32]; - hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); - assert(mtctx->inBuff.filled + pos >= 1 << 17); - if ((hash & hitMask) == hitMask) - { - syncPoint.toLoad = pos + 1; - syncPoint.flush = 1; - ++pos; - break; - } + syncPoint.toLoad = pos + 1; + syncPoint.flush = 1; + ++pos; + break; } - - assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); - return syncPoint; } - /* === Streaming functions === */ - private static nuint ZSTDMT_nextInputSizeHint(ZSTDMT_CCtx_s* mtctx) + assert(pos < 32 || ZSTD_rollingHash_compute(istart + pos - 32, 32) == hash); + return syncPoint; + } + + /* === Streaming functions === */ + private static nuint ZSTDMT_nextInputSizeHint(ZSTDMT_CCtx_s* mtctx) + { + nuint hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; + if (hintInSize == 0) + hintInSize = mtctx->targetSectionSize; + return hintInSize; + } + + /** ZSTDMT_compressStream_generic() : + * internal use only - exposed to be invoked from zstd_compress.c + * assumption : output and input are valid (pos <= size) + * @return : minimum amount of data remaining to flush, 0 if none */ + private static nuint ZSTDMT_compressStream_generic( + ZSTDMT_CCtx_s* mtctx, + ZSTD_outBuffer_s* output, + ZSTD_inBuffer_s* input, + ZSTD_EndDirective endOp + ) + { + uint forwardInputProgress = 0; + assert(output->pos <= output->size); + assert(input->pos <= input->size); + if (mtctx->frameEnded != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) { - nuint hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; - if (hintInSize == 0) - hintInSize = mtctx->targetSectionSize; - return hintInSize; + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); } - /** 
ZSTDMT_compressStream_generic() : - * internal use only - exposed to be invoked from zstd_compress.c - * assumption : output and input are valid (pos <= size) - * @return : minimum amount of data remaining to flush, 0 if none */ - private static nuint ZSTDMT_compressStream_generic( - ZSTDMT_CCtx_s* mtctx, - ZSTD_outBuffer_s* output, - ZSTD_inBuffer_s* input, - ZSTD_EndDirective endOp - ) + if (mtctx->jobReady == 0 && input->size > input->pos) { - uint forwardInputProgress = 0; - assert(output->pos <= output->size); - assert(input->pos <= input->size); - if (mtctx->frameEnded != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) + if (mtctx->inBuff.buffer.start == null) { - return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stage_wrong)); - } - - if (mtctx->jobReady == 0 && input->size > input->pos) - { - if (mtctx->inBuff.buffer.start == null) + assert(mtctx->inBuff.filled == 0); + if (ZSTDMT_tryGetInputRange(mtctx) == 0) { - assert(mtctx->inBuff.filled == 0); - if (ZSTDMT_tryGetInputRange(mtctx) == 0) - { - assert(mtctx->doneJobID != mtctx->nextJobID); - } + assert(mtctx->doneJobID != mtctx->nextJobID); } + } - if (mtctx->inBuff.buffer.start != null) + if (mtctx->inBuff.buffer.start != null) + { + SyncPoint syncPoint = findSynchronizationPoint(mtctx, *input); + if (syncPoint.flush != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) { - SyncPoint syncPoint = findSynchronizationPoint(mtctx, *input); - if (syncPoint.flush != 0 && endOp == ZSTD_EndDirective.ZSTD_e_continue) - { - endOp = ZSTD_EndDirective.ZSTD_e_flush; - } - - assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); - memcpy( - (sbyte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, - (sbyte*)input->src + input->pos, - (uint)syncPoint.toLoad - ); - input->pos += syncPoint.toLoad; - mtctx->inBuff.filled += syncPoint.toLoad; - forwardInputProgress = syncPoint.toLoad > 0 ? 
1U : 0U; + endOp = ZSTD_EndDirective.ZSTD_e_flush; } - } - if (input->pos < input->size && endOp == ZSTD_EndDirective.ZSTD_e_end) - { - assert( - mtctx->inBuff.filled == 0 - || mtctx->inBuff.filled == mtctx->targetSectionSize - || mtctx->@params.rsyncable != 0 + assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); + memcpy( + (sbyte*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, + (sbyte*)input->src + input->pos, + (uint)syncPoint.toLoad ); - endOp = ZSTD_EndDirective.ZSTD_e_flush; + input->pos += syncPoint.toLoad; + mtctx->inBuff.filled += syncPoint.toLoad; + forwardInputProgress = syncPoint.toLoad > 0 ? 1U : 0U; } + } - if ( - mtctx->jobReady != 0 - || mtctx->inBuff.filled >= mtctx->targetSectionSize - || endOp != ZSTD_EndDirective.ZSTD_e_continue && mtctx->inBuff.filled > 0 - || endOp == ZSTD_EndDirective.ZSTD_e_end && mtctx->frameEnded == 0 - ) + if (input->pos < input->size && endOp == ZSTD_EndDirective.ZSTD_e_end) + { + assert( + mtctx->inBuff.filled == 0 + || mtctx->inBuff.filled == mtctx->targetSectionSize + || mtctx->@params.rsyncable != 0 + ); + endOp = ZSTD_EndDirective.ZSTD_e_flush; + } + + if ( + mtctx->jobReady != 0 + || mtctx->inBuff.filled >= mtctx->targetSectionSize + || endOp != ZSTD_EndDirective.ZSTD_e_continue && mtctx->inBuff.filled > 0 + || endOp == ZSTD_EndDirective.ZSTD_e_end && mtctx->frameEnded == 0 + ) + { + nuint jobSize = mtctx->inBuff.filled; + assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); { - nuint jobSize = mtctx->inBuff.filled; - assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); + nuint err_code = ZSTDMT_createCompressionJob(mtctx, jobSize, endOp); + if (ERR_isError(err_code)) { - nuint err_code = ZSTDMT_createCompressionJob(mtctx, jobSize, endOp); - if (ERR_isError(err_code)) - { - return err_code; - } + return err_code; } } + } - { - /* block if there was no forward input progress */ - nuint remainingToFlush = ZSTDMT_flushProduced( - mtctx, - output, - forwardInputProgress == 0 ? 
1U : 0U, - endOp - ); - if (input->pos < input->size) - return remainingToFlush > 1 ? remainingToFlush : 1; - return remainingToFlush; - } + { + /* block if there was no forward input progress */ + nuint remainingToFlush = ZSTDMT_flushProduced( + mtctx, + output, + forwardInputProgress == 0 ? 1U : 0U, + endOp + ); + if (input->pos < input->size) + return remainingToFlush > 1 ? remainingToFlush : 1; + return remainingToFlush; } } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs index c0da19f62..af51bc197 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/_wksps_e__Union.cs @@ -1,17 +1,16 @@ using System.Runtime.InteropServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +[StructLayout(LayoutKind.Explicit)] +public unsafe struct _wksps_e__Union { - [StructLayout(LayoutKind.Explicit)] - public unsafe struct _wksps_e__Union - { - [FieldOffset(0)] - public HUF_buildCTable_wksp_tables buildCTable_wksp; + [FieldOffset(0)] + public HUF_buildCTable_wksp_tables buildCTable_wksp; - [FieldOffset(0)] - public HUF_WriteCTableWksp writeCTable_wksp; + [FieldOffset(0)] + public HUF_WriteCTableWksp writeCTable_wksp; - [FieldOffset(0)] - public fixed uint hist_wksp[1024]; - } + [FieldOffset(0)] + public fixed uint hist_wksp[1024]; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs index 5ea98f181..869a68805 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/algo_time_t.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct algo_time_t { - public struct algo_time_t - { - public uint tableTime; - public uint decode256Time; + public uint 
tableTime; + public uint decode256Time; - public algo_time_t(uint tableTime, uint decode256Time) - { - this.tableTime = tableTime; - this.decode256Time = decode256Time; - } + public algo_time_t(uint tableTime, uint decode256Time) + { + this.tableTime = tableTime; + this.decode256Time = decode256Time; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs index 1e95f5e00..b461bbfdb 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/base_directive_e.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum base_directive_e { - public enum base_directive_e - { - base_0possible = 0, - base_1guaranteed = 1, - } + base_0possible = 0, + base_1guaranteed = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs index 4413c5d4d..e1f68e389 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockProperties_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct blockProperties_t { - public struct blockProperties_t - { - public blockType_e blockType; - public uint lastBlock; - public uint origSize; - } + public blockType_e blockType; + public uint lastBlock; + public uint origSize; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs index 44f7af6b6..1c0df0a25 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/blockType_e.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum 
blockType_e { - public enum blockType_e - { - bt_raw, - bt_rle, - bt_compressed, - bt_reserved, - } + bt_raw, + bt_rle, + bt_compressed, + bt_reserved, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs index 332edb1f9..6ddf1b227 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/buffer_s.cs @@ -1,16 +1,15 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ===== Buffer Pool ===== */ +/* a single Buffer Pool can be invoked from multiple threads in parallel */ +public unsafe struct buffer_s { - /* ===== Buffer Pool ===== */ - /* a single Buffer Pool can be invoked from multiple threads in parallel */ - public unsafe struct buffer_s - { - public void* start; - public nuint capacity; + public void* start; + public nuint capacity; - public buffer_s(void* start, nuint capacity) - { - this.start = start; - this.capacity = capacity; - } + public buffer_s(void* start, nuint capacity) + { + this.start = start; + this.capacity = capacity; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs index b18b0c719..929fea695 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/dictItem.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct dictItem { - public struct dictItem - { - public uint pos; - public uint length; - public uint savings; - } + public uint pos; + public uint length; + public uint savings; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs index 1c9e85244..df3642fa9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ +public struct InBuff_t { - /* ------------------------------------------ */ - /* ===== Multi-threaded compression ===== */ - /* ------------------------------------------ */ - public struct InBuff_t - { - /* read-only non-owned prefix buffer */ - public Range prefix; - public buffer_s buffer; - public nuint filled; - } -} + /* read-only non-owned prefix buffer */ + public Range prefix; + public buffer_s buffer; + public nuint filled; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs index 8b308fd58..81304ceae 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmEntry_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ldmEntry_t { - public struct ldmEntry_t - { - public uint offset; - public uint checksum; - } + public uint offset; + public uint checksum; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs index a5958c318..e6f6d465f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmMatchCandidate_t.cs @@ -1,10 +1,9 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ldmMatchCandidate_t { - public unsafe struct ldmMatchCandidate_t - { - public byte* split; - public uint hash; - public uint checksum; - public ldmEntry_t* bucket; - } + public byte* split; + 
public uint hash; + public uint checksum; + public ldmEntry_t* bucket; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs index 4558d0c55..71c108dd5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmParams_t.cs @@ -1,23 +1,22 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ldmParams_t { - public struct ldmParams_t - { - /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ - public ZSTD_paramSwitch_e enableLdm; + /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ + public ZSTD_paramSwitch_e enableLdm; - /* Log size of hashTable */ - public uint hashLog; + /* Log size of hashTable */ + public uint hashLog; - /* Log bucket size for collision resolution, at most 8 */ - public uint bucketSizeLog; + /* Log bucket size for collision resolution, at most 8 */ + public uint bucketSizeLog; - /* Minimum match length */ - public uint minMatchLength; + /* Minimum match length */ + public uint minMatchLength; - /* Log number of entries to skip */ - public uint hashRateLog; + /* Log number of entries to skip */ + public uint hashRateLog; - /* Window log for the LDM */ - public uint windowLog; - } + /* Window log for the LDM */ + public uint windowLog; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs index d4fa28ec4..c27abfcae 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct ldmRollingHashState_t { - public struct ldmRollingHashState_t - { - public ulong rolling; - public ulong stopMask; - } -} + public ulong 
rolling; + public ulong stopMask; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs index db4484e84..8ee9d9a71 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs @@ -1,18 +1,18 @@ using System.Runtime.CompilerServices; -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct ldmState_t { - public unsafe struct ldmState_t - { - /* State for the window round buffer management */ - public ZSTD_window_t window; - public ldmEntry_t* hashTable; - public uint loadedDictEnd; + /* State for the window round buffer management */ + public ZSTD_window_t window; + public ldmEntry_t* hashTable; + public uint loadedDictEnd; - /* Next position in bucket to insert entry */ - public byte* bucketOffsets; - public _splitIndices_e__FixedBuffer splitIndices; - public _matchCandidates_e__FixedBuffer matchCandidates; + /* Next position in bucket to insert entry */ + public byte* bucketOffsets; + public _splitIndices_e__FixedBuffer splitIndices; + public _matchCandidates_e__FixedBuffer matchCandidates; #if NET8_0_OR_GREATER [InlineArray(64)] @@ -22,73 +22,73 @@ public unsafe struct _splitIndices_e__FixedBuffer } #else - public unsafe struct _splitIndices_e__FixedBuffer - { - public nuint e0; - public nuint e1; - public nuint e2; - public nuint e3; - public nuint e4; - public nuint e5; - public nuint e6; - public nuint e7; - public nuint e8; - public nuint e9; - public nuint e10; - public nuint e11; - public nuint e12; - public nuint e13; - public nuint e14; - public nuint e15; - public nuint e16; - public nuint e17; - public nuint e18; - public nuint e19; - public nuint e20; - public nuint e21; - public nuint e22; - public nuint e23; - public nuint e24; - public nuint e25; - public nuint e26; - public nuint e27; - public nuint e28; - 
public nuint e29; - public nuint e30; - public nuint e31; - public nuint e32; - public nuint e33; - public nuint e34; - public nuint e35; - public nuint e36; - public nuint e37; - public nuint e38; - public nuint e39; - public nuint e40; - public nuint e41; - public nuint e42; - public nuint e43; - public nuint e44; - public nuint e45; - public nuint e46; - public nuint e47; - public nuint e48; - public nuint e49; - public nuint e50; - public nuint e51; - public nuint e52; - public nuint e53; - public nuint e54; - public nuint e55; - public nuint e56; - public nuint e57; - public nuint e58; - public nuint e59; - public nuint e60; - public nuint e61; - public nuint e62; - public nuint e63; - } + public unsafe struct _splitIndices_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; + public nuint e3; + public nuint e4; + public nuint e5; + public nuint e6; + public nuint e7; + public nuint e8; + public nuint e9; + public nuint e10; + public nuint e11; + public nuint e12; + public nuint e13; + public nuint e14; + public nuint e15; + public nuint e16; + public nuint e17; + public nuint e18; + public nuint e19; + public nuint e20; + public nuint e21; + public nuint e22; + public nuint e23; + public nuint e24; + public nuint e25; + public nuint e26; + public nuint e27; + public nuint e28; + public nuint e29; + public nuint e30; + public nuint e31; + public nuint e32; + public nuint e33; + public nuint e34; + public nuint e35; + public nuint e36; + public nuint e37; + public nuint e38; + public nuint e39; + public nuint e40; + public nuint e41; + public nuint e42; + public nuint e43; + public nuint e44; + public nuint e45; + public nuint e46; + public nuint e47; + public nuint e48; + public nuint e49; + public nuint e50; + public nuint e51; + public nuint e52; + public nuint e53; + public nuint e54; + public nuint e55; + public nuint e56; + public nuint e57; + public nuint e58; + public nuint e59; + public nuint e60; + public nuint e61; + public 
nuint e62; + public nuint e63; + } #endif #if NET8_0_OR_GREATER @@ -99,73 +99,72 @@ public unsafe struct _matchCandidates_e__FixedBuffer } #else - public unsafe struct _matchCandidates_e__FixedBuffer - { - public ldmMatchCandidate_t e0; - public ldmMatchCandidate_t e1; - public ldmMatchCandidate_t e2; - public ldmMatchCandidate_t e3; - public ldmMatchCandidate_t e4; - public ldmMatchCandidate_t e5; - public ldmMatchCandidate_t e6; - public ldmMatchCandidate_t e7; - public ldmMatchCandidate_t e8; - public ldmMatchCandidate_t e9; - public ldmMatchCandidate_t e10; - public ldmMatchCandidate_t e11; - public ldmMatchCandidate_t e12; - public ldmMatchCandidate_t e13; - public ldmMatchCandidate_t e14; - public ldmMatchCandidate_t e15; - public ldmMatchCandidate_t e16; - public ldmMatchCandidate_t e17; - public ldmMatchCandidate_t e18; - public ldmMatchCandidate_t e19; - public ldmMatchCandidate_t e20; - public ldmMatchCandidate_t e21; - public ldmMatchCandidate_t e22; - public ldmMatchCandidate_t e23; - public ldmMatchCandidate_t e24; - public ldmMatchCandidate_t e25; - public ldmMatchCandidate_t e26; - public ldmMatchCandidate_t e27; - public ldmMatchCandidate_t e28; - public ldmMatchCandidate_t e29; - public ldmMatchCandidate_t e30; - public ldmMatchCandidate_t e31; - public ldmMatchCandidate_t e32; - public ldmMatchCandidate_t e33; - public ldmMatchCandidate_t e34; - public ldmMatchCandidate_t e35; - public ldmMatchCandidate_t e36; - public ldmMatchCandidate_t e37; - public ldmMatchCandidate_t e38; - public ldmMatchCandidate_t e39; - public ldmMatchCandidate_t e40; - public ldmMatchCandidate_t e41; - public ldmMatchCandidate_t e42; - public ldmMatchCandidate_t e43; - public ldmMatchCandidate_t e44; - public ldmMatchCandidate_t e45; - public ldmMatchCandidate_t e46; - public ldmMatchCandidate_t e47; - public ldmMatchCandidate_t e48; - public ldmMatchCandidate_t e49; - public ldmMatchCandidate_t e50; - public ldmMatchCandidate_t e51; - public ldmMatchCandidate_t e52; - 
public ldmMatchCandidate_t e53; - public ldmMatchCandidate_t e54; - public ldmMatchCandidate_t e55; - public ldmMatchCandidate_t e56; - public ldmMatchCandidate_t e57; - public ldmMatchCandidate_t e58; - public ldmMatchCandidate_t e59; - public ldmMatchCandidate_t e60; - public ldmMatchCandidate_t e61; - public ldmMatchCandidate_t e62; - public ldmMatchCandidate_t e63; - } -#endif + public unsafe struct _matchCandidates_e__FixedBuffer + { + public ldmMatchCandidate_t e0; + public ldmMatchCandidate_t e1; + public ldmMatchCandidate_t e2; + public ldmMatchCandidate_t e3; + public ldmMatchCandidate_t e4; + public ldmMatchCandidate_t e5; + public ldmMatchCandidate_t e6; + public ldmMatchCandidate_t e7; + public ldmMatchCandidate_t e8; + public ldmMatchCandidate_t e9; + public ldmMatchCandidate_t e10; + public ldmMatchCandidate_t e11; + public ldmMatchCandidate_t e12; + public ldmMatchCandidate_t e13; + public ldmMatchCandidate_t e14; + public ldmMatchCandidate_t e15; + public ldmMatchCandidate_t e16; + public ldmMatchCandidate_t e17; + public ldmMatchCandidate_t e18; + public ldmMatchCandidate_t e19; + public ldmMatchCandidate_t e20; + public ldmMatchCandidate_t e21; + public ldmMatchCandidate_t e22; + public ldmMatchCandidate_t e23; + public ldmMatchCandidate_t e24; + public ldmMatchCandidate_t e25; + public ldmMatchCandidate_t e26; + public ldmMatchCandidate_t e27; + public ldmMatchCandidate_t e28; + public ldmMatchCandidate_t e29; + public ldmMatchCandidate_t e30; + public ldmMatchCandidate_t e31; + public ldmMatchCandidate_t e32; + public ldmMatchCandidate_t e33; + public ldmMatchCandidate_t e34; + public ldmMatchCandidate_t e35; + public ldmMatchCandidate_t e36; + public ldmMatchCandidate_t e37; + public ldmMatchCandidate_t e38; + public ldmMatchCandidate_t e39; + public ldmMatchCandidate_t e40; + public ldmMatchCandidate_t e41; + public ldmMatchCandidate_t e42; + public ldmMatchCandidate_t e43; + public ldmMatchCandidate_t e44; + public ldmMatchCandidate_t e45; + 
public ldmMatchCandidate_t e46; + public ldmMatchCandidate_t e47; + public ldmMatchCandidate_t e48; + public ldmMatchCandidate_t e49; + public ldmMatchCandidate_t e50; + public ldmMatchCandidate_t e51; + public ldmMatchCandidate_t e52; + public ldmMatchCandidate_t e53; + public ldmMatchCandidate_t e54; + public ldmMatchCandidate_t e55; + public ldmMatchCandidate_t e56; + public ldmMatchCandidate_t e57; + public ldmMatchCandidate_t e58; + public ldmMatchCandidate_t e59; + public ldmMatchCandidate_t e60; + public ldmMatchCandidate_t e61; + public ldmMatchCandidate_t e62; + public ldmMatchCandidate_t e63; } +#endif } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs index 6ec0c8030..de28886cf 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/nodeElt_s.cs @@ -1,13 +1,12 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* ************************************************************** + * Required declarations + ****************************************************************/ +public struct nodeElt_s { - /* ************************************************************** - * Required declarations - ****************************************************************/ - public struct nodeElt_s - { - public uint count; - public ushort parent; - public byte @byte; - public byte nbBits; - } + public uint count; + public ushort parent; + public byte @byte; + public byte nbBits; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs index a29f87e27..6f24d4f65 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace 
SharpCompress.Compressors.ZStandard.Unsafe; + +public struct offsetCount_t { - public struct offsetCount_t - { - public uint offset; - public uint count; - } -} + public uint offset; + public uint count; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs index b88e4d555..d15e86840 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/optState_t.cs @@ -1,54 +1,53 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct optState_t { - public unsafe struct optState_t - { - /* table of literals statistics, of size 256 */ - public uint* litFreq; + /* table of literals statistics, of size 256 */ + public uint* litFreq; - /* table of litLength statistics, of size (MaxLL+1) */ - public uint* litLengthFreq; + /* table of litLength statistics, of size (MaxLL+1) */ + public uint* litLengthFreq; - /* table of matchLength statistics, of size (MaxML+1) */ - public uint* matchLengthFreq; + /* table of matchLength statistics, of size (MaxML+1) */ + public uint* matchLengthFreq; - /* table of offCode statistics, of size (MaxOff+1) */ - public uint* offCodeFreq; + /* table of offCode statistics, of size (MaxOff+1) */ + public uint* offCodeFreq; - /* list of found matches, of size ZSTD_OPT_SIZE */ - public ZSTD_match_t* matchTable; + /* list of found matches, of size ZSTD_OPT_SIZE */ + public ZSTD_match_t* matchTable; - /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ - public ZSTD_optimal_t* priceTable; + /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */ + public ZSTD_optimal_t* priceTable; - /* nb of literals */ - public uint litSum; + /* nb of literals */ + public uint litSum; - /* nb of litLength codes */ - public uint litLengthSum; + /* nb of litLength codes */ + public uint litLengthSum; - /* nb of 
matchLength codes */ - public uint matchLengthSum; + /* nb of matchLength codes */ + public uint matchLengthSum; - /* nb of offset codes */ - public uint offCodeSum; + /* nb of offset codes */ + public uint offCodeSum; - /* to compare to log2(litfreq) */ - public uint litSumBasePrice; + /* to compare to log2(litfreq) */ + public uint litSumBasePrice; - /* to compare to log2(llfreq) */ - public uint litLengthSumBasePrice; + /* to compare to log2(llfreq) */ + public uint litLengthSumBasePrice; - /* to compare to log2(mlfreq) */ - public uint matchLengthSumBasePrice; + /* to compare to log2(mlfreq) */ + public uint matchLengthSumBasePrice; - /* to compare to log2(offreq) */ - public uint offCodeSumBasePrice; + /* to compare to log2(offreq) */ + public uint offCodeSumBasePrice; - /* prices can be determined dynamically, or follow a pre-defined cost structure */ - public ZSTD_OptPrice_e priceType; + /* prices can be determined dynamically, or follow a pre-defined cost structure */ + public ZSTD_OptPrice_e priceType; - /* pre-calculated dictionary statistics */ - public ZSTD_entropyCTables_t* symbolCosts; - public ZSTD_paramSwitch_e literalCompressionMode; - } + /* pre-calculated dictionary statistics */ + public ZSTD_entropyCTables_t* symbolCosts; + public ZSTD_paramSwitch_e literalCompressionMode; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs index 874302f61..c93cb1f68 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankPos.cs @@ -1,8 +1,7 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct rankPos { - public struct rankPos - { - public ushort @base; - public ushort curr; - } + public ushort @base; + public ushort curr; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs index be14d6624..ea52c69fa 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs @@ -1,7 +1,6 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct rankValCol_t { - public unsafe struct rankValCol_t - { - public fixed uint Body[13]; - } -} + public fixed uint Body[13]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs index 1161bd217..e2c315d72 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rawSeq.cs @@ -1,14 +1,13 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct rawSeq { - public struct rawSeq - { - /* Offset of sequence */ - public uint offset; + /* Offset of sequence */ + public uint offset; - /* Length of literals prior to match */ - public uint litLength; + /* Length of literals prior to match */ + public uint litLength; - /* Raw length of match */ - public uint matchLength; - } + /* Raw length of match */ + public uint matchLength; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs index d0e18b12a..a9624f7cd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs @@ -1,7 +1,6 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct repcodes_s { - public unsafe struct repcodes_s - { - public fixed uint rep[3]; - } -} + public fixed uint rep[3]; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs index 6823088c2..10395b044 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/searchMethod_e.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public enum searchMethod_e { - public enum searchMethod_e - { - search_hashChain = 0, - search_binaryTree = 1, - search_rowHash = 2, - } + search_hashChain = 0, + search_binaryTree = 1, + search_rowHash = 2, } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs index 5c7496ec1..a6aec9c33 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqState_t.cs @@ -1,18 +1,17 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public unsafe struct seqState_t { - public unsafe struct seqState_t - { - public BIT_DStream_t DStream; - public ZSTD_fseState stateLL; - public ZSTD_fseState stateOffb; - public ZSTD_fseState stateML; - public _prevOffset_e__FixedBuffer prevOffset; + public BIT_DStream_t DStream; + public ZSTD_fseState stateLL; + public ZSTD_fseState stateOffb; + public ZSTD_fseState stateML; + public _prevOffset_e__FixedBuffer prevOffset; - public unsafe struct _prevOffset_e__FixedBuffer - { - public nuint e0; - public nuint e1; - public nuint e2; - } + public unsafe struct _prevOffset_e__FixedBuffer + { + public nuint e0; + public nuint e1; + public nuint e2; } } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs index b227d69f7..83679d5f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seqStoreSplits.cs @@ -1,12 +1,11 @@ -namespace ZstdSharp.Unsafe +namespace 
SharpCompress.Compressors.ZStandard.Unsafe; + +/* Struct to keep track of where we are in our recursive calls. */ +public unsafe struct seqStoreSplits { - /* Struct to keep track of where we are in our recursive calls. */ - public unsafe struct seqStoreSplits - { - /* Array of split indices */ - public uint* splitLocations; + /* Array of split indices */ + public uint* splitLocations; - /* The current index within splitLocations being worked on */ - public nuint idx; - } + /* The current index within splitLocations being worked on */ + public nuint idx; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs index 5dac7aad1..88395a257 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/seq_t.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct seq_t { - public struct seq_t - { - public nuint litLength; - public nuint matchLength; - public nuint offset; - } + public nuint litLength; + public nuint matchLength; + public nuint offset; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs index 6c320a04b..f80373a18 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs @@ -1,7 +1,6 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +public struct sortedSymbol_t { - public struct sortedSymbol_t - { - public byte symbol; - } -} + public byte symbol; +} \ No newline at end of file diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs index db838c61c..77f189ec9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/streaming_operation.cs @@ -1,9 +1,8 @@ -namespace ZstdSharp.Unsafe +namespace SharpCompress.Compressors.ZStandard.Unsafe; + +/* Streaming state is used to inform allocation of the literal buffer */ +public enum streaming_operation { - /* Streaming state is used to inform allocation of the literal buffer */ - public enum streaming_operation - { - not_streaming = 0, - is_streaming = 1, - } + not_streaming = 0, + is_streaming = 1, } diff --git a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs index 3a3722da1..d2ea3575e 100644 --- a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs +++ b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs @@ -3,36 +3,36 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -namespace ZstdSharp +namespace SharpCompress.Compressors.ZStandard; + +public static unsafe class UnsafeHelper { - public static unsafe class UnsafeHelper + public static void* PoisonMemory(void* destination, ulong size) { - public static void* PoisonMemory(void* destination, ulong size) - { - memset(destination, 0xCC, (uint)size); - return destination; - } + memset(destination, 0xCC, (uint)size); + return destination; + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void* malloc(ulong size) - { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void* malloc(ulong size) + { #if NET6_0_OR_GREATER - var ptr = NativeMemory.Alloc((nuint)size); + var ptr = NativeMemory.Alloc((nuint)size); #else var ptr = (void*)Marshal.AllocHGlobal((nint)size); #endif #if DEBUG - return PoisonMemory(ptr, size); + return PoisonMemory(ptr, size); #else return ptr; #endif - } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void* calloc(ulong num, ulong size) - { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void* calloc(ulong num, ulong size) + { #if 
NET6_0_OR_GREATER - return NativeMemory.AllocZeroed((nuint)num, (nuint)size); + return NativeMemory.AllocZeroed((nuint)num, (nuint)size); #else var total = num * size; assert(total <= uint.MaxValue); @@ -40,31 +40,31 @@ public static unsafe class UnsafeHelper memset(destination, 0, (uint)total); return destination; #endif - } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memcpy(void* destination, void* source, uint size) => - System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memcpy(void* destination, void* source, uint size) => + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memset(void* memPtr, byte val, uint size) => - System.Runtime.CompilerServices.Unsafe.InitBlockUnaligned(memPtr, val, size); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memset(void* memPtr, byte val, uint size) => + System.Runtime.CompilerServices.Unsafe.InitBlockUnaligned(memPtr, val, size); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void free(void* ptr) - { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void free(void* ptr) + { #if NET6_0_OR_GREATER - NativeMemory.Free(ptr); + NativeMemory.Free(ptr); #else Marshal.FreeHGlobal((IntPtr)ptr); #endif - } + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static T* GetArrayPointer(T[] array) - where T : unmanaged - { - var size = (uint)(sizeof(T) * array.Length); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static T* GetArrayPointer(T[] array) + where T : unmanaged + { + var size = (uint)(sizeof(T) * array.Length); #if NET9_0_OR_GREATER // This function is used to allocate memory for static data blocks. 
// We have to use AllocateTypeAssociatedMemory and link the memory's @@ -75,38 +75,37 @@ public static void free(void* ptr) var destination = (T*) RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); #else - var destination = (T*)malloc(size); + var destination = (T*)malloc(size); #endif - fixed (void* source = &array[0]) - System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned( - destination, - source, - size - ); + fixed (void* source = &array[0]) + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned( + destination, + source, + size + ); - return destination; - } + return destination; + } - [Conditional("DEBUG")] - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void assert(bool condition, string? message = null) - { - if (!condition) - throw new ArgumentException(message ?? "assert failed"); - } + [Conditional("DEBUG")] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void assert(bool condition, string? message = null) + { + if (!condition) + throw new ArgumentException(message ?? 
"assert failed"); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void memmove(void* destination, void* source, ulong size) => - Buffer.MemoryCopy(source, destination, size, size); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void memmove(void* destination, void* source, ulong size) => + Buffer.MemoryCopy(source, destination, size, size); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int memcmp(void* buf1, void* buf2, ulong size) - { - assert(size <= int.MaxValue); - var intSize = (int)size; - return new ReadOnlySpan(buf1, intSize).SequenceCompareTo( - new ReadOnlySpan(buf2, intSize) - ); - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int memcmp(void* buf1, void* buf2, ulong size) + { + assert(size <= int.MaxValue); + var intSize = (int)size; + return new ReadOnlySpan(buf1, intSize).SequenceCompareTo( + new ReadOnlySpan(buf2, intSize) + ); } } diff --git a/src/SharpCompress/Compressors/ZStandard/ZStandardStream.cs b/src/SharpCompress/Compressors/ZStandard/ZStandardStream.cs index 61f7fbb46..d465c8355 100644 --- a/src/SharpCompress/Compressors/ZStandard/ZStandardStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/ZStandardStream.cs @@ -8,7 +8,7 @@ namespace SharpCompress.Compressors.ZStandard; -internal class ZStandardStream : ZstdSharp.DecompressionStream, IStreamStack +internal class ZStandardStream : DecompressionStream, IStreamStack { #if DEBUG_STREAMS long IStreamStack.InstanceId { get; set; } diff --git a/src/SharpCompress/Compressors/ZStandard/ZstdException.cs b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs index 1427bd84f..55d79579f 100644 --- a/src/SharpCompress/Compressors/ZStandard/ZstdException.cs +++ b/src/SharpCompress/Compressors/ZStandard/ZstdException.cs @@ -1,14 +1,12 @@ -using System; -using SharpCompress.Common; -using ZstdSharp.Unsafe; +using SharpCompress.Common; +using SharpCompress.Compressors.ZStandard.Unsafe; -namespace ZstdSharp 
+namespace SharpCompress.Compressors.ZStandard; + +public class ZstdException : SharpCompressException { - public class ZstdException : SharpCompressException - { - public ZstdException(ZSTD_ErrorCode code, string message) - : base(message) => Code = code; + public ZstdException(ZSTD_ErrorCode code, string message) + : base(message) => Code = code; - public ZSTD_ErrorCode Code { get; } - } + public ZSTD_ErrorCode Code { get; } } diff --git a/src/SharpCompress/Factories/ArcFactory.cs b/src/SharpCompress/Factories/ArcFactory.cs index b5180afae..040263e27 100644 --- a/src/SharpCompress/Factories/ArcFactory.cs +++ b/src/SharpCompress/Factories/ArcFactory.cs @@ -10,37 +10,36 @@ using SharpCompress.Readers.Arc; using static System.Net.Mime.MediaTypeNames; -namespace SharpCompress.Factories -{ - public class ArcFactory : Factory, IReaderFactory - { - public override string Name => "Arc"; +namespace SharpCompress.Factories; - public override ArchiveType? KnownArchiveType => ArchiveType.Arc; +public class ArcFactory : Factory, IReaderFactory +{ + public override string Name => "Arc"; - public override IEnumerable GetSupportedExtensions() - { - yield return "arc"; - } + public override ArchiveType? KnownArchiveType => ArchiveType.Arc; - public override bool IsArchive( - Stream stream, - string? password = null, - int bufferSize = ReaderOptions.DefaultBufferSize - ) - { - //You may have to use some(paranoid) checks to ensure that you actually are - //processing an ARC file, since other archivers also adopted the idea of putting - //a 01Ah byte at offset 0, namely the Hyper archiver. To check if you have a - //Hyper - archive, check the next two bytes for "HP" or "ST"(or look below for - //"HYP").Also the ZOO archiver also does put a 01Ah at the start of the file, - //see the ZOO entry below. 
- var bytes = new byte[2]; - stream.Read(bytes, 0, 2); - return bytes[0] == 0x1A && bytes[1] < 10; //rather thin, but this is all we have - } + public override IEnumerable GetSupportedExtensions() + { + yield return "arc"; + } - public IReader OpenReader(Stream stream, ReaderOptions? options) => - ArcReader.Open(stream, options); + public override bool IsArchive( + Stream stream, + string? password = null, + int bufferSize = ReaderOptions.DefaultBufferSize + ) + { + //You may have to use some(paranoid) checks to ensure that you actually are + //processing an ARC file, since other archivers also adopted the idea of putting + //a 01Ah byte at offset 0, namely the Hyper archiver. To check if you have a + //Hyper - archive, check the next two bytes for "HP" or "ST"(or look below for + //"HYP").Also the ZOO archiver also does put a 01Ah at the start of the file, + //see the ZOO entry below. + var bytes = new byte[2]; + stream.Read(bytes, 0, 2); + return bytes[0] == 0x1A && bytes[1] < 10; //rather thin, but this is all we have } -} + + public IReader OpenReader(Stream stream, ReaderOptions? options) => + ArcReader.Open(stream, options); +} \ No newline at end of file diff --git a/src/SharpCompress/IO/IStreamStack.cs b/src/SharpCompress/IO/IStreamStack.cs index 56c020c8f..c572de1ec 100644 --- a/src/SharpCompress/IO/IStreamStack.cs +++ b/src/SharpCompress/IO/IStreamStack.cs @@ -6,40 +6,40 @@ using System.Linq; using System.Text; -namespace SharpCompress.IO -{ - public interface IStreamStack - { - /// - /// Gets or sets the default buffer size to be applied when buffering is enabled for this stream stack. - /// This value is used by the SetBuffer extension method to configure buffering on the appropriate stream - /// in the stack hierarchy. A value of 0 indicates no default buffer size is set. - /// - int DefaultBufferSize { get; set; } - - /// - /// Returns the immediate underlying stream in the stack. 
- /// - Stream BaseStream(); - - /// - /// Gets or sets the size of the buffer if the stream supports buffering; otherwise, returns 0. - /// This property must not throw. - /// - int BufferSize { get; set; } - - /// - /// Gets or sets the current position within the buffer if the stream supports buffering; otherwise, returns 0. - /// This property must not throw. - /// - int BufferPosition { get; set; } +namespace SharpCompress.IO; - /// - /// Updates the internal position state of the stream. This should not perform seeking on the underlying stream, - /// but should update any internal position or buffer state as appropriate for the stream implementation. - /// - /// The absolute position to set within the stream stack. - void SetPosition(long position); +public interface IStreamStack +{ + /// + /// Gets or sets the default buffer size to be applied when buffering is enabled for this stream stack. + /// This value is used by the SetBuffer extension method to configure buffering on the appropriate stream + /// in the stack hierarchy. A value of 0 indicates no default buffer size is set. + /// + int DefaultBufferSize { get; set; } + + /// + /// Returns the immediate underlying stream in the stack. + /// + Stream BaseStream(); + + /// + /// Gets or sets the size of the buffer if the stream supports buffering; otherwise, returns 0. + /// This property must not throw. + /// + int BufferSize { get; set; } + + /// + /// Gets or sets the current position within the buffer if the stream supports buffering; otherwise, returns 0. + /// This property must not throw. + /// + int BufferPosition { get; set; } + + /// + /// Updates the internal position state of the stream. This should not perform seeking on the underlying stream, + /// but should update any internal position or buffer state as appropriate for the stream implementation. + /// + /// The absolute position to set within the stream stack. 
+ void SetPosition(long position); #if DEBUG_STREAMS /// @@ -47,197 +47,197 @@ public interface IStreamStack /// long InstanceId { get; set; } #endif - } +} - internal static class StackStreamExtensions +internal static class StackStreamExtensions +{ + /// + /// Gets the logical position of the first buffering stream in the stack, or 0 if none exist. + /// + /// The most derived (outermost) stream in the stack. + /// The position of the first buffering stream, or 0 if not found. + internal static long GetPosition(this IStreamStack stream) { - /// - /// Gets the logical position of the first buffering stream in the stack, or 0 if none exist. - /// - /// The most derived (outermost) stream in the stack. - /// The position of the first buffering stream, or 0 if not found. - internal static long GetPosition(this IStreamStack stream) - { - IStreamStack? current = stream; + IStreamStack? current = stream; - while (current != null) + while (current != null) + { + if (current.BufferSize != 0 && current is Stream st) { - if (current.BufferSize != 0 && current is Stream st) - { - return st.Position; - } - current = current?.BaseStream() as IStreamStack; + return st.Position; } - return 0; + current = current?.BaseStream() as IStreamStack; } + return 0; + } - /// - /// Rewinds the buffer of the outermost buffering stream in the stack by the specified count, if supported. - /// Only the most derived buffering stream is affected. - /// - /// The most derived (outermost) stream in the stack. - /// The number of bytes to rewind within the buffer. - internal static void Rewind(this IStreamStack stream, int count) - { - Stream baseStream = stream.BaseStream(); - Stream thisStream = (Stream)stream; - IStreamStack? buffStream = null; - IStreamStack? current = stream; + /// + /// Rewinds the buffer of the outermost buffering stream in the stack by the specified count, if supported. + /// Only the most derived buffering stream is affected. 
+ /// + /// The most derived (outermost) stream in the stack. + /// The number of bytes to rewind within the buffer. + internal static void Rewind(this IStreamStack stream, int count) + { + Stream baseStream = stream.BaseStream(); + Stream thisStream = (Stream)stream; + IStreamStack? buffStream = null; + IStreamStack? current = stream; - while (buffStream == null && current != null) + while (buffStream == null && current != null) + { + if (current.BufferSize != 0) { - if (current.BufferSize != 0) - { - buffStream = current; - buffStream.BufferPosition -= Math.Min(buffStream.BufferPosition, count); - } - current = current?.BaseStream() as IStreamStack; + buffStream = current; + buffStream.BufferPosition -= Math.Min(buffStream.BufferPosition, count); } + current = current?.BaseStream() as IStreamStack; } + } - /// - /// Sets the buffer size on the first buffering stream in the stack, or on the outermost stream if none exist. - /// If is true, sets the buffer size regardless of current value. - /// - /// The most derived (outermost) stream in the stack. - /// The buffer size to set. - /// If true, forces the buffer size to be set even if already set. - internal static void SetBuffer(this IStreamStack stream, int bufferSize, bool force) - { - if (bufferSize == 0 || stream == null) - return; + /// + /// Sets the buffer size on the first buffering stream in the stack, or on the outermost stream if none exist. + /// If is true, sets the buffer size regardless of current value. + /// + /// The most derived (outermost) stream in the stack. + /// The buffer size to set. + /// If true, forces the buffer size to be set even if already set. + internal static void SetBuffer(this IStreamStack stream, int bufferSize, bool force) + { + if (bufferSize == 0 || stream == null) + return; - IStreamStack? current = stream; - IStreamStack defaultBuffer = stream; - IStreamStack? buffer = null; + IStreamStack? current = stream; + IStreamStack defaultBuffer = stream; + IStreamStack? 
buffer = null; - // First pass: find the deepest IStreamStack - while (current != null) - { - defaultBuffer = current; - if (buffer == null && ((current.BufferSize != 0 && bufferSize != 0) || force)) - buffer = current; - if (defaultBuffer.DefaultBufferSize != 0) - break; - current = current.BaseStream() as IStreamStack; - } - if (defaultBuffer.DefaultBufferSize == 0) - defaultBuffer.DefaultBufferSize = bufferSize; - (buffer ?? stream).BufferSize = bufferSize; + // First pass: find the deepest IStreamStack + while (current != null) + { + defaultBuffer = current; + if (buffer == null && ((current.BufferSize != 0 && bufferSize != 0) || force)) + buffer = current; + if (defaultBuffer.DefaultBufferSize != 0) + break; + current = current.BaseStream() as IStreamStack; } + if (defaultBuffer.DefaultBufferSize == 0) + defaultBuffer.DefaultBufferSize = bufferSize; + (buffer ?? stream).BufferSize = bufferSize; + } - /// - /// Attempts to set the position in the stream stack. If a buffering stream is present and the position is within its buffer, - /// BufferPosition is set on the outermost buffering stream and all intermediate streams update their internal state via SetPosition. - /// If no buffering stream is present, seeks as close to the root stream as possible and updates all intermediate streams' state via SetPosition. - /// Seeking is never performed if any intermediate stream in the stack is buffering. - /// Throws if the position cannot be set. - /// - /// - /// The most derived (outermost) stream in the stack. The method traverses up the stack via BaseStream() until a stream can satisfy the buffer or seek request. - /// - /// The absolute position to set. - /// The position that was set. - internal static long StackSeek(this IStreamStack stream, long position) + /// + /// Attempts to set the position in the stream stack. 
If a buffering stream is present and the position is within its buffer, + /// BufferPosition is set on the outermost buffering stream and all intermediate streams update their internal state via SetPosition. + /// If no buffering stream is present, seeks as close to the root stream as possible and updates all intermediate streams' state via SetPosition. + /// Seeking is never performed if any intermediate stream in the stack is buffering. + /// Throws if the position cannot be set. + /// + /// + /// The most derived (outermost) stream in the stack. The method traverses up the stack via BaseStream() until a stream can satisfy the buffer or seek request. + /// + /// The absolute position to set. + /// The position that was set. + internal static long StackSeek(this IStreamStack stream, long position) + { + var stack = new List(); + Stream? current = stream as Stream; + int lastBufferingIndex = -1; + int firstSeekableIndex = -1; + Stream? firstSeekableStream = null; + + // Traverse the stack, collecting info + while (current is IStreamStack stackStream) { - var stack = new List(); - Stream? current = stream as Stream; - int lastBufferingIndex = -1; - int firstSeekableIndex = -1; - Stream? 
firstSeekableStream = null; - - // Traverse the stack, collecting info - while (current is IStreamStack stackStream) + stack.Add(stackStream); + if (stackStream.BufferSize > 0) { - stack.Add(stackStream); - if (stackStream.BufferSize > 0) - { - lastBufferingIndex = stack.Count - 1; - break; - } - current = stackStream.BaseStream(); + lastBufferingIndex = stack.Count - 1; + break; } + current = stackStream.BaseStream(); + } - // Find the first seekable stream (closest to the root) - if (current != null && current.CanSeek) - { - firstSeekableIndex = stack.Count; - firstSeekableStream = current; - } + // Find the first seekable stream (closest to the root) + if (current != null && current.CanSeek) + { + firstSeekableIndex = stack.Count; + firstSeekableStream = current; + } - // If any buffering stream exists, try to set BufferPosition on the outermost one - if (lastBufferingIndex != -1) + // If any buffering stream exists, try to set BufferPosition on the outermost one + if (lastBufferingIndex != -1) + { + var bufferingStream = stack[lastBufferingIndex]; + if (position >= 0 && position < bufferingStream.BufferSize) { - var bufferingStream = stack[lastBufferingIndex]; - if (position >= 0 && position < bufferingStream.BufferSize) - { - bufferingStream.BufferPosition = (int)position; - return position; - } - else - { - // If position is not in buffer, reset buffer and proceed as non-buffering - bufferingStream.BufferPosition = 0; - } - // Continue to seek as if no buffer is present + bufferingStream.BufferPosition = (int)position; + return position; } - - // If no buffering, or buffer was reset, seek at the first seekable stream (closest to the root) - if (firstSeekableStream != null) + else { - firstSeekableStream.Seek(position, SeekOrigin.Begin); - return firstSeekableStream.Position; + // If position is not in buffer, reset buffer and proceed as non-buffering + bufferingStream.BufferPosition = 0; } - - throw new NotSupportedException( - "Cannot set position on this 
stream stack (no seekable or buffering stream supports the requested position)." - ); + // Continue to seek as if no buffer is present } - /// - /// Reads bytes from the stream, using the position to observe how much was actually consumed and rewind the buffer to ensure further reads are correct. - /// This is required to prevent buffered reads from skipping data, while also benefiting from buffering and reduced stream IO reads. - /// - /// The stream to read from. - /// The buffer to read data into. - /// The offset in the buffer to start writing data. - /// The maximum number of bytes to read. - /// Returns the buffering stream found in the stack, or null if none exists. - /// Returns the number of bytes actually read from the base stream, or -1 if no buffering stream was found. - /// The number of bytes read into the buffer. - internal static int Read( - this IStreamStack stream, - byte[] buffer, - int offset, - int count, - out IStreamStack? buffStream, - out int baseReadCount - ) + // If no buffering, or buffer was reset, seek at the first seekable stream (closest to the root) + if (firstSeekableStream != null) { - Stream baseStream = stream.BaseStream(); - Stream thisStream = (Stream)stream; - IStreamStack? current = stream; - buffStream = null; - baseReadCount = -1; + firstSeekableStream.Seek(position, SeekOrigin.Begin); + return firstSeekableStream.Position; + } - while (buffStream == null && (current = current?.BaseStream() as IStreamStack) != null) + throw new NotSupportedException( + "Cannot set position on this stream stack (no seekable or buffering stream supports the requested position)." + ); + } + + /// + /// Reads bytes from the stream, using the position to observe how much was actually consumed and rewind the buffer to ensure further reads are correct. + /// This is required to prevent buffered reads from skipping data, while also benefiting from buffering and reduced stream IO reads. + /// + /// The stream to read from. 
+ /// The buffer to read data into. + /// The offset in the buffer to start writing data. + /// The maximum number of bytes to read. + /// Returns the buffering stream found in the stack, or null if none exists. + /// Returns the number of bytes actually read from the base stream, or -1 if no buffering stream was found. + /// The number of bytes read into the buffer. + internal static int Read( + this IStreamStack stream, + byte[] buffer, + int offset, + int count, + out IStreamStack? buffStream, + out int baseReadCount + ) + { + Stream baseStream = stream.BaseStream(); + Stream thisStream = (Stream)stream; + IStreamStack? current = stream; + buffStream = null; + baseReadCount = -1; + + while (buffStream == null && (current = current?.BaseStream() as IStreamStack) != null) + { + if (current.BufferSize != 0) { - if (current.BufferSize != 0) - { - buffStream = current; - } + buffStream = current; } + } - long buffPos = buffStream == null ? -1 : ((Stream)buffStream).Position; + long buffPos = buffStream == null ? 
-1 : ((Stream)buffStream).Position; - int read = baseStream.Read(buffer, offset, count); //amount read in to buffer + int read = baseStream.Read(buffer, offset, count); //amount read in to buffer - if (buffPos != -1) - { - baseReadCount = (int)(((Stream)buffStream!).Position - buffPos); - } - return read; + if (buffPos != -1) + { + baseReadCount = (int)(((Stream)buffStream!).Position - buffPos); } + return read; + } #if DEBUG_STREAMS private static long _instanceCounter = 0; @@ -360,5 +360,4 @@ public static string GetStreamStackString(this IStreamStack stream, bool constru return sb.ToString(); } #endif - } } diff --git a/src/SharpCompress/Readers/Arc/ArcReader.cs b/src/SharpCompress/Readers/Arc/ArcReader.cs index 7d58b8d42..6ad6a3822 100644 --- a/src/SharpCompress/Readers/Arc/ArcReader.cs +++ b/src/SharpCompress/Readers/Arc/ArcReader.cs @@ -7,35 +7,34 @@ using SharpCompress.Common; using SharpCompress.Common.Arc; -namespace SharpCompress.Readers.Arc +namespace SharpCompress.Readers.Arc; + +public class ArcReader : AbstractReader { - public class ArcReader : AbstractReader - { - private ArcReader(Stream stream, ReaderOptions options) - : base(options, ArchiveType.Arc) => Volume = new ArcVolume(stream, options, 0); + private ArcReader(Stream stream, ReaderOptions options) + : base(options, ArchiveType.Arc) => Volume = new ArcVolume(stream, options, 0); - public override ArcVolume Volume { get; } + public override ArcVolume Volume { get; } - /// - /// Opens an ArcReader for Non-seeking usage with a single volume - /// - /// - /// - /// - public static ArcReader Open(Stream stream, ReaderOptions? options = null) - { - stream.CheckNotNull(nameof(stream)); - return new ArcReader(stream, options ?? new ReaderOptions()); - } + /// + /// Opens an ArcReader for Non-seeking usage with a single volume + /// + /// + /// + /// + public static ArcReader Open(Stream stream, ReaderOptions? 
options = null) + { + stream.CheckNotNull(nameof(stream)); + return new ArcReader(stream, options ?? new ReaderOptions()); + } - protected override IEnumerable GetEntries(Stream stream) + protected override IEnumerable GetEntries(Stream stream) + { + ArcEntryHeader headerReader = new ArcEntryHeader(new ArchiveEncoding()); + ArcEntryHeader? header; + while ((header = headerReader.ReadHeader(stream)) != null) { - ArcEntryHeader headerReader = new ArcEntryHeader(new ArchiveEncoding()); - ArcEntryHeader? header; - while ((header = headerReader.ReadHeader(stream)) != null) - { - yield return new ArcEntry(new ArcFilePart(header, stream)); - } + yield return new ArcEntry(new ArcFilePart(header, stream)); } } -} +} \ No newline at end of file diff --git a/src/SharpCompress/Writers/Zip/ZipWriter.cs b/src/SharpCompress/Writers/Zip/ZipWriter.cs index 55abe4405..5c5406e63 100644 --- a/src/SharpCompress/Writers/Zip/ZipWriter.cs +++ b/src/SharpCompress/Writers/Zip/ZipWriter.cs @@ -11,6 +11,7 @@ using SharpCompress.Compressors.Deflate; using SharpCompress.Compressors.LZMA; using SharpCompress.Compressors.PPMd; +using SharpCompress.Compressors.ZStandard; using SharpCompress.IO; namespace SharpCompress.Writers.Zip; @@ -396,7 +397,7 @@ private Stream GetWriteStream(Stream writeStream) } case ZipCompressionMethod.ZStandard: { - return new ZstdSharp.CompressionStream(counting, compressionLevel); + return new CompressionStream(counting, compressionLevel); } default: { From ee2cbc8051bff0f4ea92cb1fc9aaecb406d4f145 Mon Sep 17 00:00:00 2001 From: Adam Hathcock Date: Mon, 13 Oct 2025 17:02:41 +0100 Subject: [PATCH 6/6] fmt --- src/SharpCompress/Common/Arc/ArcFilePart.cs | 13 +- .../Compressors/Filters/DeltaFilter.cs | 2 +- .../Compressors/Lzw/LzwConstants.cs | 2 +- .../Compressors/Lzw/LzwStream.cs | 30 +- .../Compressors/RLE90/RunLength90Stream.cs | 6 +- .../Compressors/Squeezed/SqueezedStream.cs | 6 +- .../ZStandard/CompressionStream.cs | 66 +- .../Compressors/ZStandard/Compressor.cs | 30 
+- .../Compressors/ZStandard/Constants.cs | 2 +- .../ZStandard/DecompressionStream.cs | 61 +- .../Compressors/ZStandard/Decompressor.cs | 36 +- .../Compressors/ZStandard/SafeHandles.cs | 10 +- .../ZStandard/SynchronizationWrapper.cs | 2 +- .../ZStandard/Unsafe/BIT_CStream_t.cs | 2 +- .../ZStandard/Unsafe/BIT_DStream_status.cs | 2 +- .../Compressors/ZStandard/Unsafe/Bitstream.cs | 174 ++- .../ZStandard/Unsafe/BlockSummary.cs | 2 +- .../ZStandard/Unsafe/COVER_best_s.cs | 2 +- .../ZStandard/Unsafe/COVER_ctx_t.cs | 2 +- .../ZStandard/Unsafe/COVER_map_s.cs | 2 +- .../ZStandard/Unsafe/COVER_segment_t.cs | 2 +- .../Compressors/ZStandard/Unsafe/Cover.cs | 17 +- .../ZStandard/Unsafe/DTableDesc.cs | 2 +- .../ZStandard/Unsafe/EStats_ress_t.cs | 2 +- .../ZStandard/Unsafe/EntropyCommon.cs | 20 +- .../ZStandard/Unsafe/EstimatedBlockSize.cs | 2 +- .../ZStandard/Unsafe/FASTCOVER_ctx_t.cs | 2 +- .../Unsafe/FASTCOVER_tryParameters_data_s.cs | 2 +- .../ZStandard/Unsafe/FSE_DTableHeader.cs | 2 +- .../ZStandard/Unsafe/FSE_DecompressWksp.cs | 2 +- .../Compressors/ZStandard/Unsafe/Fastcover.cs | 56 +- .../Compressors/ZStandard/Unsafe/Fse.cs | 21 +- .../ZStandard/Unsafe/FseCompress.cs | 79 +- .../ZStandard/Unsafe/FseDecompress.cs | 45 +- .../ZStandard/Unsafe/HIST_checkInput_e.cs | 2 +- .../Unsafe/HUF_CompressWeightsWksp.cs | 2 +- .../Unsafe/HUF_DecompressFastArgs.cs | 2 +- .../Unsafe/HUF_ReadDTableX1_Workspace.cs | 2 +- .../Unsafe/HUF_ReadDTableX2_Workspace.cs | 20 +- .../Unsafe/HUF_buildCTable_wksp_tables.cs | 22 +- .../ZStandard/Unsafe/HUF_compress_tables_t.cs | 10 +- .../Compressors/ZStandard/Unsafe/Hist.cs | 11 +- .../ZStandard/Unsafe/HufCompress.cs | 76 +- .../ZStandard/Unsafe/HufDecompress.cs | 175 +-- .../Compressors/ZStandard/Unsafe/Mem.cs | 4 +- .../ZStandard/Unsafe/RSyncState_t.cs | 2 +- .../ZStandard/Unsafe/RawSeqStore_t.cs | 8 +- .../ZStandard/Unsafe/RoundBuff_t.cs | 2 +- .../Compressors/ZStandard/Unsafe/SeqDef_s.cs | 2 +- .../ZStandard/Unsafe/XXH32_canonical_t.cs | 2 +- 
.../Compressors/ZStandard/Unsafe/Xxhash.cs | 29 +- .../ZStandard/Unsafe/ZDICT_cover_params_t.cs | 2 +- .../ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs | 2 +- .../ZStandard/Unsafe/ZSTD_CDict_s.cs | 2 +- .../ZStandard/Unsafe/ZSTD_DDictHashSet.cs | 2 +- .../ZStandard/Unsafe/ZSTD_DDict_s.cs | 2 +- .../ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_ErrorCode.cs | 2 +- .../ZStandard/Unsafe/ZSTD_OptPrice_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_SequencePosition.cs | 2 +- .../Unsafe/ZSTD_buffered_policy_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_cStreamStage.cs | 2 +- .../Unsafe/ZSTD_compResetPolicy_e.cs | 2 +- .../Unsafe/ZSTD_compressionStage_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_customMem.cs | 2 +- .../Unsafe/ZSTD_cwksp_static_alloc_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_dStreamStage.cs | 2 +- .../Unsafe/ZSTD_dictContentType_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_dictUses_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_entropyDTables_t.cs | 32 +- .../Unsafe/ZSTD_forceIgnoreChecksum_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_frameHeader.cs | 2 +- .../ZStandard/Unsafe/ZSTD_frameProgression.cs | 2 +- .../ZStandard/Unsafe/ZSTD_frameType_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_fseCTables_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_hufCTables_t.cs | 10 +- .../ZStandard/Unsafe/ZSTD_inBuffer_s.cs | 2 +- .../Unsafe/ZSTD_indexResetPolicy_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_localDict.cs | 2 +- .../ZStandard/Unsafe/ZSTD_longLengthType_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_match_t.cs | 2 +- .../ZStandard/Unsafe/ZSTD_outBuffer_s.cs | 2 +- .../ZStandard/Unsafe/ZSTD_parameters.cs | 2 +- .../ZStandard/Unsafe/ZSTD_resetTarget_e.cs | 2 +- .../ZStandard/Unsafe/ZSTD_seqSymbol.cs | 2 +- .../ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs | 2 +- .../Unsafe/ZSTD_tableFillPurpose_e.cs | 2 +- .../Compressors/ZStandard/Unsafe/Zdict.cs | 36 +- .../ZStandard/Unsafe/ZstdCompress.cs | 995 ++++++------------ .../ZStandard/Unsafe/ZstdCompressInternal.cs | 480 ++++----- .../ZStandard/Unsafe/ZstdCompressLiterals.cs | 
55 +- .../ZStandard/Unsafe/ZstdCompressSequences.cs | 581 +++++----- .../Unsafe/ZstdCompressSuperblock.cs | 24 +- .../Compressors/ZStandard/Unsafe/ZstdCwksp.cs | 19 +- .../Compressors/ZStandard/Unsafe/ZstdDdict.cs | 26 +- .../ZStandard/Unsafe/ZstdDecompress.cs | 633 +++++------ .../ZStandard/Unsafe/ZstdDecompressBlock.cs | 584 ++++------ .../Unsafe/ZstdDecompressInternal.cs | 378 +++---- .../ZStandard/Unsafe/ZstdDoubleFast.cs | 170 +-- .../Compressors/ZStandard/Unsafe/ZstdFast.cs | 72 +- .../ZStandard/Unsafe/ZstdInternal.cs | 540 +++++----- .../Compressors/ZStandard/Unsafe/ZstdLazy.cs | 746 ++++--------- .../Compressors/ZStandard/Unsafe/ZstdLdm.cs | 85 +- .../ZStandard/Unsafe/ZstdLdmGeartab.cs | 530 +++++----- .../Compressors/ZStandard/Unsafe/ZstdOpt.cs | 306 +++--- .../ZStandard/Unsafe/ZstdPresplit.cs | 15 +- .../ZStandard/Unsafe/ZstdmtCompress.cs | 122 +-- .../Compressors/ZStandard/Unsafe/inBuff_t.cs | 2 +- .../ZStandard/Unsafe/ldmRollingHashState_t.cs | 2 +- .../ZStandard/Unsafe/ldmState_t.cs | 20 +- .../ZStandard/Unsafe/offsetCount_t.cs | 2 +- .../ZStandard/Unsafe/rankValCol_t.cs | 2 +- .../ZStandard/Unsafe/repcodes_s.cs | 2 +- .../ZStandard/Unsafe/sortedSymbol_t.cs | 2 +- .../Compressors/ZStandard/UnsafeHelper.cs | 38 +- src/SharpCompress/Factories/ArcFactory.cs | 2 +- src/SharpCompress/IO/IStreamStack.cs | 212 ++-- src/SharpCompress/Readers/Arc/ArcReader.cs | 2 +- 118 files changed, 3238 insertions(+), 4632 deletions(-) diff --git a/src/SharpCompress/Common/Arc/ArcFilePart.cs b/src/SharpCompress/Common/Arc/ArcFilePart.cs index 231733857..da936236b 100644 --- a/src/SharpCompress/Common/Arc/ArcFilePart.cs +++ b/src/SharpCompress/Common/Arc/ArcFilePart.cs @@ -45,20 +45,13 @@ internal override Stream GetCompressedStream() ); break; case CompressionType.RLE90: - compressedStream = new RunLength90Stream( - _stream, - (int)Header.CompressedSize - ); + compressedStream = new RunLength90Stream(_stream, (int)Header.CompressedSize); break; case CompressionType.Squeezed: 
compressedStream = new SqueezeStream(_stream, (int)Header.CompressedSize); break; case CompressionType.Crunched: - compressedStream = new ArcLzwStream( - _stream, - (int)Header.CompressedSize, - true - ); + compressedStream = new ArcLzwStream(_stream, (int)Header.CompressedSize, true); break; default: throw new NotSupportedException( @@ -71,4 +64,4 @@ internal override Stream GetCompressedStream() } internal override Stream? GetRawStream() => _stream; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/Filters/DeltaFilter.cs b/src/SharpCompress/Compressors/Filters/DeltaFilter.cs index 2f1c2b2eb..5f7593ce9 100644 --- a/src/SharpCompress/Compressors/Filters/DeltaFilter.cs +++ b/src/SharpCompress/Compressors/Filters/DeltaFilter.cs @@ -32,4 +32,4 @@ protected override int Transform(byte[] buffer, int offset, int count) return count; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/Lzw/LzwConstants.cs b/src/SharpCompress/Compressors/Lzw/LzwConstants.cs index 7e8a63997..d36210e8a 100644 --- a/src/SharpCompress/Compressors/Lzw/LzwConstants.cs +++ b/src/SharpCompress/Compressors/Lzw/LzwConstants.cs @@ -61,4 +61,4 @@ public sealed class LzwConstants public const int INIT_BITS = 9; private LzwConstants() { } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/Lzw/LzwStream.cs b/src/SharpCompress/Compressors/Lzw/LzwStream.cs index c6a0d09b0..1dd2736fb 100644 --- a/src/SharpCompress/Compressors/Lzw/LzwStream.cs +++ b/src/SharpCompress/Compressors/Lzw/LzwStream.cs @@ -46,7 +46,7 @@ namespace SharpCompress.Compressors.Lzw; public class LzwStream : Stream, IStreamStack { #if DEBUG_STREAMS - long IStreamStack.InstanceId { get; set; } + long IStreamStack.InstanceId { get; set; } #endif int IStreamStack.DefaultBufferSize { get; set; } @@ -112,7 +112,7 @@ public LzwStream(Stream baseInputStream) { this.baseInputStream = baseInputStream; #if DEBUG_STREAMS - this.DebugConstruct(typeof(LzwStream)); + 
this.DebugConstruct(typeof(LzwStream)); #endif } @@ -237,13 +237,13 @@ public override int Read(byte[] buffer, int offset, int count) // read next code int pos = lBitPos >> 3; int code = - ( ( - (lData[pos] & 0xFF) - | ((lData[pos + 1] & 0xFF) << 8) - | ((lData[pos + 2] & 0xFF) << 16) - ) >> (lBitPos & 0x7) - ) & lBitMask; + ( + (lData[pos] & 0xFF) + | ((lData[pos + 1] & 0xFF) << 8) + | ((lData[pos + 2] & 0xFF) << 16) + ) >> (lBitPos & 0x7) + ) & lBitMask; lBitPos += lNBits; @@ -251,9 +251,7 @@ public override int Read(byte[] buffer, int offset, int count) if (lOldCode == -1) { if (code >= 256) - throw new IncompleteArchiveException( - "corrupt input: " + code + " > 255" - ); + throw new IncompleteArchiveException("corrupt input: " + code + " > 255"); lFinChar = (byte)(lOldCode = code); buffer[offset++] = lFinChar; @@ -423,10 +421,10 @@ private void ParseHeader() { throw new ArchiveException( "Stream compressed with " - + maxBits - + " bits, but decompression can only handle " - + LzwConstants.MAX_BITS - + " bits." + + maxBits + + " bits, but decompression can only handle " + + LzwConstants.MAX_BITS + + " bits." 
); } @@ -564,7 +562,7 @@ protected override void Dispose(bool disposing) { isClosed = true; #if DEBUG_STREAMS - this.DebugDispose(typeof(LzwStream)); + this.DebugDispose(typeof(LzwStream)); #endif if (IsStreamOwner) { diff --git a/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs b/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs index d70685f97..165bfb542 100644 --- a/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs +++ b/src/SharpCompress/Compressors/RLE90/RunLength90Stream.cs @@ -11,7 +11,7 @@ namespace SharpCompress.Compressors.RLE90; public class RunLength90Stream : Stream, IStreamStack { #if DEBUG_STREAMS - long IStreamStack.InstanceId { get; set; } + long IStreamStack.InstanceId { get; set; } #endif int IStreamStack.DefaultBufferSize { get; set; } @@ -40,14 +40,14 @@ public RunLength90Stream(Stream stream, int compressedSize) _stream = stream; _compressedSize = compressedSize; #if DEBUG_STREAMS - this.DebugConstruct(typeof(RunLength90Stream)); + this.DebugConstruct(typeof(RunLength90Stream)); #endif } protected override void Dispose(bool disposing) { #if DEBUG_STREAMS - this.DebugDispose(typeof(RunLength90Stream)); + this.DebugDispose(typeof(RunLength90Stream)); #endif base.Dispose(disposing); } diff --git a/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs b/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs index 95e8b0dc4..f60b415ab 100644 --- a/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs +++ b/src/SharpCompress/Compressors/Squeezed/SqueezedStream.cs @@ -12,7 +12,7 @@ namespace SharpCompress.Compressors.Squeezed; public class SqueezeStream : Stream, IStreamStack { #if DEBUG_STREAMS - long IStreamStack.InstanceId { get; set; } + long IStreamStack.InstanceId { get; set; } #endif int IStreamStack.DefaultBufferSize { get; set; } @@ -42,14 +42,14 @@ public SqueezeStream(Stream stream, int compressedSize) _stream = stream; _compressedSize = compressedSize; #if DEBUG_STREAMS - 
this.DebugConstruct(typeof(SqueezeStream)); + this.DebugConstruct(typeof(SqueezeStream)); #endif } protected override void Dispose(bool disposing) { #if DEBUG_STREAMS - this.DebugDispose(typeof(SqueezeStream)); + this.DebugDispose(typeof(SqueezeStream)); #endif base.Dispose(disposing); } diff --git a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs index 5dc293e17..92de03b34 100644 --- a/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/CompressionStream.cs @@ -77,7 +77,7 @@ public void LoadDictionary(byte[] dict) #if !NETSTANDARD2_0 && !NETFRAMEWORK public override async ValueTask DisposeAsync() #else - public async Task DisposeAsync() + public async Task DisposeAsync() #endif { if (compressor == null) @@ -149,8 +149,8 @@ public override void Write(byte[] buffer, int offset, int count) => public override void Write(ReadOnlySpan buffer) => WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #else - public void Write(ReadOnlySpan buffer) => - WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); + public void Write(ReadOnlySpan buffer) => + WriteInternal(buffer, ZSTD_EndDirective.ZSTD_e_continue); #endif private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directive) @@ -172,9 +172,7 @@ private void WriteInternal(ReadOnlySpan buffer, ZSTD_EndDirective directiv if (written > 0) innerStream.Write(outputBuffer, 0, written); } while ( - directive == ZSTD_EndDirective.ZSTD_e_continue - ? input.pos < input.size - : remaining > 0 + directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0 ); } @@ -185,11 +183,11 @@ private async ValueTask WriteInternalAsync( CancellationToken cancellationToken = default ) #else - private async Task WriteInternalAsync( - ReadOnlyMemory? 
buffer, - ZSTD_EndDirective directive, - CancellationToken cancellationToken = default - ) + private async Task WriteInternalAsync( + ReadOnlyMemory? buffer, + ZSTD_EndDirective directive, + CancellationToken cancellationToken = default + ) #endif { @@ -213,12 +211,10 @@ private async Task WriteInternalAsync( var written = (int)output.pos; if (written > 0) await innerStream - .WriteAsync(outputBuffer, 0, written, cancellationToken) - .ConfigureAwait(false); + .WriteAsync(outputBuffer, 0, written, cancellationToken) + .ConfigureAwait(false); } while ( - directive == ZSTD_EndDirective.ZSTD_e_continue - ? input.pos < input.size - : remaining > 0 + directive == ZSTD_EndDirective.ZSTD_e_continue ? input.pos < input.size : remaining > 0 ); } @@ -229,8 +225,7 @@ public override Task WriteAsync( int offset, int count, CancellationToken cancellationToken - ) => - WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); + ) => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken).AsTask(); public override async ValueTask WriteAsync( ReadOnlyMemory buffer, @@ -240,19 +235,19 @@ await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellation .ConfigureAwait(false); #else - public override Task WriteAsync( - byte[] buffer, - int offset, - int count, - CancellationToken cancellationToken - ) => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken); - - public async Task WriteAsync( - ReadOnlyMemory buffer, - CancellationToken cancellationToken = default - ) => - await WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) - .ConfigureAwait(false); + public override Task WriteAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => WriteAsync(new ReadOnlyMemory(buffer, offset, count), cancellationToken); + + public async Task WriteAsync( + ReadOnlyMemory buffer, + CancellationToken cancellationToken = default + ) => + await 
WriteInternalAsync(buffer, ZSTD_EndDirective.ZSTD_e_continue, cancellationToken) + .ConfigureAwait(false); #endif internal unsafe nuint CompressStream( @@ -267,9 +262,9 @@ ZSTD_EndDirective directive input.src = inputBufferPtr; output.dst = outputBufferPtr; return compressor - .NotNull() - .CompressStream(ref input, ref output, directive) - .EnsureZstdSuccess(); + .NotNull() + .CompressStream(ref input, ref output, directive) + .EnsureZstdSuccess(); } } @@ -285,8 +280,7 @@ public override long Position set => throw new NotSupportedException(); } - public override long Seek(long offset, SeekOrigin origin) => - throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); public override void SetLength(long value) => throw new NotSupportedException(); diff --git a/src/SharpCompress/Compressors/ZStandard/Compressor.cs b/src/SharpCompress/Compressors/ZStandard/Compressor.cs index 8e99d24cc..668606016 100644 --- a/src/SharpCompress/Compressors/ZStandard/Compressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Compressor.cs @@ -62,9 +62,9 @@ public void LoadDictionary(ReadOnlySpan dict) { using var cctx = handle.Acquire(); fixed (byte* dictPtr = dict) - Unsafe.Methods - .ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length) - .EnsureZstdSuccess(); + Unsafe + .Methods.ZSTD_CCtx_loadDictionary(cctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); } public Compressor(int level = DefaultCompressionLevel) @@ -96,15 +96,15 @@ public int Wrap(ReadOnlySpan src, Span dest) { using var cctx = handle.Acquire(); return (int) - Unsafe.Methods - .ZSTD_compress2( - cctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ) - .EnsureZstdSuccess(); + Unsafe + .Methods.ZSTD_compress2( + cctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } } @@ -190,9 +190,9 @@ ZSTD_EndDirective directive fixed (ZSTD_outBuffer_s* outputPtr = &output) { using var 
cctx = handle.Acquire(); - return Unsafe.Methods - .ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive) - .EnsureZstdSuccess(); + return Unsafe + .Methods.ZSTD_compressStream2(cctx, outputPtr, inputPtr, directive) + .EnsureZstdSuccess(); } } diff --git a/src/SharpCompress/Compressors/ZStandard/Constants.cs b/src/SharpCompress/Compressors/ZStandard/Constants.cs index 12c57270e..cce84fc09 100644 --- a/src/SharpCompress/Compressors/ZStandard/Constants.cs +++ b/src/SharpCompress/Compressors/ZStandard/Constants.cs @@ -5,4 +5,4 @@ internal class Constants //NOTE: https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/runtime/gcallowverylargeobjects-element#remarks //NOTE: https://github.com/dotnet/runtime/blob/v5.0.0-rtm.20519.4/src/libraries/System.Private.CoreLib/src/System/Array.cs#L27 public const ulong MaxByteArrayLength = 0x7FFFFFC7; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs index 583acd017..9864a8055 100644 --- a/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs +++ b/src/SharpCompress/Compressors/ZStandard/DecompressionStream.cs @@ -53,13 +53,11 @@ public DecompressionStream( this.checkEndOfStream = checkEndOfStream; inputBufferSize = - bufferSize > 0 ? bufferSize : (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); + bufferSize > 0 + ? 
bufferSize + : (int)Unsafe.Methods.ZSTD_DStreamInSize().EnsureZstdSuccess(); inputBuffer = ArrayPool.Shared.Rent(inputBufferSize); - input = new ZSTD_inBuffer_s - { - pos = (nuint)inputBufferSize, - size = (nuint)inputBufferSize, - }; + input = new ZSTD_inBuffer_s { pos = (nuint)inputBufferSize, size = (nuint)inputBufferSize }; } public void SetParameter(ZSTD_dParameter parameter, int value) @@ -110,7 +108,7 @@ public override int Read(byte[] buffer, int offset, int count) => #if !NETSTANDARD2_0 && !NETFRAMEWORK public override int Read(Span buffer) #else - public int Read(Span buffer) + public int Read(Span buffer) #endif { EnsureNotDisposed(); @@ -174,17 +172,17 @@ public override async ValueTask ReadAsync( ) #else - public override Task ReadAsync( - byte[] buffer, - int offset, - int count, - CancellationToken cancellationToken - ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken); - - public async Task ReadAsync( - Memory buffer, - CancellationToken cancellationToken = default - ) + public override Task ReadAsync( + byte[] buffer, + int offset, + int count, + CancellationToken cancellationToken + ) => ReadAsync(new Memory(buffer, offset, count), cancellationToken); + + public async Task ReadAsync( + Memory buffer, + CancellationToken cancellationToken = default + ) #endif { EnsureNotDisposed(); @@ -222,8 +220,8 @@ public async Task ReadAsync( if ( ( bytesRead = await innerStream - .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) - .ConfigureAwait(false) + .ReadAsync(inputBuffer, 0, inputBufferSize, cancellationToken) + .ConfigureAwait(false) ) == 0 ) { @@ -265,8 +263,7 @@ public override long Position public override void Flush() => throw new NotSupportedException(); - public override long Seek(long offset, SeekOrigin origin) => - throw new NotSupportedException(); + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); public override void SetLength(long value) => throw new 
NotSupportedException(); @@ -280,17 +277,17 @@ private void EnsureNotDisposed() } #if NETSTANDARD2_0 || NETFRAMEWORK - public virtual Task DisposeAsync() + public virtual Task DisposeAsync() + { + try { - try - { - Dispose(); - return Task.CompletedTask; - } - catch (Exception exc) - { - return Task.FromException(exc); - } + Dispose(); + return Task.CompletedTask; + } + catch (Exception exc) + { + return Task.FromException(exc); } + } #endif } diff --git a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs index 80fa8851a..8a63c4d9d 100644 --- a/src/SharpCompress/Compressors/ZStandard/Decompressor.cs +++ b/src/SharpCompress/Compressors/ZStandard/Decompressor.cs @@ -36,17 +36,17 @@ public void LoadDictionary(ReadOnlySpan dict) { using var dctx = handle.Acquire(); fixed (byte* dictPtr = dict) - Unsafe.Methods - .ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length) - .EnsureZstdSuccess(); + Unsafe + .Methods.ZSTD_DCtx_loadDictionary(dctx, dictPtr, (nuint)dict.Length) + .EnsureZstdSuccess(); } public static ulong GetDecompressedSize(ReadOnlySpan src) { fixed (byte* srcPtr = src) - return Unsafe.Methods - .ZSTD_decompressBound(srcPtr, (nuint)src.Length) - .EnsureContentSizeOk(); + return Unsafe + .Methods.ZSTD_decompressBound(srcPtr, (nuint)src.Length) + .EnsureContentSizeOk(); } public static ulong GetDecompressedSize(ArraySegment src) => @@ -84,15 +84,15 @@ public int Unwrap(ReadOnlySpan src, Span dest) { using var dctx = handle.Acquire(); return (int) - Unsafe.Methods - .ZSTD_decompressDCtx( - dctx, - destPtr, - (nuint)dest.Length, - srcPtr, - (nuint)src.Length - ) - .EnsureZstdSuccess(); + Unsafe + .Methods.ZSTD_decompressDCtx( + dctx, + destPtr, + (nuint)dest.Length, + srcPtr, + (nuint)src.Length + ) + .EnsureZstdSuccess(); } } @@ -168,7 +168,9 @@ internal nuint DecompressStream(ref ZSTD_inBuffer_s input, ref ZSTD_outBuffer_s fixed (ZSTD_outBuffer_s* outputPtr = &output) { using var dctx = 
handle.Acquire(); - return Unsafe.Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr).EnsureZstdSuccess(); + return Unsafe + .Methods.ZSTD_decompressStream(dctx, outputPtr, inputPtr) + .EnsureZstdSuccess(); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs index 4490274ca..3b49bdcec 100644 --- a/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs +++ b/src/SharpCompress/Compressors/ZStandard/SafeHandles.cs @@ -47,10 +47,7 @@ public static SafeCctxHandle Create() { var cctx = Unsafe.Methods.ZSTD_createCCtx(); if (cctx == null) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Failed to create cctx" - ); + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create cctx"); safeHandle.SetHandle((IntPtr)cctx); success = true; } @@ -100,10 +97,7 @@ public static SafeDctxHandle Create() { var dctx = Unsafe.Methods.ZSTD_createDCtx(); if (dctx == null) - throw new ZstdException( - ZSTD_ErrorCode.ZSTD_error_GENERIC, - "Failed to create dctx" - ); + throw new ZstdException(ZSTD_ErrorCode.ZSTD_error_GENERIC, "Failed to create dctx"); safeHandle.SetHandle((IntPtr)dctx); success = true; } diff --git a/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs index e2f7894db..406cacd43 100644 --- a/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs +++ b/src/SharpCompress/Compressors/ZStandard/SynchronizationWrapper.cs @@ -19,4 +19,4 @@ internal static unsafe class SynchronizationWrapper public static void PulseAll(void** obj) => Monitor.PulseAll(UnwrapObject(obj)); public static void Wait(void** mutex) => Monitor.Wait(UnwrapObject(mutex)); -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs index 
b8ef82f3f..ab23c39aa 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_CStream_t.cs @@ -11,4 +11,4 @@ public unsafe struct BIT_CStream_t public sbyte* startPtr; public sbyte* ptr; public sbyte* endPtr; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs index 4c0b15062..60b468b14 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BIT_DStream_status.cs @@ -13,4 +13,4 @@ public enum BIT_DStream_status /* user requested more bits than present in bitstream */ BIT_DStream_overflow = 3, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs index d95e90764..88302a494 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Bitstream.cs @@ -1,57 +1,57 @@ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics.X86; #endif -using static SharpCompress.Compressors.ZStandard.UnsafeHelper; namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_BIT_mask => - new uint[32] - { - 0, - 1, - 3, - 7, - 0xF, - 0x1F, - 0x3F, - 0x7F, - 0xFF, - 0x1FF, - 0x3FF, - 0x7FF, - 0xFFF, - 0x1FFF, - 0x3FFF, - 0x7FFF, - 0xFFFF, - 0x1FFFF, - 0x3FFFF, - 0x7FFFF, - 0xFFFFF, - 0x1FFFFF, - 0x3FFFFF, - 0x7FFFFF, - 0xFFFFFF, - 0x1FFFFFF, - 0x3FFFFFF, - 0x7FFFFFF, - 0xFFFFFFF, - 0x1FFFFFFF, - 0x3FFFFFFF, - 0x7FFFFFFF, - }; - private static uint* BIT_mask => - (uint*) - 
System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_BIT_mask) - ); + private static ReadOnlySpan Span_BIT_mask => + new uint[32] + { + 0, + 1, + 3, + 7, + 0xF, + 0x1F, + 0x3F, + 0x7F, + 0xFF, + 0x1FF, + 0x3FF, + 0x7FF, + 0xFFF, + 0x1FFF, + 0x3FFF, + 0x7FFF, + 0xFFFF, + 0x1FFFF, + 0x3FFFF, + 0x7FFFF, + 0xFFFFF, + 0x1FFFFF, + 0x3FFFFF, + 0x7FFFFF, + 0xFFFFFF, + 0x1FFFFFF, + 0x3FFFFFF, + 0x7FFFFFF, + 0xFFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + }; + private static uint* BIT_mask => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_BIT_mask) + ); #else private static readonly uint* BIT_mask = GetArrayPointer( @@ -100,11 +100,7 @@ ref MemoryMarshal.GetReference(Span_BIT_mask) * @return : 0 if success, * otherwise an error code (can be tested using ERR_isError()) */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_initCStream( - ref BIT_CStream_t bitC, - void* startPtr, - nuint dstCapacity - ) + private static nuint BIT_initCStream(ref BIT_CStream_t bitC, void* startPtr, nuint dstCapacity) { bitC.bitContainer = 0; bitC.bitPos = 0; @@ -121,15 +117,15 @@ private static nuint BIT_getLowerBits(nuint bitContainer, uint nbBits) { assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); #if NETCOREAPP3_1_OR_GREATER - if (Bmi2.X64.IsSupported) - { - return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits); - } + if (Bmi2.X64.IsSupported) + { + return (nuint)Bmi2.X64.ZeroHighBits(bitContainer, nbBits); + } - if (Bmi2.IsSupported) - { - return Bmi2.ZeroHighBits((uint)bitContainer, nbBits); - } + if (Bmi2.IsSupported) + { + return Bmi2.ZeroHighBits((uint)bitContainer, nbBits); + } #endif return bitContainer & BIT_mask[nbBits]; @@ -270,16 +266,13 @@ private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint switch (srcSize) { case 7: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; + bitD->bitContainer += 
(nuint)((byte*)srcBuffer)[6] << sizeof(nuint) * 8 - 16; goto case 6; case 6: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; + bitD->bitContainer += (nuint)((byte*)srcBuffer)[5] << sizeof(nuint) * 8 - 24; goto case 5; case 5: - bitD->bitContainer += - (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; + bitD->bitContainer += (nuint)((byte*)srcBuffer)[4] << sizeof(nuint) * 8 - 32; goto case 4; case 4: bitD->bitContainer += (nuint)((byte*)srcBuffer)[3] << 24; @@ -298,9 +291,7 @@ private static nuint BIT_initDStream(BIT_DStream_t* bitD, void* srcBuffer, nuint byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; bitD->bitsConsumed = lastByte != 0 ? 8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } bitD->bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; @@ -321,15 +312,15 @@ private static nuint BIT_getMiddleBits(nuint bitContainer, uint start, uint nbBi uint regMask = (uint)(sizeof(nuint) * 8 - 1); assert(nbBits < sizeof(uint) * 32 / sizeof(uint)); #if NETCOREAPP3_1_OR_GREATER - if (Bmi2.X64.IsSupported) - { - return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits); - } + if (Bmi2.X64.IsSupported) + { + return (nuint)Bmi2.X64.ZeroHighBits(bitContainer >> (int)(start & regMask), nbBits); + } - if (Bmi2.IsSupported) - { - return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits); - } + if (Bmi2.IsSupported) + { + return Bmi2.ZeroHighBits((uint)(bitContainer >> (int)(start & regMask)), nbBits); + } #endif return (nuint)(bitContainer >> (int)(start & regMask) & ((ulong)1 << (int)nbBits) - 1); @@ -359,8 +350,8 @@ private static nuint BIT_lookBitsFast(BIT_DStream_t* bitD, uint nbBits) uint regMask = (uint)(sizeof(nuint) * 8 - 1); assert(nbBits >= 1); return bitD->bitContainer - << (int)(bitD->bitsConsumed & 
regMask) - >> (int)(regMask + 1 - nbBits & regMask); + << (int)(bitD->bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -423,13 +414,13 @@ private static BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_static_zeroFilled => - new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }; - private static nuint* static_zeroFilled => - (nuint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_static_zeroFilled) - ); + private static ReadOnlySpan Span_static_zeroFilled => + new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }; + private static nuint* static_zeroFilled => + (nuint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_static_zeroFilled) + ); #else private static readonly nuint* static_zeroFilled = (nuint*)GetArrayPointer( @@ -485,10 +476,9 @@ private static BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint BIT_endOfDStream(BIT_DStream_t* DStream) { - return - DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) - ? 1U - : 0U; + return DStream->ptr == DStream->start && DStream->bitsConsumed == (uint)(sizeof(nuint) * 8) + ? 1U + : 0U; } /*-******************************************************** @@ -554,9 +544,7 @@ private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nu byte lastByte = ((byte*)srcBuffer)[srcSize - 1]; bitD.bitsConsumed = lastByte != 0 ? 
8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } bitD.bitsConsumed += (uint)((nuint)sizeof(nuint) - srcSize) * 8; @@ -572,11 +560,7 @@ private static nuint BIT_initDStream(ref BIT_DStream_t bitD, void* srcBuffer, nu * On 64-bits, maxNbBits==56. * @return : value extracted */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static nuint BIT_lookBits( - nuint bitD_bitContainer, - uint bitD_bitsConsumed, - uint nbBits - ) + private static nuint BIT_lookBits(nuint bitD_bitContainer, uint bitD_bitsConsumed, uint nbBits) { return BIT_getMiddleBits( bitD_bitContainer, @@ -597,8 +581,8 @@ uint nbBits uint regMask = (uint)(sizeof(nuint) * 8 - 1); assert(nbBits >= 1); return bitD_bitContainer - << (int)(bitD_bitsConsumed & regMask) - >> (int)(regMask + 1 - nbBits & regMask); + << (int)(bitD_bitsConsumed & regMask) + >> (int)(regMask + 1 - nbBits & regMask); } [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs index 46f1d7061..9a53651dd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/BlockSummary.cs @@ -5,4 +5,4 @@ public struct BlockSummary public nuint nbSequences; public nuint blockSize; public nuint litSize; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs index 1c56883ab..380831328 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_best_s.cs @@ -17,4 +17,4 @@ public unsafe struct COVER_best_s public nuint dictSize; public ZDICT_cover_params_t parameters; public 
nuint compressedSize; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs index 839d37351..287127621 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_ctx_t.cs @@ -16,4 +16,4 @@ public unsafe struct COVER_ctx_t public uint* freqs; public uint* dmerAt; public uint d; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs index 7c7d29d73..dbfa337f6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_map_s.cs @@ -6,4 +6,4 @@ public unsafe struct COVER_map_s public uint sizeLog; public uint size; public uint sizeMask; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs index bc6be3ec4..7e852ea9c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/COVER_segment_t.cs @@ -8,4 +8,4 @@ public struct COVER_segment_t public uint begin; public uint end; public uint score; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs index a199a19d9..11c79f3c6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Cover.cs @@ -24,11 +24,7 @@ private static nuint COVER_sum(nuint* samplesSizes, uint nbSamples) /** * Warns the user when their corpus is too small. 
*/ - private static void COVER_warnOnSmallCorpus( - nuint maxDictSize, - nuint nbDmers, - int displayLevel - ) + private static void COVER_warnOnSmallCorpus(nuint maxDictSize, nuint nbDmers, int displayLevel) { double ratio = nbDmers / (double)maxDictSize; if (ratio >= 10) @@ -100,8 +96,7 @@ nuint dictBufferCapacity i = parameters.splitPoint < 1 ? nbTrainSamples : 0; for (; i < nbSamples; ++i) { - maxSampleSize = - samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; + maxSampleSize = samplesSizes[i] > maxSampleSize ? samplesSizes[i] : maxSampleSize; } dstCapacity = ZSTD_compressBound(maxSampleSize); @@ -435,11 +430,7 @@ nuint totalCompressedSize if (totalCompressedSize <= largestCompressed * regressionTolerance) { free(largestDictbuffer); - return setDictSelection( - candidateDictBuffer, - dictContentSize, - totalCompressedSize - ); + return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize); } dictContentSize *= 2; @@ -450,4 +441,4 @@ nuint totalCompressedSize free(candidateDictBuffer); return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs index c89de564d..6586ac46f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/DTableDesc.cs @@ -9,4 +9,4 @@ public struct DTableDesc public byte tableType; public byte tableLog; public byte reserved; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs index 93d18c382..9d3f1ba28 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EStats_ress_t.cs @@ -10,4 +10,4 @@ public unsafe struct EStats_ress_t /* must be 
ZSTD_BLOCKSIZE_MAX allocated */ public void* workPlace; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs index 77283d846..e14f925f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/EntropyCommon.cs @@ -73,9 +73,7 @@ nuint hbSize if (FSE_isError(countSize)) return countSize; if (countSize > hbSize) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); return countSize; } } @@ -220,13 +218,7 @@ private static nuint FSE_readNCount_body_default( nuint hbSize ) { - return FSE_readNCount_body( - normalizedCounter, - maxSVPtr, - tableLogPtr, - headerBuffer, - hbSize - ); + return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize); } /*! FSE_readNCount_bmi2(): @@ -369,9 +361,7 @@ int bmi2 for (n = 0; n < oSize; n++) { if (huffWeight[n] > 12) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); rankStats[huffWeight[n]]++; weightTotal += (uint)(1 << huffWeight[n] >> 1); } @@ -390,9 +380,7 @@ int bmi2 uint verif = (uint)(1 << (int)ZSTD_highbit32(rest)); uint lastWeight = ZSTD_highbit32(rest) + 1; if (verif != rest) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); huffWeight[oSize] = (byte)lastWeight; rankStats[lastWeight]++; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs index a1718372a..a2f59adc6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs +++ 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/EstimatedBlockSize.cs @@ -4,4 +4,4 @@ public struct EstimatedBlockSize { public nuint estLitSize; public nuint estBlockSize; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs index d404ccc0b..8c79a30d1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_ctx_t.cs @@ -16,4 +16,4 @@ public unsafe struct FASTCOVER_ctx_t public uint d; public uint f; public FASTCOVER_accel_t accelParams; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs index b166fe457..3a6e14f0a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FASTCOVER_tryParameters_data_s.cs @@ -9,4 +9,4 @@ public unsafe struct FASTCOVER_tryParameters_data_s public COVER_best_s* best; public nuint dictBufferCapacity; public ZDICT_cover_params_t parameters; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs index ae6269ec1..db091f0c9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DTableHeader.cs @@ -5,4 +5,4 @@ public struct FSE_DTableHeader { public ushort tableLog; public ushort fastMode; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs index 3ecda2bc2..cd03c36f5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs 
+++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FSE_DecompressWksp.cs @@ -3,4 +3,4 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public unsafe struct FSE_DecompressWksp { public fixed short ncount[256]; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs index ed6f0912a..2bd7e30c1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fastcover.cs @@ -20,23 +20,22 @@ private static nuint FASTCOVER_hashPtrToIndex(void* p, uint f, uint d) return ZSTD_hash8Ptr(p, f); } - private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = - GetArrayPointer( - new FASTCOVER_accel_t[11] - { - new FASTCOVER_accel_t(finalize: 100, skip: 0), - new FASTCOVER_accel_t(finalize: 100, skip: 0), - new FASTCOVER_accel_t(finalize: 50, skip: 1), - new FASTCOVER_accel_t(finalize: 34, skip: 2), - new FASTCOVER_accel_t(finalize: 25, skip: 3), - new FASTCOVER_accel_t(finalize: 20, skip: 4), - new FASTCOVER_accel_t(finalize: 17, skip: 5), - new FASTCOVER_accel_t(finalize: 14, skip: 6), - new FASTCOVER_accel_t(finalize: 13, skip: 7), - new FASTCOVER_accel_t(finalize: 11, skip: 8), - new FASTCOVER_accel_t(finalize: 10, skip: 9), - } - ); + private static readonly FASTCOVER_accel_t* FASTCOVER_defaultAccelParameters = GetArrayPointer( + new FASTCOVER_accel_t[11] + { + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new FASTCOVER_accel_t(finalize: 100, skip: 0), + new FASTCOVER_accel_t(finalize: 50, skip: 1), + new FASTCOVER_accel_t(finalize: 34, skip: 2), + new FASTCOVER_accel_t(finalize: 25, skip: 3), + new FASTCOVER_accel_t(finalize: 20, skip: 4), + new FASTCOVER_accel_t(finalize: 17, skip: 5), + new FASTCOVER_accel_t(finalize: 14, skip: 6), + new FASTCOVER_accel_t(finalize: 13, skip: 7), + new FASTCOVER_accel_t(finalize: 11, skip: 8), + new FASTCOVER_accel_t(finalize: 10, skip: 
9), + } + ); /*-************************************* * Helper functions @@ -91,11 +90,7 @@ private static COVER_segment_t FASTCOVER_selectSegment( if (activeSegment.end - activeSegment.begin == dmersInK + 1) { /* Get hash value of the dmer to be eliminated from active segment */ - nuint delIndex = FASTCOVER_hashPtrToIndex( - ctx->samples + activeSegment.begin, - f, - d - ); + nuint delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); segmentFreqs[delIndex] -= 1; if (segmentFreqs[delIndex] == 0) { @@ -403,9 +398,7 @@ private static void FASTCOVER_tryParameters(void* opaque) parameters, segmentFreqs ); - uint nbFinalizeSamples = (uint)( - ctx->nbTrainSamples * ctx->accelParams.finalize / 100 - ); + uint nbFinalizeSamples = (uint)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); selection = COVER_selectDict( dict + tail, dictBufferCapacity, @@ -544,10 +537,7 @@ ZDICT_fastCover_params_t parameters COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); { /* Initialize array to keep track of frequency of dmer within activeSegment */ - ushort* segmentFreqs = (ushort*)calloc( - (ulong)1 << (int)parameters.f, - sizeof(ushort) - ); + ushort* segmentFreqs = (ushort*)calloc((ulong)1 << (int)parameters.f, sizeof(ushort)); nuint tail = FASTCOVER_buildDictionary( &ctx, ctx.freqs, @@ -556,9 +546,7 @@ ZDICT_fastCover_params_t parameters coverParams, segmentFreqs ); - uint nbFinalizeSamples = (uint)( - ctx.nbTrainSamples * ctx.accelParams.finalize / 100 - ); + uint nbFinalizeSamples = (uint)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); nuint dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, @@ -706,9 +694,7 @@ public static nuint ZDICT_optimizeTrainFromBuffer_fastCover( COVER_best_destroy(&best); FASTCOVER_ctx_destroy(&ctx); POOL_free(pool); - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } data->ctx = &ctx; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs index 05e4b30ae..32798dfe3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Fse.cs @@ -51,9 +51,7 @@ uint symbol ushort* stateTable = (ushort*)statePtr.stateTable; uint nbBitsOut = (uint)statePtr.value + symbolTT.deltaNbBits >> 16; BIT_addBits(ref bitC_bitContainer, ref bitC_bitPos, (nuint)statePtr.value, nbBitsOut); - statePtr.value = stateTable[ - (statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState - ]; + statePtr.value = stateTable[(statePtr.value >> (int)nbBitsOut) + symbolTT.deltaFindState]; } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -105,8 +103,7 @@ uint accuracyLog assert(accuracyLog < 31 - tableLog); { uint tableSize = (uint)(1 << (int)tableLog); - uint deltaFromThreshold = - threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); + uint deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); /* linear interpolation (very approximate) */ uint normalizedDeltaFromThreshold = deltaFromThreshold << (int)accuracyLog >> (int)tableLog; @@ -118,19 +115,11 @@ uint accuracyLog } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void FSE_initDState( - ref FSE_DState_t DStatePtr, - ref BIT_DStream_t bitD, - uint* dt - ) + private static void FSE_initDState(ref FSE_DState_t DStatePtr, ref BIT_DStream_t bitD, uint* dt) { void* ptr = dt; FSE_DTableHeader* DTableH = (FSE_DTableHeader*)ptr; - DStatePtr.state = BIT_readBits( - bitD.bitContainer, - ref bitD.bitsConsumed, - DTableH->tableLog - ); + DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog); BIT_reloadDStream( ref bitD.bitContainer, ref bitD.bitsConsumed, @@ -206,4 +195,4 @@ private static void FSE_initCState(ref FSE_CState_t 
statePtr, uint* ct) statePtr.symbolTT = ct + 1 + (tableLog != 0 ? 1 << (int)(tableLog - 1) : 1); statePtr.stateLog = tableLog; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs index abe85537f..c4c763a1f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseCompress.cs @@ -37,10 +37,7 @@ nuint wkspSize assert(((nuint)workSpace & 1) == 0); if ( sizeof(uint) - * ( - (maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 - + sizeof(ulong) / sizeof(uint) - ) + * ((maxSymbolValue + 2 + (1UL << (int)tableLog)) / 2 + sizeof(ulong) / sizeof(uint)) > wkspSize ) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); @@ -151,8 +148,7 @@ nuint wkspSize switch (normalizedCounter[s]) { case 0: - symbolTT[s].deltaNbBits = - (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); + symbolTT[s].deltaNbBits = (tableLog + 1 << 16) - (uint)(1 << (int)tableLog); break; case -1: case 1: @@ -164,16 +160,14 @@ nuint wkspSize default: assert(normalizedCounter[s] > 1); - { - uint maxBitsOut = - tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1); - uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut; - symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; - symbolTT[s].deltaFindState = (int)( - total - (uint)normalizedCounter[s] - ); - total += (uint)normalizedCounter[s]; - } + { + uint maxBitsOut = + tableLog - ZSTD_highbit32((uint)normalizedCounter[s] - 1); + uint minStatePlus = (uint)normalizedCounter[s] << (int)maxBitsOut; + symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; + symbolTT[s].deltaFindState = (int)(total - (uint)normalizedCounter[s]); + total += (uint)normalizedCounter[s]; + } break; } @@ -232,9 +226,7 @@ uint writeIsSafe start += 24; bitStream += 0xFFFFU << bitCount; if (writeIsSafe == 0 && @out > oend - 2) - return 
unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @out[0] = (byte)bitStream; @out[1] = (byte)(bitStream >> 8); @out += 2; @@ -253,9 +245,7 @@ uint writeIsSafe if (bitCount > 16) { if (writeIsSafe == 0 && @out > oend - 2) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @out[0] = (byte)bitStream; @out[1] = (byte)(bitStream >> 8); @out += 2; @@ -384,11 +374,7 @@ uint minus dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. @return : recommended tableLog (necessarily <= 'maxTableLog') */ - private static uint FSE_optimalTableLog( - uint maxTableLog, - nuint srcSize, - uint maxSymbolValue - ) + private static uint FSE_optimalTableLog(uint maxTableLog, nuint srcSize, uint maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); } @@ -464,7 +450,7 @@ short lowProbCount probably incompressible data (should have already been detected); find max, then give all remaining points to max */ uint maxV = 0, - maxC = 0; + maxC = 0; for (s = 0; s <= maxSymbolValue; s++) if (count[s] > maxC) { @@ -514,13 +500,13 @@ short lowProbCount } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_rtbTable => - new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; - private static uint* rtbTable => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_rtbTable) - ); + private static ReadOnlySpan Span_rtbTable => + new uint[8] { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + private static uint* rtbTable => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_rtbTable) + ); #else private static readonly 
uint* rtbTable = GetArrayPointer( @@ -655,7 +641,7 @@ uint fast BIT_CStream_t bitC; System.Runtime.CompilerServices.Unsafe.SkipInit(out bitC); FSE_CState_t CState1, - CState2; + CState2; System.Runtime.CompilerServices.Unsafe.SkipInit(out CState1); System.Runtime.CompilerServices.Unsafe.SkipInit(out CState2); if (srcSize <= 2) @@ -683,12 +669,7 @@ uint fast bitC_endPtr ); else - BIT_flushBits( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); } else { @@ -709,12 +690,7 @@ uint fast bitC_endPtr ); else - BIT_flushBits( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); } while (ip > istart) @@ -750,12 +726,7 @@ uint fast bitC_endPtr ); else - BIT_flushBits( - ref bitC_bitContainer, - ref bitC_bitPos, - ref bitC_ptr, - bitC_endPtr - ); + BIT_flushBits(ref bitC_bitContainer, ref bitC_bitPos, ref bitC_ptr, bitC_endPtr); } FSE_flushCState( diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs index e869c5752..12be4ef69 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/FseDecompress.cs @@ -102,7 +102,7 @@ nuint wkspSize uint tableMask = tableSize - 1; uint step = (tableSize >> 1) + (tableSize >> 3) + 3; uint s, - position = 0; + position = 0; for (s = 0; s < maxSV1; s++) { int i; @@ -213,7 +213,7 @@ uint fast bitD_start, bitD_limitPtr ) == BIT_DStream_status.BIT_DStream_unfinished - && op < olimit; + && op < olimit; op += 4 ) { @@ -288,16 +288,8 @@ uint fast { *op++ = fast != 0 - ? FSE_decodeSymbolFast( - ref state2, - bitD_bitContainer, - ref bitD_bitsConsumed - ) - : FSE_decodeSymbol( - ref state2, - bitD_bitContainer, - ref bitD_bitsConsumed - ); + ? 
FSE_decodeSymbolFast(ref state2, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state2, bitD_bitContainer, ref bitD_bitsConsumed); break; } @@ -319,16 +311,8 @@ ref bitD_bitsConsumed { *op++ = fast != 0 - ? FSE_decodeSymbolFast( - ref state1, - bitD_bitContainer, - ref bitD_bitsConsumed - ) - : FSE_decodeSymbol( - ref state1, - bitD_bitContainer, - ref bitD_bitsConsumed - ); + ? FSE_decodeSymbolFast(ref state1, bitD_bitContainer, ref bitD_bitsConsumed) + : FSE_decodeSymbol(ref state1, bitD_bitContainer, ref bitD_bitsConsumed); break; } } @@ -394,15 +378,13 @@ int bmi2 return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)); assert( (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)) - <= wkspSize + <= wkspSize ); workSpace = (byte*)workSpace + sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint); - wkspSize -= (nuint)( - sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint) - ); + wkspSize -= (nuint)(sizeof(FSE_DecompressWksp) + (1 + (1 << (int)tableLog)) * sizeof(uint)); { nuint _var_err__ = FSE_buildDTable_internal( dtable, @@ -429,14 +411,7 @@ int bmi2 dtable, 1 ); - return FSE_decompress_usingDTable_generic( - dst, - dstCapacity, - ip, - cSrcSize, - dtable, - 0 - ); + return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0); } } @@ -484,4 +459,4 @@ int bmi2 wkspSize ); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs index c3a8a82b7..8aca6e8fe 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HIST_checkInput_e.cs @@ -4,4 +4,4 @@ public enum HIST_checkInput_e { trustInput, checkMaxSymbolValue, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs index 2ee9b03ac..789f4e7f4 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_CompressWeightsWksp.cs @@ -6,4 +6,4 @@ public unsafe struct HUF_CompressWeightsWksp public fixed uint scratchBuffer[41]; public fixed uint count[13]; public fixed short norm[13]; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs index ca170534a..c9b8d2d5c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_DecompressFastArgs.cs @@ -46,4 +46,4 @@ public unsafe struct _iend_e__FixedBuffer public byte* e2; public byte* e3; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs index 5ba94164c..529a3e5e3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX1_Workspace.cs @@ -7,4 +7,4 @@ public unsafe struct HUF_ReadDTableX1_Workspace public fixed uint statsWksp[219]; public fixed byte symbols[256]; public fixed byte huffWeight[256]; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs index 9d188e9b4..a85bbb6c1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_ReadDTableX2_Workspace.cs @@ -12,11 +12,11 @@ public unsafe struct HUF_ReadDTableX2_Workspace public fixed uint calleeWksp[219]; #if 
NET8_0_OR_GREATER - [InlineArray(12)] - public unsafe struct _rankVal_e__FixedBuffer - { - public rankValCol_t e0; - } + [InlineArray(12)] + public unsafe struct _rankVal_e__FixedBuffer + { + public rankValCol_t e0; + } #else public unsafe struct _rankVal_e__FixedBuffer @@ -37,11 +37,11 @@ public unsafe struct _rankVal_e__FixedBuffer #endif #if NET8_0_OR_GREATER - [InlineArray(256)] - public unsafe struct _sortedSymbol_e__FixedBuffer - { - public sortedSymbol_t e0; - } + [InlineArray(256)] + public unsafe struct _sortedSymbol_e__FixedBuffer + { + public sortedSymbol_t e0; + } #else public unsafe struct _sortedSymbol_e__FixedBuffer diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs index 0fb8400a0..4fe92db70 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_buildCTable_wksp_tables.cs @@ -8,11 +8,11 @@ public struct HUF_buildCTable_wksp_tables public _rankPosition_e__FixedBuffer rankPosition; #if NET8_0_OR_GREATER - [InlineArray(512)] - public unsafe struct _huffNodeTbl_e__FixedBuffer - { - public nodeElt_s e0; - } + [InlineArray(512)] + public unsafe struct _huffNodeTbl_e__FixedBuffer + { + public nodeElt_s e0; + } #else public unsafe struct _huffNodeTbl_e__FixedBuffer @@ -533,11 +533,11 @@ public unsafe struct _huffNodeTbl_e__FixedBuffer #endif #if NET8_0_OR_GREATER - [InlineArray(192)] - public unsafe struct _rankPosition_e__FixedBuffer - { - public rankPos e0; - } + [InlineArray(192)] + public unsafe struct _rankPosition_e__FixedBuffer + { + public rankPos e0; + } #else public unsafe struct _rankPosition_e__FixedBuffer @@ -736,4 +736,4 @@ public unsafe struct _rankPosition_e__FixedBuffer public rankPos e191; } #endif -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs index 4d8fde232..6e6e12c09 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HUF_compress_tables_t.cs @@ -9,11 +9,11 @@ public unsafe struct HUF_compress_tables_t public _wksps_e__Union wksps; #if NET8_0_OR_GREATER - [InlineArray(257)] - public unsafe struct _CTable_e__FixedBuffer - { - public nuint e0; - } + [InlineArray(257)] + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + } #else public unsafe struct _CTable_e__FixedBuffer diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs index 7f6a6bb1a..10a433ce0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Hist.cs @@ -157,9 +157,7 @@ private static nuint HIST_count_parallel_wksp( while (Counting1[maxSymbolValue] == 0) maxSymbolValue--; if (check != default && maxSymbolValue > *maxSymbolValuePtr) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)); *maxSymbolValuePtr = maxSymbolValue; memmove(count, Counting1, countSize); } @@ -260,12 +258,7 @@ nuint sourceSize * or an error code, which can be tested using HIST_isError(). * note : if return == srcSize, there is only one symbol. 
*/ - private static nuint HIST_count( - uint* count, - uint* maxSymbolValuePtr, - void* src, - nuint srcSize - ) + private static nuint HIST_count(uint* count, uint* maxSymbolValuePtr, void* src, nuint srcSize) { uint* tmpCounters = stackalloc uint[1024]; return HIST_count_wksp( diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs index 9d00fb3e2..785c81437 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufCompress.cs @@ -5,11 +5,7 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { - private static void* HUF_alignUpWorkspace( - void* workspace, - nuint* workspaceSizePtr, - nuint align - ) + private static void* HUF_alignUpWorkspace(void* workspace, nuint* workspaceSizePtr, nuint align) { nuint mask = align - 1; nuint rem = (nuint)workspace & mask; @@ -56,12 +52,7 @@ nuint workspaceSize return 0; { /* never fails */ - uint maxCount = HIST_count_simple( - wksp->count, - &maxSymbolValue, - weightTable, - wtSize - ); + uint maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); if (maxCount == wtSize) return 1; if (maxCount == 1) @@ -289,7 +280,7 @@ private static nuint HUF_readCTable( HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); { uint n, - nextRankStart = 0; + nextRankStart = 0; for (n = 1; n <= tableLog; n++) { uint curr = nextRankStart; @@ -377,11 +368,7 @@ private static uint HUF_getNbBitsFromCTable(nuint* CTable, uint symbolValue) * respect targetNbBits. * @return The maximum number of bits of the Huffman tree after adjustment. 
*/ - private static uint HUF_setMaxHeight( - nodeElt_s* huffNode, - uint lastNonNull, - uint targetNbBits - ) + private static uint HUF_setMaxHeight(nodeElt_s* huffNode, uint lastNonNull, uint targetNbBits) { uint largestBits = huffNode[lastNonNull].nbBits; if (largestBits <= targetNbBits) @@ -392,9 +379,7 @@ uint targetNbBits int n = (int)lastNonNull; while (huffNode[n].nbBits > targetNbBits) { - totalCost += (int)( - baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits)) - ); + totalCost += (int)(baseCost - (uint)(1 << (int)(largestBits - huffNode[n].nbBits))); huffNode[n].nbBits = (byte)targetNbBits; n--; } @@ -753,11 +738,7 @@ nuint wkspSize ) { HUF_buildCTable_wksp_tables* wksp_tables = - (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace( - workSpace, - &wkspSize, - sizeof(uint) - ); + (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(uint)); nodeElt_s* huffNode0 = &wksp_tables->huffNodeTbl.e0; nodeElt_s* huffNode = huffNode0 + 1; int nonNullRank; @@ -777,11 +758,7 @@ nuint wkspSize return maxNbBits; } - private static nuint HUF_estimateCompressedSize( - nuint* CTable, - uint* count, - uint maxSymbolValue - ) + private static nuint HUF_estimateCompressedSize(nuint* CTable, uint* count, uint maxSymbolValue) { nuint* ct = CTable + 1; nuint nbBits = 0; @@ -820,11 +797,7 @@ private static nuint HUF_compressBound(nuint size) * Initializes the bitstream. * @returns 0 or an error code. 
*/ - private static nuint HUF_initCStream( - ref HUF_CStream_t bitC, - void* startPtr, - nuint dstCapacity - ) + private static nuint HUF_initCStream(ref HUF_CStream_t bitC, void* startPtr, nuint dstCapacity) { bitC = new HUF_CStream_t { @@ -937,13 +910,7 @@ private static nuint HUF_endMark() private static nuint HUF_closeCStream(ref HUF_CStream_t bitC) { HUF_addBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, HUF_endMark(), 0); - HUF_flushBits( - ref bitC.bitContainer.e0, - ref bitC.bitPos.e0, - ref bitC.ptr, - bitC.endPtr, - 0 - ); + HUF_flushBits(ref bitC.bitContainer.e0, ref bitC.bitPos.e0, ref bitC.ptr, bitC.endPtr, 0); { nuint nbBits = bitC.bitPos.e0 & 0xFF; if (bitC.ptr >= bitC.endPtr) @@ -1006,13 +973,7 @@ int kLastFast int u; for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ip[n - u], - ct, - 1 - ); + HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1); } HUF_encodeSymbol( @@ -1039,13 +1000,7 @@ int kLastFast int u; for (u = 1; u < kUnroll; ++u) { - HUF_encodeSymbol( - ref bitC_bitContainer_e0, - ref bitC_bitPos_e0, - ip[n - u], - ct, - 1 - ); + HUF_encodeSymbol(ref bitC_bitContainer_e0, ref bitC_bitPos_e0, ip[n - u], ct, 1); } HUF_encodeSymbol( @@ -1511,12 +1466,12 @@ int flags byte* dst = (byte*)workSpace + sizeof(HUF_WriteCTableWksp); nuint dstSize = wkspSize - (nuint)sizeof(HUF_WriteCTableWksp); nuint hSize, - newSize; + newSize; uint symbolCardinality = HUF_cardinality(count, maxSymbolValue); uint minTableLog = HUF_minTableLog(symbolCardinality); nuint optSize = unchecked((nuint)~0) - 1; uint optLog = maxTableLog, - optLogGuess; + optLogGuess; for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { { @@ -1623,10 +1578,7 @@ int flags ); } - if ( - (flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 - && srcSize >= 4096 * 10 - ) + if ((flags & (int)HUF_flags_e.HUF_flags_suspectUncompressible) != 0 && srcSize >= 4096 * 10) { nuint 
largestTotal = 0; { diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs index 5da4f66d7..262787084 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/HufDecompress.cs @@ -369,8 +369,7 @@ uint dtLog if (MEM_32bits) while ( - BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished - && p < pEnd + BIT_reloadDStream(bitDPtr) == BIT_DStream_status.BIT_DStream_unfinished && p < pEnd ) *p++ = HUF_decodeSymbolX1(bitDPtr, dt, dtLog); while (p < pEnd) @@ -513,23 +512,19 @@ private static nuint HUF_decompress4X1_usingDTable_internal_body( *op3++ = HUF_decodeSymbolX1(&bitD3, dt, dtLog); *op4++ = HUF_decodeSymbolX1(&bitD4, dt, dtLog); endSignal &= - BIT_reloadDStreamFast(&bitD1) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; endSignal &= - BIT_reloadDStreamFast(&bitD2) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; endSignal &= - BIT_reloadDStreamFast(&bitD3) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; endSignal &= - BIT_reloadDStreamFast(&bitD4) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 
1U : 0U; } @@ -552,9 +547,7 @@ private static nuint HUF_decompress4X1_usingDTable_internal_body( & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (endCheck == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } return dstSize; @@ -569,13 +562,7 @@ private static nuint HUF_decompress4X1_usingDTable_internal_default( uint* DTable ) { - return HUF_decompress4X1_usingDTable_internal_body( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop( @@ -583,17 +570,17 @@ private static void HUF_decompress4X1_usingDTable_internal_fast_c_loop( ) { ulong bits0, - bits1, - bits2, - bits3; + bits1, + bits2, + bits3; byte* ip0, - ip1, - ip2, - ip3; + ip1, + ip2, + ip3; byte* op0, - op1, - op2, - op3; + op1, + op2, + op3; ushort* dtable = (ushort*)args->dt; byte* oend = args->oend; byte* ilowest = args->ilowest; @@ -907,14 +894,7 @@ private static nuint HUF_decompress4X1_usingDTable_internal_fast( byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); HUF_DecompressFastArgs args; { - nuint ret = HUF_DecompressFastArgs_init( - &args, - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); { nuint err_code = ret; if (ERR_isError(err_code)) @@ -964,9 +944,7 @@ private static nuint HUF_decompress4X1_usingDTable_internal_fast( 11 ); if ((&args.op.e0)[i] != segmentEnd) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } } @@ -983,13 +961,7 @@ private static nuint HUF_decompress1X1_usingDTable_internal( int flags ) { - return HUF_decompress1X1_usingDTable_internal_body( - dst, - dstSize, - 
cSrc, - cSrcSize, - DTable - ); + return HUF_decompress1X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } private static nuint HUF_decompress4X1_usingDTable_internal( @@ -1217,16 +1189,16 @@ ushort baseSeq memcpy(DTable + 2, &DEltX2, sizeof(ulong)); break; default: - { - int i; - for (i = 0; i < skipSize; i += 8) { - memcpy(DTable + i + 0, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 2, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 4, &DEltX2, sizeof(ulong)); - memcpy(DTable + i + 6, &DEltX2, sizeof(ulong)); + int i; + for (i = 0; i < skipSize; i += 8) + { + memcpy(DTable + i + 0, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 2, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 4, &DEltX2, sizeof(ulong)); + memcpy(DTable + i + 6, &DEltX2, sizeof(ulong)); + } } - } break; } @@ -1326,8 +1298,8 @@ int flags ) { uint tableLog, - maxW, - nbSymbols; + maxW, + nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); uint maxTableLog = dtd.maxTableLog; nuint iSize; @@ -1365,7 +1337,7 @@ int flags { uint w, - nextRankStart = 0; + nextRankStart = 0; for (w = 1; w < maxW + 1; w++) { uint curr = nextRankStart; @@ -1667,13 +1639,11 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body( op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); op2 += HUF_decodeSymbolX2(op2, &bitD2, dt, dtLog); endSignal &= - BIT_reloadDStreamFast(&bitD1) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD1) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; endSignal &= - BIT_reloadDStreamFast(&bitD2) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD2) == BIT_DStream_status.BIT_DStream_unfinished ? 
1U : 0U; if (MEM_64bits) @@ -1689,13 +1659,11 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body( op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); op4 += HUF_decodeSymbolX2(op4, &bitD4, dt, dtLog); endSignal &= - BIT_reloadDStreamFast(&bitD3) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD3) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; endSignal &= - BIT_reloadDStreamFast(&bitD4) - == BIT_DStream_status.BIT_DStream_unfinished + BIT_reloadDStreamFast(&bitD4) == BIT_DStream_status.BIT_DStream_unfinished ? 1U : 0U; } @@ -1718,9 +1686,7 @@ private static nuint HUF_decompress4X2_usingDTable_internal_body( & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (endCheck == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } return dstSize; @@ -1735,13 +1701,7 @@ private static nuint HUF_decompress4X2_usingDTable_internal_default( uint* DTable ) { - return HUF_decompress4X2_usingDTable_internal_body( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop( @@ -1749,21 +1709,21 @@ private static void HUF_decompress4X2_usingDTable_internal_fast_c_loop( ) { ulong bits0, - bits1, - bits2, - bits3; + bits1, + bits2, + bits3; byte* ip0, - ip1, - ip2, - ip3; + ip1, + ip2, + ip3; byte* op0, - op1, - op2, - op3; + op1, + op2, + op3; byte* oend0, - oend1, - oend2, - oend3; + oend1, + oend2, + oend3; HUF_DEltX2* dtable = (HUF_DEltX2*)args->dt; byte* ilowest = args->ilowest; bits0 = args->bits[0]; @@ -2126,14 +2086,7 @@ private static nuint HUF_decompress4X2_usingDTable_internal_fast( byte* oend = ZSTD_maybeNullPtrAdd((byte*)dst, (nint)dstSize); HUF_DecompressFastArgs args; { - nuint ret = HUF_DecompressFastArgs_init( - &args, - dst, - dstSize, 
- cSrc, - cSrcSize, - DTable - ); + nuint ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); { nuint err_code = ret; if (ERR_isError(err_code)) @@ -2182,9 +2135,7 @@ private static nuint HUF_decompress4X2_usingDTable_internal_fast( 11 ); if ((&args.op.e0)[i] != segmentEnd) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } } @@ -2238,13 +2189,7 @@ private static nuint HUF_decompress1X2_usingDTable_internal( int flags ) { - return HUF_decompress1X2_usingDTable_internal_body( - dst, - dstSize, - cSrc, - cSrcSize, - DTable - ); + return HUF_decompress1X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } private static nuint HUF_decompress1X2_DCtx_wksp( @@ -2462,14 +2407,7 @@ int flags { DTableDesc dtd = HUF_getDTableDesc(DTable); return dtd.tableType != 0 - ? HUF_decompress1X2_usingDTable_internal( - dst, - maxDstSize, - cSrc, - cSrcSize, - DTable, - flags - ) + ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress1X1_usingDTable_internal( dst, maxDstSize, @@ -2513,14 +2451,7 @@ int flags { DTableDesc dtd = HUF_getDTableDesc(DTable); return dtd.tableType != 0 - ? HUF_decompress4X2_usingDTable_internal( - dst, - maxDstSize, - cSrc, - cSrcSize, - DTable, - flags - ) + ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : HUF_decompress4X1_usingDTable_internal( dst, maxDstSize, diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs index aa18dfc2e..673c7c95f 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Mem.cs @@ -138,7 +138,7 @@ private static nuint MEM_readLEST(void* memPtr) if (!BitConverter.IsLittleEndian) { #if NET8_0_OR_GREATER - val = BinaryPrimitives.ReverseEndianness(val); + val = BinaryPrimitives.ReverseEndianness(val); #else val = ReverseEndiannessNative(val); #endif @@ -152,7 +152,7 @@ private static void MEM_writeLEST(void* memPtr, nuint val) if (!BitConverter.IsLittleEndian) { #if NET8_0_OR_GREATER - val = BinaryPrimitives.ReverseEndianness(val); + val = BinaryPrimitives.ReverseEndianness(val); #else val = ReverseEndiannessNative(val); #endif diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs index 90f9dd50c..7b5f08ad9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RSyncState_t.cs @@ -5,4 +5,4 @@ public struct RSyncState_t public ulong hash; public ulong hitMask; public ulong primePower; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs index b9e5d8a35..07db05a17 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RawSeqStore_t.cs @@ -18,13 +18,7 @@ public unsafe struct RawSeqStore_t /* The capacity starting from `seq` pointer */ public nuint capacity; - public RawSeqStore_t( - rawSeq* seq, - nuint pos, - nuint posInSequence, - nuint size, - nuint capacity - ) + public 
RawSeqStore_t(rawSeq* seq, nuint pos, nuint posInSequence, nuint size, nuint capacity) { this.seq = seq; this.pos = pos; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs index ea0ed8a55..84e42ca41 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/RoundBuff_t.cs @@ -25,4 +25,4 @@ public RoundBuff_t(byte* buffer, nuint capacity, nuint pos) this.capacity = capacity; this.pos = pos; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs index 11c4aeba0..1e5aac57e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/SeqDef_s.cs @@ -11,4 +11,4 @@ public struct SeqDef_s /* mlBase == matchLength - MINMATCH */ public ushort mlBase; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs index ecb030731..7da74bdf9 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/XXH32_canonical_t.cs @@ -7,4 +7,4 @@ public unsafe struct XXH32_canonical_t { /*!< Hash bytes, big endian */ public fixed byte digest[4]; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs index 396a720dd..ab8d814b5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Xxhash.cs @@ -164,12 +164,7 @@ private static uint XXH32_finalize(uint hash, byte* ptr, nuint len, XXH_alignmen * @param align Whether @p input is aligned. * @return The calculated hash. 
*/ - private static uint XXH32_endian_align( - byte* input, - nuint len, - uint seed, - XXH_alignment align - ) + private static uint XXH32_endian_align(byte* input, nuint len, uint seed, XXH_alignment align) { uint h32; if (len >= 16) @@ -324,12 +319,7 @@ private static uint ZSTD_XXH32_digest(XXH32_state_s* state) } h32 += state->total_len_32; - return XXH32_finalize( - h32, - (byte*)state->mem32, - state->memsize, - XXH_alignment.XXH_aligned - ); + return XXH32_finalize(h32, (byte*)state->mem32, state->memsize, XXH_alignment.XXH_aligned); } /*! @ingroup XXH32_family */ @@ -426,9 +416,7 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm ulong k1 = XXH64_round(0, XXH_readLE64_align(ptr, align)); ptr += 8; hash ^= k1; - hash = - BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL - + 0x85EBCA77C2B2AE63UL; + hash = BitOperations.RotateLeft(hash, 27) * 0x9E3779B185EBCA87UL + 0x85EBCA77C2B2AE63UL; len -= 8; } @@ -436,9 +424,7 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm { hash ^= XXH_readLE32_align(ptr, align) * 0x9E3779B185EBCA87UL; ptr += 4; - hash = - BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL - + 0x165667B19E3779F9UL; + hash = BitOperations.RotateLeft(hash, 23) * 0xC2B2AE3D27D4EB4FUL + 0x165667B19E3779F9UL; len -= 4; } @@ -460,12 +446,7 @@ private static ulong XXH64_finalize(ulong hash, byte* ptr, nuint len, XXH_alignm * @param align Whether @p input is aligned. * @return The calculated hash. 
*/ - private static ulong XXH64_endian_align( - byte* input, - nuint len, - ulong seed, - XXH_alignment align - ) + private static ulong XXH64_endian_align(byte* input, nuint len, ulong seed, XXH_alignment align) { ulong h64; if (len >= 32) diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs index 39811dfd4..dfc4fecf2 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZDICT_cover_params_t.cs @@ -27,4 +27,4 @@ public struct ZDICT_cover_params_t /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ public uint shrinkDictMaxRegression; public ZDICT_params_t zParams; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs index 1b9d33b46..0c64573a3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTDMT_bufferPool_s.cs @@ -8,4 +8,4 @@ public unsafe struct ZSTDMT_bufferPool_s public uint nbBuffers; public ZSTD_customMem cMem; public buffer_s* buffers; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CDict_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CDict_s.cs index 39d2bd171..02642d2a0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CDict_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_CDict_s.cs @@ -27,4 +27,4 @@ public unsafe struct ZSTD_CDict_s * the same greedy/lazy matchfinder at compression time. 
*/ public ZSTD_paramSwitch_e useRowMatchFinder; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDictHashSet.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDictHashSet.cs index 567c503c6..c083994be 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDictHashSet.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDictHashSet.cs @@ -6,4 +6,4 @@ public unsafe struct ZSTD_DDictHashSet public ZSTD_DDict_s** ddictPtrTable; public nuint ddictPtrTableSize; public nuint ddictPtrCount; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDict_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDict_s.cs index 50477dc45..33e86e9f0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDict_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DDict_s.cs @@ -12,4 +12,4 @@ public unsafe struct ZSTD_DDict_s public uint dictID; public uint entropyPresent; public ZSTD_customMem cMem; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs index ed5224e85..ac4ad573e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_DefaultPolicy_e.cs @@ -4,4 +4,4 @@ public enum ZSTD_DefaultPolicy_e { ZSTD_defaultDisallowed = 0, ZSTD_defaultAllowed = 1, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ErrorCode.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ErrorCode.cs index 506f10fec..9e5de435a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ErrorCode.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_ErrorCode.cs @@ -56,4 +56,4 @@ public enum ZSTD_ErrorCode /* never EVER use this value directly, it can change in future versions! 
Use ZSTD_isError() instead */ ZSTD_error_maxCode = 120, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OptPrice_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OptPrice_e.cs index bde76eaeb..229a5da89 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OptPrice_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_OptPrice_e.cs @@ -4,4 +4,4 @@ public enum ZSTD_OptPrice_e { zop_dynamic = 0, zop_predef, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs index 3b2d2d6f9..db0ec511e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_SequencePosition.cs @@ -10,4 +10,4 @@ public struct ZSTD_SequencePosition /* Number of bytes given by sequences provided so far */ public nuint posInSrc; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs index 3ecc07f5b..8d5a1a917 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_buffered_policy_e.cs @@ -9,4 +9,4 @@ public enum ZSTD_buffered_policy_e { ZSTDb_not_buffered, ZSTDb_buffered, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs index 3c1343c49..c929b7fac 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cStreamStage.cs @@ -5,4 +5,4 @@ public enum ZSTD_cStreamStage zcss_init = 0, zcss_load, zcss_flush, -} \ No newline at end of file +} diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs index 36fb340df..d943004d0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compResetPolicy_e.cs @@ -11,4 +11,4 @@ public enum ZSTD_compResetPolicy_e { ZSTDcrp_makeClean, ZSTDcrp_leaveDirty, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs index 4f31af03b..9ffa4833c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_compressionStage_e.cs @@ -9,4 +9,4 @@ public enum ZSTD_compressionStage_e ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs index 21654b295..16776a740 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_customMem.cs @@ -12,4 +12,4 @@ public ZSTD_customMem(void* customAlloc, void* customFree, void* opaque) this.customFree = customFree; this.opaque = opaque; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs index 82f30a259..8f290f3a5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_cwksp_static_alloc_e.cs @@ -9,4 +9,4 @@ public enum ZSTD_cwksp_static_alloc_e { ZSTD_cwksp_dynamic_alloc, ZSTD_cwksp_static_alloc, -} \ No newline at end of file +} diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs index 7cc247e16..851e06f7b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dStreamStage.cs @@ -7,4 +7,4 @@ public enum ZSTD_dStreamStage zdss_read, zdss_load, zdss_flush, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs index 2b821ecf7..3b97b12df 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictContentType_e.cs @@ -10,4 +10,4 @@ public enum ZSTD_dictContentType_e /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ ZSTD_dct_fullDict = 2, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs index e7f2d2ef0..314507c50 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_dictUses_e.cs @@ -10,4 +10,4 @@ public enum ZSTD_dictUses_e /* Use the dictionary once and set to ZSTD_dont_use */ ZSTD_use_once = 1, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs index 84a487a7d..91dd9d7d3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_entropyDTables_t.cs @@ -19,11 +19,11 @@ public unsafe struct ZSTD_entropyDTables_t public fixed uint workspace[157]; #if NET8_0_OR_GREATER - [InlineArray(513)] - public 
unsafe struct _LLTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - } + [InlineArray(513)] + public unsafe struct _LLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } #else public unsafe struct _LLTable_e__FixedBuffer @@ -545,11 +545,11 @@ public unsafe struct _LLTable_e__FixedBuffer #endif #if NET8_0_OR_GREATER - [InlineArray(257)] - public unsafe struct _OFTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - } + [InlineArray(257)] + public unsafe struct _OFTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } #else public unsafe struct _OFTable_e__FixedBuffer @@ -815,11 +815,11 @@ public unsafe struct _OFTable_e__FixedBuffer #endif #if NET8_0_OR_GREATER - [InlineArray(513)] - public unsafe struct _MLTable_e__FixedBuffer - { - public ZSTD_seqSymbol e0; - } + [InlineArray(513)] + public unsafe struct _MLTable_e__FixedBuffer + { + public ZSTD_seqSymbol e0; + } #else public unsafe struct _MLTable_e__FixedBuffer @@ -1339,4 +1339,4 @@ public unsafe struct _MLTable_e__FixedBuffer public ZSTD_seqSymbol e512; } #endif -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs index f42886222..a83226218 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_forceIgnoreChecksum_e.cs @@ -5,4 +5,4 @@ public enum ZSTD_forceIgnoreChecksum_e /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ ZSTD_d_validateChecksum = 0, ZSTD_d_ignoreChecksum = 1, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs index afa709f0d..8c80f7592 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameHeader.cs @@ -18,4 +18,4 @@ 
public struct ZSTD_frameHeader public uint checksumFlag; public uint _reserved1; public uint _reserved2; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs index 68a380fc8..adf34b4df 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameProgression.cs @@ -19,4 +19,4 @@ public struct ZSTD_frameProgression /* MT only : nb of workers actively compressing at probe time */ public uint nbActiveWorkers; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs index 80e6e8391..a399748fa 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_frameType_e.cs @@ -4,4 +4,4 @@ public enum ZSTD_frameType_e { ZSTD_frame, ZSTD_skippableFrame, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs index 0fca629fd..3a9d621b3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_fseCTables_t.cs @@ -8,4 +8,4 @@ public unsafe struct ZSTD_fseCTables_t public FSE_repeat offcode_repeatMode; public FSE_repeat matchlength_repeatMode; public FSE_repeat litlength_repeatMode; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs index 7ba93122f..bedd8c8bf 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_hufCTables_t.cs @@ -8,11 
+8,11 @@ public unsafe struct ZSTD_hufCTables_t public HUF_repeat repeatMode; #if NET8_0_OR_GREATER - [InlineArray(257)] - public unsafe struct _CTable_e__FixedBuffer - { - public nuint e0; - } + [InlineArray(257)] + public unsafe struct _CTable_e__FixedBuffer + { + public nuint e0; + } #else public unsafe struct _CTable_e__FixedBuffer diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs index 14c4f2c5a..58242a2c5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_inBuffer_s.cs @@ -13,4 +13,4 @@ public unsafe struct ZSTD_inBuffer_s /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ public nuint pos; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs index f461d52df..b3638aa07 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_indexResetPolicy_e.cs @@ -9,4 +9,4 @@ public enum ZSTD_indexResetPolicy_e { ZSTDirp_continue, ZSTDirp_reset, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs index 3ffa6739c..4b9704519 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_localDict.cs @@ -7,4 +7,4 @@ public unsafe struct ZSTD_localDict public nuint dictSize; public ZSTD_dictContentType_e dictContentType; public ZSTD_CDict_s* cdict; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs 
index 1994b34d1..141643f29 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_longLengthType_e.cs @@ -11,4 +11,4 @@ public enum ZSTD_longLengthType_e /* represents a long match */ ZSTD_llt_matchLength = 2, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs index bc953181f..fa44b9ec5 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_match_t.cs @@ -10,4 +10,4 @@ public struct ZSTD_match_t /* Raw length of match */ public uint len; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs index 38dc21717..c2cc00895 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_outBuffer_s.cs @@ -10,4 +10,4 @@ public unsafe struct ZSTD_outBuffer_s /**< position where writing stopped. Will be updated. 
Necessarily 0 <= pos <= size */ public nuint pos; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs index eefcdcb47..ebdf19696 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_parameters.cs @@ -4,4 +4,4 @@ public struct ZSTD_parameters { public ZSTD_compressionParameters cParams; public ZSTD_frameParameters fParams; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs index 0da6810a7..679e8b281 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_resetTarget_e.cs @@ -4,4 +4,4 @@ public enum ZSTD_resetTarget_e { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs index d088f2454..622ff4fcd 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_seqSymbol.cs @@ -14,4 +14,4 @@ public ZSTD_seqSymbol(ushort nextState, byte nbAdditionalBits, byte nbBits, uint this.nbBits = nbBits; this.baseValue = baseValue; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs index 606e7be2c..3b6ca5258 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_sequenceFormat_e.cs @@ -7,4 +7,4 @@ public enum ZSTD_sequenceFormat_e /* ZSTD_Sequence[] contains explicit block delimiters */ 
ZSTD_sf_explicitBlockDelimiters = 1, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs index 3f58f6ad6..f218de33b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZSTD_tableFillPurpose_e.cs @@ -4,4 +4,4 @@ public enum ZSTD_tableFillPurpose_e { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict, -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs index 77c11d81a..a79aa0082 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/Zdict.cs @@ -172,13 +172,13 @@ uint notificationLevel }; ZSTD_parameters @params; uint u, - huffLog = 11, - Offlog = 8, - mlLog = 9, - llLog = 9, - total; + huffLog = 11, + Offlog = 8, + mlLog = 9, + llLog = 9, + total; nuint pos = 0, - errorCode; + errorCode; nuint eSize = 0; nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 
1 : 0)); @@ -186,9 +186,7 @@ uint notificationLevel uint* wksp = stackalloc uint[1216]; if (offcodeMax > 30) { - eSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed) - ); + eSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed)); goto _cleanup; } @@ -285,14 +283,7 @@ uint notificationLevel total = 0; for (u = 0; u <= offcodeMax; u++) total += offcodeCount[u]; - errorCode = FSE_normalizeCount( - offcodeNCount, - Offlog, - offcodeCount, - total, - offcodeMax, - 1 - ); + errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, 1); if (ERR_isError(errorCode)) { eSize = errorCode; @@ -303,14 +294,7 @@ uint notificationLevel total = 0; for (u = 0; u <= 52; u++) total += matchLengthCount[u]; - errorCode = FSE_normalizeCount( - matchLengthNCount, - mlLog, - matchLengthCount, - total, - 52, - 1 - ); + errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, 52, 1); if (ERR_isError(errorCode)) { eSize = errorCode; @@ -653,4 +637,4 @@ uint nbSamples @params ); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs index a10b181e2..8198034b7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompress.cs @@ -26,8 +26,8 @@ public static nuint ZSTD_compressBound(nuint srcSize) srcSize >= (sizeof(nuint) == 8 ? 0xFF00FF00FF00FF00UL : 0xFF00FF00U) ? 0 : srcSize - + (srcSize >> 8) - + (srcSize < 128 << 10 ? (128 << 10) - srcSize >> 11 : 0); + + (srcSize >> 8) + + (srcSize < 128 << 10 ? 
(128 << 10) - srcSize >> 11 : 0); if (r == 0) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)); return r; @@ -50,10 +50,7 @@ private static void ZSTD_initCCtx(ZSTD_CCtx_s* cctx, ZSTD_customMem memManager) public static ZSTD_CCtx_s* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; { ZSTD_CCtx_s* cctx = (ZSTD_CCtx_s*)ZSTD_customMalloc( @@ -205,9 +202,9 @@ public static nuint ZSTD_sizeof_CCtx(ZSTD_CCtx_s* cctx) if (cctx == null) return 0; return (nuint)(cctx->workspace.workspace == cctx ? 0 : sizeof(ZSTD_CCtx_s)) - + ZSTD_cwksp_sizeof(&cctx->workspace) - + ZSTD_sizeof_localDict(cctx->localDict) - + ZSTD_sizeof_mtctx(cctx); + + ZSTD_cwksp_sizeof(&cctx->workspace) + + ZSTD_sizeof_localDict(cctx->localDict) + + ZSTD_sizeof_mtctx(cctx); } public static nuint ZSTD_sizeof_CStream(ZSTD_CCtx_s* zcs) @@ -236,10 +233,9 @@ private static int ZSTD_rowMatchFinderUsed(ZSTD_strategy strategy, ZSTD_paramSwi { assert(mode != ZSTD_paramSwitch_e.ZSTD_ps_auto); return - ZSTD_rowMatchFinderSupported(strategy) != 0 - && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable - ? 1 - : 0; + ZSTD_rowMatchFinderSupported(strategy) != 0 && mode == ZSTD_paramSwitch_e.ZSTD_ps_enable + ? 1 + : 0; } /* Returns row matchfinder usage given an initial mode and cParams */ @@ -282,9 +278,9 @@ uint forDDSDict return forDDSDict != 0 || strategy != ZSTD_strategy.ZSTD_fast - && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 - ? 1 - : 0; + && ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder) == 0 + ? 
1 + : 0; } /* Returns ZSTD_ps_enable if compression parameters are such that we should @@ -345,8 +341,8 @@ private static int ZSTD_CDictIndicesAreTagged(ZSTD_compressionParameters* cParam return cParams->strategy == ZSTD_strategy.ZSTD_fast || cParams->strategy == ZSTD_strategy.ZSTD_dfast - ? 1 - : 0; + ? 1 + : 0; } private static ZSTD_CCtx_params_s ZSTD_makeCCtxParamsFromCParams( @@ -390,10 +386,7 @@ ZSTD_compressionParameters cParams private static ZSTD_CCtx_params_s* ZSTD_createCCtxParams_advanced(ZSTD_customMem customMem) { ZSTD_CCtx_params_s* @params; - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; @params = (ZSTD_CCtx_params_s*)ZSTD_customCalloc( (nuint)sizeof(ZSTD_CCtx_params_s), @@ -454,10 +447,7 @@ public static nuint ZSTD_CCtxParams_reset(ZSTD_CCtx_params_s* @params) * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ - public static nuint ZSTD_CCtxParams_init( - ZSTD_CCtx_params_s* cctxParams, - int compressionLevel - ) + public static nuint ZSTD_CCtxParams_init(ZSTD_CCtx_params_s* cctxParams, int compressionLevel) { if (cctxParams == null) { @@ -812,11 +802,7 @@ private static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) * new parameters will be active for next job only (after a flush()). * @return : an error code (which can be tested using ZSTD_isError()). 
*/ - public static nuint ZSTD_CCtx_setParameter( - ZSTD_CCtx_s* cctx, - ZSTD_cParameter param, - int value - ) + public static nuint ZSTD_CCtx_setParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int value) { if (cctx->streamStage != ZSTD_cStreamStage.zcss_init) { @@ -881,9 +867,7 @@ int value case ZSTD_cParameter.ZSTD_c_experimentalParam19: break; default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); @@ -906,14 +890,9 @@ int value switch (param) { case ZSTD_cParameter.ZSTD_c_experimentalParam2: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam2, value) == 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->format = (ZSTD_format_e)value; @@ -995,9 +974,7 @@ int value case ZSTD_cParameter.ZSTD_c_targetLength: if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, value) == 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->cParams.targetLength = (uint)value; @@ -1029,15 +1006,11 @@ int value { ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam4, - (int)pref - ) == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam4, (int)pref) + == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->attachDictPref = pref; @@ -1048,15 
+1021,11 @@ int value { ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam5, - (int)lcm - ) == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam5, (int)lcm) + == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->literalCompressionMode = lcm; @@ -1064,13 +1033,13 @@ int value } case ZSTD_cParameter.ZSTD_c_nbWorkers: - { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) + { + return err_code; + } } - } CCtxParams->nbWorkers = value; return (nuint)CCtxParams->nbWorkers; @@ -1078,42 +1047,42 @@ int value if (value != 0 && value < 512 * (1 << 10)) value = 512 * (1 << 10); - { - nuint err_code = ZSTD_cParam_clampBounds(param, &value); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_cParam_clampBounds(param, &value); + if (ERR_isError(err_code)) + { + return err_code; + } } - } assert(value >= 0); CCtxParams->jobSize = (nuint)value; return CCtxParams->jobSize; case ZSTD_cParameter.ZSTD_c_overlapLog: - { - nuint err_code = ZSTD_cParam_clampBounds( - ZSTD_cParameter.ZSTD_c_overlapLog, - &value - ); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - } CCtxParams->overlapLog = value; return (nuint)CCtxParams->overlapLog; case ZSTD_cParameter.ZSTD_c_experimentalParam1: - { - nuint err_code = ZSTD_cParam_clampBounds( - ZSTD_cParameter.ZSTD_c_overlapLog, - &value - ); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_cParam_clampBounds( + ZSTD_cParameter.ZSTD_c_overlapLog, + &value + ); + if 
(ERR_isError(err_code)) + { + return err_code; + } } - } CCtxParams->rsyncable = value; return (nuint)CCtxParams->rsyncable; @@ -1128,9 +1097,7 @@ int value ) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; @@ -1148,9 +1115,7 @@ int value return CCtxParams->ldmParams.hashLog; case ZSTD_cParameter.ZSTD_c_ldmMinMatch: if (value != 0) - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmMinMatch, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) @@ -1175,10 +1140,7 @@ int value return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_cParameter.ZSTD_c_ldmHashRateLog: if (value != 0) - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_ldmHashRateLog, value) == 0) { return unchecked( (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) @@ -1207,10 +1169,8 @@ int value case ZSTD_cParameter.ZSTD_c_experimentalParam7: if (value != 0) if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam7, - value - ) == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam7, value) + == 0 ) { return unchecked( @@ -1221,131 +1181,99 @@ int value CCtxParams->srcSizeHint = value; return (nuint)CCtxParams->srcSizeHint; case ZSTD_cParameter.ZSTD_c_experimentalParam9: - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam9, value) == 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } 
CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value; return (nuint)CCtxParams->inBufferMode; case ZSTD_cParameter.ZSTD_c_experimentalParam10: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam10, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value; return (nuint)CCtxParams->outBufferMode; case ZSTD_cParameter.ZSTD_c_experimentalParam11: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam11, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; return (nuint)CCtxParams->blockDelimiters; case ZSTD_cParameter.ZSTD_c_experimentalParam12: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam12, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->validateSequences = value; return (nuint)CCtxParams->validateSequences; case ZSTD_cParameter.ZSTD_c_experimentalParam13: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam13, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } 
CCtxParams->postBlockSplitter = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->postBlockSplitter; case ZSTD_cParameter.ZSTD_c_experimentalParam20: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam20, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->preBlockSplitter_level = value; return (nuint)CCtxParams->preBlockSplitter_level; case ZSTD_cParameter.ZSTD_c_experimentalParam14: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam14, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->useRowMatchFinder; case ZSTD_cParameter.ZSTD_c_experimentalParam15: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam15, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->deterministicRefPrefix = !(value == 0) ? 
1 : 0; return (nuint)CCtxParams->deterministicRefPrefix; case ZSTD_cParameter.ZSTD_c_experimentalParam16: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam16, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->prefetchCDictTables; case ZSTD_cParameter.ZSTD_c_experimentalParam17: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam17, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->enableMatchFinderFallback = value; @@ -1353,10 +1281,8 @@ int value case ZSTD_cParameter.ZSTD_c_experimentalParam18: if (value != 0) if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_experimentalParam18, - value - ) == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam18, value) + == 0 ) { return unchecked( @@ -1369,21 +1295,16 @@ int value return CCtxParams->maxBlockSize; case ZSTD_cParameter.ZSTD_c_experimentalParam19: if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) - == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_experimentalParam19, value) == 0 ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; return (nuint)CCtxParams->searchForExternalRepcodes; default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); 
+ return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } } @@ -1392,11 +1313,7 @@ int value * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ - public static nuint ZSTD_CCtx_getParameter( - ZSTD_CCtx_s* cctx, - ZSTD_cParameter param, - int* value - ) + public static nuint ZSTD_CCtx_getParameter(ZSTD_CCtx_s* cctx, ZSTD_cParameter param, int* value) { return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); } @@ -1533,9 +1450,7 @@ public static nuint ZSTD_CCtxParams_getParameter( *value = (int)CCtxParams->searchForExternalRepcodes; break; default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } return 0; @@ -1574,10 +1489,7 @@ public static nuint ZSTD_CCtx_setParametersUsingCCtxParams( * @return 0 on success, or an error code (can be checked with ZSTD_isError()). * On failure, no parameters are updated. */ - public static nuint ZSTD_CCtx_setCParams( - ZSTD_CCtx_s* cctx, - ZSTD_compressionParameters cparams - ) + public static nuint ZSTD_CCtx_setCParams(ZSTD_CCtx_s* cctx, ZSTD_compressionParameters cparams) { { /* only update if all parameters are valid */ @@ -2049,18 +1961,12 @@ control CParam values remain within authorized range. 
@return : 0, or an error code if one value is beyond authorized range */ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) { - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_windowLog, (int)cParams.windowLog) == 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_chainLog, (int)cParams.chainLog) == 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } @@ -2070,36 +1976,25 @@ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_searchLog, (int)cParams.searchLog) == 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_minMatch, (int)cParams.minMatch) == 0) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } if ( - ZSTD_cParam_withinBounds( - ZSTD_cParameter.ZSTD_c_targetLength, - (int)cParams.targetLength - ) == 0 + ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_targetLength, (int)cParams.targetLength) + == 0 ) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } - if ( - ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) - == 0 - ) + if (ZSTD_cParam_withinBounds(ZSTD_cParameter.ZSTD_c_strategy, (int)cParams.strategy) == 0) { return 
unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound)); } @@ -2110,9 +2005,7 @@ public static nuint ZSTD_checkCParams(ZSTD_compressionParameters cParams) /** ZSTD_clampCParams() : * make CParam values within valid range. * @return : valid CParams */ - private static ZSTD_compressionParameters ZSTD_clampCParams( - ZSTD_compressionParameters cParams - ) + private static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { { ZSTD_bounds bounds = ZSTD_cParam_getBounds(ZSTD_cParameter.ZSTD_c_windowLog); @@ -2421,25 +2314,20 @@ uint forCCtx nuint h3Size = hashLog3 != 0 ? (nuint)1 << (int)hashLog3 : 0; /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't * surrounded by redzones in ASAN. */ - nuint tableSpace = - chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); + nuint tableSpace = chainSize * sizeof(uint) + hSize * sizeof(uint) + h3Size * sizeof(uint); nuint optPotentialSpace = ZSTD_cwksp_aligned64_alloc_size((52 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((35 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((31 + 1) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((1 << 8) * sizeof(uint)) + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t))) - + ZSTD_cwksp_aligned64_alloc_size( - (nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t)) - ); + + ZSTD_cwksp_aligned64_alloc_size((nuint)(((1 << 12) + 3) * sizeof(ZSTD_optimal_t))); nuint lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) != 0 ? ZSTD_cwksp_aligned64_alloc_size(hSize) : 0; nuint optSpace = - forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt - ? optPotentialSpace - : 0; + forCCtx != 0 && cParams->strategy >= ZSTD_strategy.ZSTD_btopt ? 
optPotentialSpace : 0; nuint slackSpace = ZSTD_cwksp_slack_space_required(); assert(useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; @@ -2494,15 +2382,12 @@ nuint maxBlockSize ldmParams->enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable ? ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * (nuint)sizeof(rawSeq)) : 0; - nuint bufferSpace = - ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); + nuint bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); nuint cctxSpace = isStatic != 0 ? ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CCtx_s)) : 0; nuint maxNbExternalSeq = ZSTD_sequenceBound(blockSize); nuint externalSeqSpace = useSequenceProducer != 0 - ? ZSTD_cwksp_aligned64_alloc_size( - maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence) - ) + ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * (nuint)sizeof(ZSTD_Sequence)) : 0; nuint neededSpace = cctxSpace @@ -2568,13 +2453,13 @@ public static nuint ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameter } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_srcSizeTiers => - new ulong[4] { 16 * (1 << 10), 128 * (1 << 10), 256 * (1 << 10), unchecked(0UL - 1) }; - private static ulong* srcSizeTiers => - (ulong*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_srcSizeTiers) - ); + private static ReadOnlySpan Span_srcSizeTiers => + new ulong[4] { 16 * (1 << 10), 128 * (1 << 10), 256 * (1 << 10), unchecked(0UL - 1) }; + private static ulong* srcSizeTiers => + (ulong*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_srcSizeTiers) + ); #else private static readonly ulong* srcSizeTiers = GetArrayPointer( @@ -2671,8 +2556,7 @@ public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* ZSTD_CParamMode_e.ZSTD_cpm_noAttachDict ); nuint blockSize = - ZSTD_resolveMaxBlockSize(@params->maxBlockSize) - < 
(nuint)1 << (int)cParams.windowLog + ZSTD_resolveMaxBlockSize(@params->maxBlockSize) < (nuint)1 << (int)cParams.windowLog ? ZSTD_resolveMaxBlockSize(@params->maxBlockSize) : (nuint)1 << (int)cParams.windowLog; nuint inBuffSize = @@ -2701,9 +2585,7 @@ public static nuint ZSTD_estimateCStreamSize_usingCCtxParams(ZSTD_CCtx_params_s* } } - public static nuint ZSTD_estimateCStreamSize_usingCParams( - ZSTD_compressionParameters cParams - ) + public static nuint ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) { ZSTD_CCtx_params_s initialParams = ZSTD_makeCCtxParamsFromCParams(cParams); if (ZSTD_rowMatchFinderSupported(cParams.strategy) != 0) @@ -2884,8 +2766,7 @@ ZSTD_resetTarget_e forWho ZSTD_allocateChainTable( cParams->strategy, useRowMatchFinder, - ms->dedicatedDictSearch != 0 - && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict + ms->dedicatedDictSearch != 0 && forWho == ZSTD_resetTarget_e.ZSTD_resetTarget_CDict ? 1U : 0U ) != 0 @@ -2964,10 +2845,7 @@ ZSTD_resetTarget_e forWho ws, (52 + 1) * sizeof(uint) ); - ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64( - ws, - (31 + 1) * sizeof(uint) - ); + ms->opt.offCodeFreq = (uint*)ZSTD_cwksp_reserve_aligned64(ws, (31 + 1) * sizeof(uint)); ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64( ws, (nuint)(((1 << 12) + 3) * sizeof(ZSTD_match_t)) @@ -2992,8 +2870,8 @@ private static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) return (nuint)(w.nextSrc - w.@base) > (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - 16 * (1 << 20) - ? 1 - : 0; + ? 1 + : 0; } /** ZSTD_dictTooBig(): @@ -3006,8 +2884,8 @@ private static int ZSTD_dictTooBig(nuint loadedDictSize) return loadedDictSize > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - ? 1 - : 0; + ? 1 + : 0; } /*! 
ZSTD_resetCCtx_internal() : @@ -3307,10 +3185,10 @@ ulong pledgedSrcSize || pledgedSrcSize == unchecked(0UL - 1) || @params->attachDictPref == ZSTD_dictAttachPref_e.ZSTD_dictForceAttach ) - && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy - && @params->forceWindow == 0 - ? 1 - : 0; + && @params->attachDictPref != ZSTD_dictAttachPref_e.ZSTD_dictForceCopy + && @params->forceWindow == 0 + ? 1 + : 0; } private static nuint ZSTD_resetCCtx_byAttachingCDict( @@ -3373,9 +3251,9 @@ ZSTD_buffered_policy_e zbuff } cctx->blockState.matchState.loadedDictEnd = cctx->blockState - .matchState - .window - .dictLimit; + .matchState + .window + .dictLimit; } } @@ -3455,8 +3333,7 @@ ZSTD_buffered_policy_e zbuff { /* DDS guaranteed disabled */ nuint chainSize = - ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) - != 0 + ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0) != 0 ? (nuint)1 << (int)cdict_cParams->chainLog : 0; nuint hSize = (nuint)1 << (int)cdict_cParams->hashLog; @@ -3534,13 +3411,7 @@ ZSTD_buffered_policy_e zbuff { if (ZSTD_shouldAttachDict(cdict, @params, pledgedSrcSize) != 0) { - return ZSTD_resetCCtx_byAttachingCDict( - cctx, - cdict, - *@params, - pledgedSrcSize, - zbuff - ); + return ZSTD_resetCCtx_byAttachingCDict(cctx, cdict, *@params, pledgedSrcSize, zbuff); } else { @@ -3574,9 +3445,7 @@ ZSTD_buffered_policy_e zbuff @params.cParams = srcCCtx->appliedParams.cParams; assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_paramSwitch_e.ZSTD_ps_auto); assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_paramSwitch_e.ZSTD_ps_auto); - assert( - srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto - ); + assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_auto); @params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; @params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; @params.ldmParams = 
srcCCtx->appliedParams.ldmParams; @@ -3591,23 +3460,19 @@ ZSTD_buffered_policy_e zbuff zbuff ); assert( - dstCCtx->appliedParams.cParams.windowLog - == srcCCtx->appliedParams.cParams.windowLog + dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog ); assert( - dstCCtx->appliedParams.cParams.strategy - == srcCCtx->appliedParams.cParams.strategy + dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy ); assert( dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog ); assert( - dstCCtx->appliedParams.cParams.chainLog - == srcCCtx->appliedParams.cParams.chainLog + dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog ); assert( - dstCCtx->blockState.matchState.hashLog3 - == srcCCtx->blockState.matchState.hashLog3 + dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3 ); } @@ -3889,7 +3754,7 @@ nuint entropyWkspSize ); assert( SymbolEncodingType_e.set_basic < SymbolEncodingType_e.set_compressed - && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed + && SymbolEncodingType_e.set_rle < SymbolEncodingType_e.set_compressed ); assert( !( @@ -4098,8 +3963,7 @@ int bmi2 { nuint numSequences = (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart); /* Base suspicion of uncompressibility on ratio of literals to sequences */ - int suspectUncompressible = - numSequences == 0 || litSize / numSequences >= 20 ? 1 : 0; + int suspectUncompressible = numSequences == 0 || litSize / numSequences >= 20 ? 
1 : 0; nuint cSize = ZSTD_compressLiterals( op, dstCapacity, @@ -4471,9 +4335,7 @@ nuint srcSize if (nbExternalSeqs == outSeqsCapacity) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_sequenceProducer_failed)); } outSeqs[nbExternalSeqs] = new ZSTD_Sequence(); @@ -4490,8 +4352,8 @@ nuint srcSize private static nuint ZSTD_fastSequenceLengthSum(ZSTD_Sequence* seqBuf, nuint seqBufSize) { nuint matchLenSum, - litLenSum, - i; + litLenSum, + i; matchLenSum = 0; litLenSum = 0; for (i = 0; i < seqBufSize; i++) @@ -4549,11 +4411,7 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz if (curr > ms->nextToUpdate + 384) ms->nextToUpdate = curr - - ( - 192 < curr - ms->nextToUpdate - 384 - ? 192 - : curr - ms->nextToUpdate - 384 - ); + - (192 < curr - ms->nextToUpdate - 384 ? 192 : curr - ms->nextToUpdate - 384); } { @@ -4567,15 +4425,11 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert( - zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable - ); + assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_disable); if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported) ); } @@ -4596,9 +4450,7 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz if (ZSTD_hasExtSeqProd(&zc->appliedParams) != 0) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_combination_unsupported) ); } @@ -4648,7 +4500,7 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz int, nuint, nuint>) - 
zc->appliedParams.extSeqProdFunc + zc->appliedParams.extSeqProdFunc )( zc->appliedParams.extSeqProdState, zc->extSeqBuf, @@ -4681,9 +4533,7 @@ private static nuint ZSTD_buildSeqStore(ZSTD_CCtx_s* zc, void* src, nuint srcSiz if (seqLenSum > srcSize) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) ); } @@ -4916,9 +4766,7 @@ nuint srcSize if (targetCBlockSize != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } } @@ -4938,9 +4786,7 @@ nuint srcSize if (nbWorkers != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); } } @@ -5102,9 +4948,7 @@ int hufFlags } { - nuint minLitSize = (nuint)( - prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 6 : 63 - ); + nuint minLitSize = (nuint)(prevHuf->repeatMode == HUF_repeat.HUF_repeat_valid ? 6 : 63); if (srcSize <= minLitSize) { hufMetadata->hType = SymbolEncodingType_e.set_basic; @@ -5204,10 +5048,7 @@ int hufFlags countWksp, maxSymbolValue ); - if ( - oldCSize < srcSize - && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize) - ) + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { memcpy(nextHuf, prevHuf, (uint)sizeof(ZSTD_hufCTables_t)); hufMetadata->hType = SymbolEncodingType_e.set_repeat; @@ -5322,8 +5163,7 @@ nuint wkspSize ) { nuint litSize = (nuint)(seqStorePtr->lit - seqStorePtr->litStart); - int huf_useOptDepth = - cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; + int huf_useOptDepth = cctxParams->cParams.strategy >= ZSTD_strategy.ZSTD_btultra ? 1 : 0; int hufFlags = huf_useOptDepth != 0 ? 
(int)HUF_flags_e.HUF_flags_optimalDepth : 0; entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals( seqStorePtr->litStart, @@ -5899,11 +5739,9 @@ uint isPartition } if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; return cSize; } @@ -6284,11 +6122,9 @@ uint frame } if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; return cSize; } @@ -6322,10 +6158,7 @@ uint lastBlock srcSize, lastBlock ); - if ( - cSize - != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)) - ) + if (cSize != unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall))) { nuint maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); @@ -6386,11 +6219,9 @@ uint lastBlock } if ( - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode - == FSE_repeat.FSE_repeat_valid + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat.FSE_repeat_valid ) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = - FSE_repeat.FSE_repeat_check; + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat.FSE_repeat_check; return cSize; } @@ -6429,13 +6260,13 @@ private static void ZSTD_overflowCorrectIfNeeded( } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_splitLevels => - new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; - private static int* splitLevels => - 
(int*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_splitLevels) - ); + private static ReadOnlySpan Span_splitLevels => + new int[10] { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; + private static int* splitLevels => + (int*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_splitLevels) + ); #else private static readonly int* splitLevels = GetArrayPointer( @@ -6627,11 +6458,11 @@ uint lastFrameChunk uint cBlockHeader = cSize == 1 ? lastBlock - + ((uint)blockType_e.bt_rle << 1) - + (uint)(blockSize << 3) + + ((uint)blockType_e.bt_rle << 1) + + (uint)(blockSize << 3) : lastBlock - + ((uint)blockType_e.bt_compressed << 1) - + (uint)(cSize << 3); + + ((uint)blockType_e.bt_compressed << 1) + + (uint)(cSize << 3); MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } @@ -6676,17 +6507,15 @@ uint dictID uint fcsCode = (uint)( @params->fParams.contentSizeFlag != 0 ? (pledgedSrcSize >= 256 ? 1 : 0) - + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) - + (pledgedSrcSize >= 0xFFFFFFFFU ? 1 : 0) + + (pledgedSrcSize >= 65536 + 256 ? 1 : 0) + + (pledgedSrcSize >= 0xFFFFFFFFU ? 1 : 0) : 0 ); byte frameHeaderDescriptionByte = (byte)( dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6) ); nuint pos = 0; - assert( - !(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1)) - ); + assert(!(@params->fParams.contentSizeFlag != 0 && pledgedSrcSize == unchecked(0UL - 1))); if (dstCapacity < 18) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -6813,16 +6642,12 @@ private static nuint ZSTD_writeLastEmptyBlock(void* dst, nuint dstCapacity) * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory * access and data corruption. 
*/ - private static void ZSTD_referenceExternalSequences( - ZSTD_CCtx_s* cctx, - rawSeq* seq, - nuint nbSeq - ) + private static void ZSTD_referenceExternalSequences(ZSTD_CCtx_s* cctx, rawSeq* seq, nuint nbSeq) { assert(cctx->stage == ZSTD_compressionStage_e.ZSTDcs_init); assert( nbSeq == 0 - || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable + || cctx->appliedParams.ldmParams.enableLdm != ZSTD_paramSwitch_e.ZSTD_ps_enable ); cctx->externSeqStore.seq = seq; cctx->externSeqStore.size = nbSeq; @@ -6898,14 +6723,7 @@ uint lastFrameChunk { nuint cSize = frame != 0 - ? ZSTD_compress_frameChunk( - cctx, - dst, - dstCapacity, - src, - srcSize, - lastFrameChunk - ) + ? ZSTD_compress_frameChunk(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize, 0); { nuint err_code = cSize; @@ -7022,9 +6840,7 @@ ZSTD_tableFillPurpose_e tfp byte* ip = (byte*)src; byte* iend = ip + srcSize; int loadLdmDict = - @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null - ? 1 - : 0; + @params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable && ls != null ? 1 : 0; ZSTD_assertEqualCParams(@params->cParams, ms->cParams); { /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. @@ -7054,10 +6870,7 @@ ZSTD_tableFillPurpose_e tfp } } - if ( - srcSize - > unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - ) + if (srcSize > unchecked((uint)-1) - (MEM_64bits ? 
3500U * (1 << 20) : 2000U * (1 << 20))) { assert(ZSTD_window_isEmpty(ms->window) != 0); #if DEBUG @@ -7258,7 +7071,7 @@ nuint dictSize { short* matchlengthNCount = stackalloc short[53]; uint matchlengthMaxValue = 52, - matchlengthLog; + matchlengthLog; nuint matchlengthHeaderSize = FSE_readNCount( matchlengthNCount, &matchlengthMaxValue, @@ -7303,7 +7116,7 @@ nuint dictSize { short* litlengthNCount = stackalloc short[36]; uint litlengthMaxValue = 35, - litlengthLog; + litlengthLog; nuint litlengthHeaderSize = FSE_readNCount( litlengthNCount, &litlengthMaxValue, @@ -7487,16 +7300,7 @@ private static nuint ZSTD_compress_insertDictionary( { if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_auto) { - return ZSTD_loadDictionaryContent( - ms, - ls, - ws, - @params, - dict, - dictSize, - dtlm, - tfp - ); + return ZSTD_loadDictionaryContent(ms, ls, ws, @params, dict, dictSize, dtlm, tfp); } if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) @@ -7507,17 +7311,7 @@ private static nuint ZSTD_compress_insertDictionary( assert(0 != 0); } - return ZSTD_loadZstdDictionary( - bs, - ms, - ws, - @params, - dict, - dictSize, - dtlm, - tfp, - workspace - ); + return ZSTD_loadZstdDictionary(bs, ms, ws, @params, dict, dictSize, dtlm, tfp, workspace); } /*! 
ZSTD_compressBegin_internal() : @@ -7795,15 +7589,7 @@ nuint srcSize ) { nuint endResult; - nuint cSize = ZSTD_compressContinue_internal( - cctx, - dst, - dstCapacity, - src, - srcSize, - 1, - 1 - ); + nuint cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); { nuint err_code = cSize; if (ERR_isError(err_code)) @@ -7822,10 +7608,7 @@ nuint srcSize } assert( - !( - cctx->appliedParams.fParams.contentSizeFlag != 0 - && cctx->pledgedSrcSizePlusOne == 0 - ) + !(cctx->appliedParams.fParams.contentSizeFlag != 0 && cctx->pledgedSrcSizePlusOne == 0) ); if (cctx->pledgedSrcSizePlusOne != 0) { @@ -8032,18 +7815,18 @@ ZSTD_dictLoadMethod_e dictLoadMethod ) { return ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) - + ZSTD_cwksp_alloc_size((8 << 10) + 512) - + ZSTD_sizeof_matchState( - &cParams, - ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), - 1, - 0 - ) - + ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - ? 0 - : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) - ); + + ZSTD_cwksp_alloc_size((8 << 10) + 512) + + ZSTD_sizeof_matchState( + &cParams, + ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e.ZSTD_ps_auto, &cParams), + 1, + 0 + ) + + ( + dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef + ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) + ); } /*! ZSTD_estimate?DictSize() : @@ -8071,7 +7854,7 @@ public static nuint ZSTD_sizeof_CDict(ZSTD_CDict_s* cdict) if (cdict == null) return 0; return (nuint)(cdict->workspace.workspace == cdict ? 0 : sizeof(ZSTD_CDict_s)) - + ZSTD_cwksp_sizeof(&cdict->workspace); + + ZSTD_cwksp_sizeof(&cdict->workspace); } private static nuint ZSTD_initCDict_internal( @@ -8174,27 +7957,17 @@ ZSTD_CCtx_params_s @params ZSTD_customMem customMem ) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 
1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; { nuint workspaceSize = ZSTD_cwksp_alloc_size((nuint)sizeof(ZSTD_CDict_s)) + ZSTD_cwksp_alloc_size((8 << 10) + 512) - + ZSTD_sizeof_matchState( - &cParams, - useRowMatchFinder, - enableDedicatedDictSearch, - 0 - ) + + ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, 0) + ( dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 - : ZSTD_cwksp_alloc_size( - ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*)) - ) + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, (nuint)sizeof(void*))) ); void* workspace = ZSTD_customMalloc(workspaceSize, customMem); ZSTD_cwksp ws; @@ -8260,17 +8033,11 @@ ZSTD_customMem customMem ZSTD_CCtx_params_s cctxParams = *originalCctxParams; ZSTD_compressionParameters cParams; ZSTD_CDict_s* cdict; - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; if (cctxParams.enableDedicatedDictSearch != 0) { - cParams = ZSTD_dedicatedDictSearch_getCParams( - cctxParams.compressionLevel, - dictSize - ); + cParams = ZSTD_dedicatedDictSearch_getCParams(cctxParams.compressionLevel, dictSize); ZSTD_overrideCParams(&cParams, &cctxParams.cParams); } else @@ -8340,11 +8107,7 @@ ZSTD_customMem customMem * in which case the only thing that it transports is the @compressionLevel. * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. 
*/ - public static ZSTD_CDict_s* ZSTD_createCDict( - void* dict, - nuint dictSize, - int compressionLevel - ) + public static ZSTD_CDict_s* ZSTD_createCDict(void* dict, nuint dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal( compressionLevel, @@ -8550,8 +8313,7 @@ ulong pledgedSrcSize if (pledgedSrcSize != unchecked(0UL - 1)) { uint limitedSrcSize = (uint)(pledgedSrcSize < 1U << 19 ? pledgedSrcSize : 1U << 19); - uint limitedSrcLog = - limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; + uint limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; cctxParams.cParams.windowLog = cctxParams.cParams.windowLog > limitedSrcLog ? cctxParams.cParams.windowLog @@ -8621,12 +8383,7 @@ ZSTD_frameParameters fParams { { /* will check if cdict != NULL */ - nuint err_code = ZSTD_compressBegin_usingCDict_internal( - cctx, - cdict, - fParams, - srcSize - ); + nuint err_code = ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize); if (ERR_isError(err_code)) { return err_code; @@ -8994,11 +8751,7 @@ int compressionLevel * "0" also disables frame content size field. It may be enabled in the future. * This prototype will generate compilation warnings. */ - public static nuint ZSTD_initCStream_srcSize( - ZSTD_CCtx_s* zcs, - int compressionLevel, - ulong pss - ) + public static nuint ZSTD_initCStream_srcSize(ZSTD_CCtx_s* zcs, int compressionLevel, ulong pss) { /* temporary : 0 interpreted as "unknown" during transition period. * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. 
@@ -9171,8 +8924,7 @@ ZSTD_EndDirective flushMode flushMode == ZSTD_EndDirective.ZSTD_e_end && ( (nuint)(oend - op) >= ZSTD_compressBound((nuint)(iend - ip)) - || zcs->appliedParams.outBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_stable + || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable ) && zcs->inBuffPos == 0 ) @@ -9233,9 +8985,7 @@ ZSTD_EndDirective flushMode } else { - assert( - zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable - ); + assert(zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable); if ( flushMode == ZSTD_EndDirective.ZSTD_e_continue && (nuint)(iend - ip) < zcs->blockSizeMax @@ -9254,158 +9004,149 @@ ZSTD_EndDirective flushMode } } - { - int inputBuffered = - zcs->appliedParams.inBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? 1 - : 0; - void* cDst; - nuint cSize; - nuint oSize = (nuint)(oend - op); - nuint iSize = - inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress - : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) - : zcs->blockSizeMax; - if ( - oSize >= ZSTD_compressBound(iSize) - || zcs->appliedParams.outBufferMode - == ZSTD_bufferMode_e.ZSTD_bm_stable - ) - cDst = op; - else { - cDst = zcs->outBuff; - oSize = zcs->outBuffSize; - } + int inputBuffered = + zcs->appliedParams.inBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? 1 + : 0; + void* cDst; + nuint cSize; + nuint oSize = (nuint)(oend - op); + nuint iSize = + inputBuffered != 0 ? zcs->inBuffPos - zcs->inToCompress + : (nuint)(iend - ip) < zcs->blockSizeMax ? (nuint)(iend - ip) + : zcs->blockSizeMax; + if ( + oSize >= ZSTD_compressBound(iSize) + || zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_stable + ) + cDst = op; + else + { + cDst = zcs->outBuff; + oSize = zcs->outBuffSize; + } - if (inputBuffered != 0) - { - uint lastBlock = - flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend - ? 1U - : 0U; - cSize = - lastBlock != 0 - ? 
ZSTD_compressEnd_public( - zcs, - cDst, - oSize, - zcs->inBuff + zcs->inToCompress, - iSize - ) - : ZSTD_compressContinue_public( - zcs, - cDst, - oSize, - zcs->inBuff + zcs->inToCompress, - iSize - ); + if (inputBuffered != 0) { - nuint err_code = cSize; - if (ERR_isError(err_code)) + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip == iend ? 1U : 0U; + cSize = + lastBlock != 0 + ? ZSTD_compressEnd_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ) + : ZSTD_compressContinue_public( + zcs, + cDst, + oSize, + zcs->inBuff + zcs->inToCompress, + iSize + ); { - return err_code; + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - } - zcs->frameEnded = lastBlock; - zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; - if (zcs->inBuffTarget > zcs->inBuffSize) - { - zcs->inBuffPos = 0; - zcs->inBuffTarget = zcs->blockSizeMax; - } + zcs->frameEnded = lastBlock; + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; + if (zcs->inBuffTarget > zcs->inBuffSize) + { + zcs->inBuffPos = 0; + zcs->inBuffTarget = zcs->blockSizeMax; + } #if DEBUG - if (lastBlock == 0) - assert(zcs->inBuffTarget <= zcs->inBuffSize); + if (lastBlock == 0) + assert(zcs->inBuffTarget <= zcs->inBuffSize); #endif - zcs->inToCompress = zcs->inBuffPos; - } - else - { - uint lastBlock = - flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend - ? 1U - : 0U; - cSize = - lastBlock != 0 - ? ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) - : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); - if (ip != null) - ip += iSize; + zcs->inToCompress = zcs->inBuffPos; + } + else { - nuint err_code = cSize; - if (ERR_isError(err_code)) + uint lastBlock = + flushMode == ZSTD_EndDirective.ZSTD_e_end && ip + iSize == iend + ? 1U + : 0U; + cSize = + lastBlock != 0 + ? 
ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) + : ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); + if (ip != null) + ip += iSize; { - return err_code; + nuint err_code = cSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - } - zcs->frameEnded = lastBlock; + zcs->frameEnded = lastBlock; #if DEBUG - if (lastBlock != 0) - assert(ip == iend); + if (lastBlock != 0) + assert(ip == iend); #endif - } + } - if (cDst == op) - { - op += cSize; - if (zcs->frameEnded != 0) + if (cDst == op) { - someMoreWork = 0; - ZSTD_CCtx_reset( - zcs, - ZSTD_ResetDirective.ZSTD_reset_session_only - ); + op += cSize; + if (zcs->frameEnded != 0) + { + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + } + + break; } - break; + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->streamStage = ZSTD_cStreamStage.zcss_flush; } - zcs->outBuffContentSize = cSize; - zcs->outBuffFlushedSize = 0; - zcs->streamStage = ZSTD_cStreamStage.zcss_flush; - } - goto case ZSTD_cStreamStage.zcss_flush; case ZSTD_cStreamStage.zcss_flush: - assert( - zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ); + assert(zcs->appliedParams.outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered); - { - nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; - nuint flushed = ZSTD_limitCopy( - op, - (nuint)(oend - op), - zcs->outBuff + zcs->outBuffFlushedSize, - toFlush - ); - if (flushed != 0) - op += flushed; - zcs->outBuffFlushedSize += flushed; - if (toFlush != flushed) { - assert(op == oend); - someMoreWork = 0; - break; - } + nuint toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + nuint flushed = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zcs->outBuff + zcs->outBuffFlushedSize, + toFlush + ); + if (flushed != 0) + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush != flushed) + { + assert(op == oend); + someMoreWork = 0; + break; + } - zcs->outBuffContentSize = 
zcs->outBuffFlushedSize = 0; - if (zcs->frameEnded != 0) - { - someMoreWork = 0; - ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + if (zcs->frameEnded != 0) + { + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_ResetDirective.ZSTD_reset_session_only); + break; + } + + zcs->streamStage = ZSTD_cStreamStage.zcss_load; break; } - zcs->streamStage = ZSTD_cStreamStage.zcss_load; - break; - } - default: assert(0 != 0); break; @@ -9601,9 +9342,7 @@ nuint inSize ); if (cctx->mtctx == null) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } } @@ -9715,18 +9454,14 @@ ZSTD_EndDirective endOp if (input->src != cctx->expectedInBuffer.src) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) ); } if (input->pos != cctx->expectedInBuffer.size) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_stabilityCondition_notRespected) ); } } @@ -9734,9 +9469,7 @@ ZSTD_EndDirective endOp input->pos = input->size; cctx->expectedInBuffer = *input; cctx->stableIn_notConsumed += inputSize; - return (nuint)( - cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 - ); + return (nuint)(cctx->requestedParams.format == ZSTD_format_e.ZSTD_f_zstd1 ? 
6 : 2); } { @@ -9783,10 +9516,7 @@ ZSTD_EndDirective endOp flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); cctx->consumedSrcSize += input->pos - ipos; cctx->producedCSize += output->pos - opos; - if ( - ERR_isError(flushMin) - || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0 - ) + if (ERR_isError(flushMin) || endOp == ZSTD_EndDirective.ZSTD_e_end && flushMin == 0) { if (flushMin == 0) ZSTD_CCtx_trace(cctx, 0); @@ -9815,7 +9545,7 @@ ZSTD_EndDirective endOp { assert( endOp == ZSTD_EndDirective.ZSTD_e_flush - || endOp == ZSTD_EndDirective.ZSTD_e_end + || endOp == ZSTD_EndDirective.ZSTD_e_end ); if (flushMin == 0 || output->pos == output->size) break; @@ -9824,8 +9554,8 @@ ZSTD_EndDirective endOp assert( endOp == ZSTD_EndDirective.ZSTD_e_continue - || flushMin == 0 - || output->pos == output->size + || flushMin == 0 + || output->pos == output->size ); ZSTD_setBufferExpectations(cctx, output, input); return flushMin; @@ -9969,9 +9699,7 @@ int useSequenceProducer if (matchLength < matchLenLowerBound) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } return 0; @@ -10046,11 +9774,7 @@ ZSTD_paramSwitch_e externalRepSearch } memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); - for ( - ; - idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); - ++idx - ) + for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { uint litLength = inSeqs[idx].litLength; uint matchLength = inSeqs[idx].matchLength; @@ -10100,9 +9824,7 @@ ZSTD_paramSwitch_e externalRepSearch if (idx == inSeqsSize) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } assert(externalRepSearch != 
ZSTD_paramSwitch_e.ZSTD_ps_auto); @@ -10143,9 +9865,7 @@ ZSTD_paramSwitch_e externalRepSearch if (ip != iend) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } seqPos->idx = idx + 1; @@ -10302,8 +10022,7 @@ ZSTD_paramSwitch_e externalRepSearch } assert( - idx == inSeqsSize - || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength + idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength ); seqPos->idx = idx; seqPos->posInSequence = endPosInSequence; @@ -10382,9 +10101,7 @@ ZSTD_SequencePosition seqPos } if (end == 0) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); return blockSize; } @@ -10793,9 +10510,7 @@ int repcodeResolution nuint seqNb = 0; if (nbSequences >= cctx->seqStore.maxNbSeq) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, (uint)sizeof(repcodes_s)); @@ -10821,8 +10536,7 @@ int repcodeResolution else { assert(longl <= 2 * (nbSequences - 1)); - cctx->seqStore.longLengthType = - ZSTD_longLengthType_e.ZSTD_llt_literalLength; + cctx->seqStore.longLengthType = ZSTD_longLengthType_e.ZSTD_llt_literalLength; cctx->seqStore.longLengthPos = (uint)(longl - (nbSequences - 1) - 1); } } @@ -10834,11 +10548,7 @@ int repcodeResolution uint litLength = inSeqs[seqNb].litLength; uint matchLength = inSeqs[seqNb].matchLength; uint ll0 = litLength == 0 ? 
1U : 0U; - uint offBase = ZSTD_finalizeOffBase( - inSeqs[seqNb].offset, - updatedRepcodes.rep, - ll0 - ); + uint offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } @@ -10928,14 +10638,10 @@ nuint srcSize cctx->appliedParams.searchForExternalRepcodes == ZSTD_paramSwitch_e.ZSTD_ps_enable ? 1 : 0; - assert( - cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto - ); + assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_paramSwitch_e.ZSTD_ps_auto); if (nbSequences == 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } if (nbSequences == 1 && inSeqs[0].litLength == 0) @@ -10956,8 +10662,8 @@ nuint srcSize while (nbSequences != 0) { nuint compressedSeqsSize, - cBlockSize, - conversionStatus; + cBlockSize, + conversionStatus; BlockSummary block = ZSTD_get1BlockSummary(inSeqs, nbSequences); uint lastBlock = block.nbSequences == nbSequences ? 
1U : 0U; { @@ -11062,16 +10768,12 @@ nuint srcSize if (litSize != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } if (remaining != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_externalSequences_invalid)); } return cSize; @@ -11127,14 +10829,9 @@ nuint decompressedSize } } - if ( - cctx->appliedParams.blockDelimiters - == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters - ) + if (cctx->appliedParams.blockDelimiters == ZSTD_sequenceFormat_e.ZSTD_sf_noBlockDelimiters) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); } if (cctx->appliedParams.validateSequences != 0) @@ -11144,9 +10841,7 @@ nuint decompressedSize if (cctx->appliedParams.fParams.checksumFlag != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); } { @@ -11296,8 +10991,8 @@ private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParamete && cParams->strategy <= ZSTD_strategy.ZSTD_lazy2 && cParams->hashLog > cParams->chainLog && cParams->chainLog <= 24 - ? 1 - : 0; + ? 1 + : 0; } /** @@ -11305,9 +11000,7 @@ private static int ZSTD_dedicatedDictSearch_isSupported(ZSTD_compressionParamete * search. This is used to recover the params set to be used in the working * context. (Otherwise, those tables would also grow.) 
*/ - private static void ZSTD_dedicatedDictSearch_revertCParams( - ZSTD_compressionParameters* cParams - ) + private static void ZSTD_dedicatedDictSearch_revertCParams(ZSTD_compressionParameters* cParams) { switch (cParams->strategy) { @@ -11529,4 +11222,4 @@ public static void ZSTD_CCtxParams_registerSequenceProducer( @params->extSeqProdState = null; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs index d41e50a70..2e836fb16 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressInternal.cs @@ -12,10 +12,7 @@ public static unsafe partial class Methods * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ZSTD_SequenceLength ZSTD_getSequenceLength( - SeqStore_t* seqStore, - SeqDef_s* seq - ) + private static ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t* seqStore, SeqDef_s* seq) { ZSTD_SequenceLength seqLen; seqLen.litLength = seq->litLength; @@ -44,79 +41,79 @@ private static ZSTD_SequenceLength ZSTD_getSequenceLength( capacity: 0 ); #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_Code => - new byte[64] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 16, - 17, - 17, - 18, - 18, - 19, - 19, - 20, - 20, - 20, - 20, - 21, - 21, - 21, - 21, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 22, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 23, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - }; - private static byte* LL_Code => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_LL_Code) - ); + private static ReadOnlySpan Span_LL_Code => + new byte[64] + { + 0, + 1, + 2, + 
3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + }; + private static byte* LL_Code => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_Code) + ); #else private static readonly byte* LL_Code = GetArrayPointer( @@ -198,143 +195,143 @@ private static uint ZSTD_LLcode(uint litLength) } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_Code => - new byte[128] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 32, - 33, - 33, - 34, - 34, - 35, - 35, - 36, - 36, - 36, - 36, - 37, - 37, - 37, - 37, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 38, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 39, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 41, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - 42, - }; - private static byte* ML_Code => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_ML_Code) - ); + private static ReadOnlySpan Span_ML_Code => + new byte[128] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 32, + 33, + 33, + 34, + 34, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 
38, + 38, + 38, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + 42, + }; + private static byte* ML_Code => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_Code) + ); #else private static readonly byte* ML_Code = GetArrayPointer( @@ -501,12 +498,7 @@ private static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) * @return index >= lowLimit ? candidate : backup, * tries to force branchless codegen. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static byte* ZSTD_selectAddr( - uint index, - uint lowLimit, - byte* candidate, - byte* backup - ) + private static byte* ZSTD_selectAddr(uint index, uint lowLimit, byte* candidate, byte* backup) { return index >= lowLimit ? candidate : backup; } @@ -523,8 +515,7 @@ private static nuint ZSTD_noCompressBlock( uint lastBlock ) { - uint cBlockHeader24 = - lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); + uint cBlockHeader24 = lastBlock + ((uint)blockType_e.bt_raw << 1) + (uint)(srcSize << 3); if (srcSize + ZSTD_blockHeaderSize > dstCapacity) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); @@ -584,8 +575,8 @@ private static int ZSTD_literalsCompressionIsDisabled(ZSTD_CCtx_params_s* cctxPa return cctxParams->cParams.strategy == ZSTD_strategy.ZSTD_fast && cctxParams->cParams.targetLength > 0 - ? 1 - : 0; + ? 
1 + : 0; } } @@ -623,8 +614,7 @@ nuint matchLength ) { assert( - (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) - < seqStorePtr->maxNbSeq + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq ); assert(litLength <= 1 << 17); if (litLength > 0xFFFF) @@ -676,8 +666,7 @@ nuint matchLength byte* litLimit_w = litLimit - 32; byte* litEnd = literals + litLength; assert( - (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) - < seqStorePtr->maxNbSeq + (nuint)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq ); assert(seqStorePtr->maxNbLit <= 128 * (1 << 10)); assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); @@ -1052,10 +1041,9 @@ private static void ZSTD_window_clear(ZSTD_window_t* window) [MethodImpl(MethodImplOptions.AggressiveInlining)] private static uint ZSTD_window_isEmpty(ZSTD_window_t window) { - return - window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 - ? 1U - : 0U; + return window.dictLimit == 2 && window.lowLimit == 2 && window.nextSrc - window.@base == 2 + ? 1U + : 0U; } /** @@ -1079,9 +1067,9 @@ private static ZSTD_dictMode_e ZSTD_matchState_dictMode(ZSTD_MatchState_t* ms) return ZSTD_window_hasExtDict(ms->window) != 0 ? ZSTD_dictMode_e.ZSTD_extDict : ms->dictMatchState != null ? ms->dictMatchState->dedicatedDictSearch != 0 - ? ZSTD_dictMode_e.ZSTD_dedicatedDictSearch + ? 
ZSTD_dictMode_e.ZSTD_dedicatedDictSearch : ZSTD_dictMode_e.ZSTD_dictMatchState - : ZSTD_dictMode_e.ZSTD_noDict; + : ZSTD_dictMode_e.ZSTD_noDict; } /** @@ -1305,12 +1293,12 @@ private static void ZSTD_checkDictValidity( } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] { 32, 0 }; - private static byte* stringToByte_20_00 => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_stringToByte_20_00) - ); + private static ReadOnlySpan Span_stringToByte_20_00 => new byte[] { 32, 0 }; + private static byte* stringToByte_20_00 => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_stringToByte_20_00) + ); #else private static readonly byte* stringToByte_20_00 = GetArrayPointer(new byte[] { 32, 0 }); @@ -1385,11 +1373,7 @@ int forceNonContiguous * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestMatchIndex( - ZSTD_MatchState_t* ms, - uint curr, - uint windowLog - ) + private static uint ZSTD_getLowestMatchIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog) { uint maxDistance = 1U << (int)windowLog; uint lowestValid = ms->window.lowLimit; @@ -1407,11 +1391,7 @@ uint windowLog * Returns the lowest allowed match index in the prefix. 
*/ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static uint ZSTD_getLowestPrefixIndex( - ZSTD_MatchState_t* ms, - uint curr, - uint windowLog - ) + private static uint ZSTD_getLowestPrefixIndex(ZSTD_MatchState_t* ms, uint curr, uint windowLog) { uint maxDistance = 1U << (int)windowLog; uint lowestValid = ms->window.dictLimit; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs index 06fe5a4f7..a4f92ab35 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressLiterals.cs @@ -110,10 +110,7 @@ nuint srcSize * for literal compression to even be attempted. * Minimum is made tighter as compression strategy increases. */ - private static nuint ZSTD_minLiteralsToCompress( - ZSTD_strategy strategy, - HUF_repeat huf_repeat - ) + private static nuint ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) { assert((int)strategy >= 0); assert((int)strategy <= 9); @@ -276,40 +273,36 @@ int bmi2 assert(srcSize >= 6); #endif - { - uint lhc = - (uint)hType - + ((singleStream == 0 ? 1U : 0U) << 2) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 14); - MEM_writeLE24(ostart, lhc); - break; - } + { + uint lhc = + (uint)hType + + ((singleStream == 0 ? 
1U : 0U) << 2) + + ((uint)srcSize << 4) + + ((uint)cLitSize << 14); + MEM_writeLE24(ostart, lhc); + break; + } case 4: assert(srcSize >= 6); - { - uint lhc = - (uint)(hType + (2 << 2)) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 18); - MEM_writeLE32(ostart, lhc); - break; - } + { + uint lhc = + (uint)(hType + (2 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 18); + MEM_writeLE32(ostart, lhc); + break; + } case 5: assert(srcSize >= 6); - { - uint lhc = - (uint)(hType + (3 << 2)) - + ((uint)srcSize << 4) - + ((uint)cLitSize << 22); - MEM_writeLE32(ostart, lhc); - ostart[4] = (byte)(cLitSize >> 10); - break; - } + { + uint lhc = + (uint)(hType + (3 << 2)) + ((uint)srcSize << 4) + ((uint)cLitSize << 22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (byte)(cLitSize >> 10); + break; + } default: assert(0 != 0); @@ -318,4 +311,4 @@ int bmi2 return lhSize + cLitSize; } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs index 26650be8f..e0fd5eee0 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSequences.cs @@ -7,271 +7,271 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_kInverseProbabilityLog256 => - new uint[256] - { - 0, - 2048, - 1792, - 1642, - 1536, - 1453, - 1386, - 1329, - 1280, - 1236, - 1197, - 1162, - 1130, - 1100, - 1073, - 1047, - 1024, - 1001, - 980, - 960, - 941, - 923, - 906, - 889, - 874, - 859, - 844, - 830, - 817, - 804, - 791, - 779, - 768, - 756, - 745, - 734, - 724, - 714, - 704, - 694, - 685, - 676, - 667, - 658, - 650, - 642, - 633, - 626, - 618, - 610, - 603, - 595, - 588, - 581, - 574, - 567, - 561, - 554, - 548, - 542, - 535, - 529, - 523, - 517, - 512, - 506, - 500, - 495, - 489, - 484, - 
478, - 473, - 468, - 463, - 458, - 453, - 448, - 443, - 438, - 434, - 429, - 424, - 420, - 415, - 411, - 407, - 402, - 398, - 394, - 390, - 386, - 382, - 377, - 373, - 370, - 366, - 362, - 358, - 354, - 350, - 347, - 343, - 339, - 336, - 332, - 329, - 325, - 322, - 318, - 315, - 311, - 308, - 305, - 302, - 298, - 295, - 292, - 289, - 286, - 282, - 279, - 276, - 273, - 270, - 267, - 264, - 261, - 258, - 256, - 253, - 250, - 247, - 244, - 241, - 239, - 236, - 233, - 230, - 228, - 225, - 222, - 220, - 217, - 215, - 212, - 209, - 207, - 204, - 202, - 199, - 197, - 194, - 192, - 190, - 187, - 185, - 182, - 180, - 178, - 175, - 173, - 171, - 168, - 166, - 164, - 162, - 159, - 157, - 155, - 153, - 151, - 149, - 146, - 144, - 142, - 140, - 138, - 136, - 134, - 132, - 130, - 128, - 126, - 123, - 121, - 119, - 117, - 115, - 114, - 112, - 110, - 108, - 106, - 104, - 102, - 100, - 98, - 96, - 94, - 93, - 91, - 89, - 87, - 85, - 83, - 82, - 80, - 78, - 76, - 74, - 73, - 71, - 69, - 67, - 66, - 64, - 62, - 61, - 59, - 57, - 55, - 54, - 52, - 50, - 49, - 47, - 46, - 44, - 42, - 41, - 39, - 37, - 36, - 34, - 33, - 31, - 30, - 28, - 26, - 25, - 23, - 22, - 20, - 19, - 17, - 16, - 14, - 13, - 11, - 10, - 8, - 7, - 5, - 4, - 2, - 1, - }; - private static uint* kInverseProbabilityLog256 => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256) - ); + private static ReadOnlySpan Span_kInverseProbabilityLog256 => + new uint[256] + { + 0, + 2048, + 1792, + 1642, + 1536, + 1453, + 1386, + 1329, + 1280, + 1236, + 1197, + 1162, + 1130, + 1100, + 1073, + 1047, + 1024, + 1001, + 980, + 960, + 941, + 923, + 906, + 889, + 874, + 859, + 844, + 830, + 817, + 804, + 791, + 779, + 768, + 756, + 745, + 734, + 724, + 714, + 704, + 694, + 685, + 676, + 667, + 658, + 650, + 642, + 633, + 626, + 618, + 610, + 603, + 595, + 588, + 581, + 574, + 567, + 561, + 554, + 548, + 542, + 535, + 529, + 523, + 517, + 512, + 506, + 500, + 495, + 
489, + 484, + 478, + 473, + 468, + 463, + 458, + 453, + 448, + 443, + 438, + 434, + 429, + 424, + 420, + 415, + 411, + 407, + 402, + 398, + 394, + 390, + 386, + 382, + 377, + 373, + 370, + 366, + 362, + 358, + 354, + 350, + 347, + 343, + 339, + 336, + 332, + 329, + 325, + 322, + 318, + 315, + 311, + 308, + 305, + 302, + 298, + 295, + 292, + 289, + 286, + 282, + 279, + 276, + 273, + 270, + 267, + 264, + 261, + 258, + 256, + 253, + 250, + 247, + 244, + 241, + 239, + 236, + 233, + 230, + 228, + 225, + 222, + 220, + 217, + 215, + 212, + 209, + 207, + 204, + 202, + 199, + 197, + 194, + 192, + 190, + 187, + 185, + 182, + 180, + 178, + 175, + 173, + 171, + 168, + 166, + 164, + 162, + 159, + 157, + 155, + 153, + 151, + 149, + 146, + 144, + 142, + 140, + 138, + 136, + 134, + 132, + 130, + 128, + 126, + 123, + 121, + 119, + 117, + 115, + 114, + 112, + 110, + 108, + 106, + 104, + 102, + 100, + 98, + 96, + 94, + 93, + 91, + 89, + 87, + 85, + 83, + 82, + 80, + 78, + 76, + 74, + 73, + 71, + 69, + 67, + 66, + 64, + 62, + 61, + 59, + 57, + 55, + 54, + 52, + 50, + 49, + 47, + 46, + 44, + 42, + 41, + 39, + 37, + 36, + 34, + 33, + 31, + 30, + 28, + 26, + 25, + 23, + 22, + 20, + 19, + 17, + 16, + 14, + 13, + 11, + 10, + 8, + 7, + 5, + 4, + 2, + 1, + }; + private static uint* kInverseProbabilityLog256 => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_kInverseProbabilityLog256) + ); #else private static readonly uint* kInverseProbabilityLog256 = GetArrayPointer( @@ -641,12 +641,7 @@ private static nuint ZSTD_fseBitCost(uint* ctable, uint* count, uint max) * table described by norm. The max symbol support by norm is assumed >= max. * norm must be valid for every symbol with non-zero probability in count. 
*/ - private static nuint ZSTD_crossEntropyCost( - short* norm, - uint accuracyLog, - uint* count, - uint max - ) + private static nuint ZSTD_crossEntropyCost(short* norm, uint accuracyLog, uint* count, uint max) { uint shift = 8 - accuracyLog; nuint cost = 0; @@ -732,16 +727,12 @@ ZSTD_strategy strategy if (isDefaultAllowed != default) { assert(!ERR_isError(basicCost)); - assert( - !(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost)) - ); + assert(!(*repeatMode == FSE_repeat.FSE_repeat_valid && ERR_isError(repeatCost))); } #endif assert(!ERR_isError(NCountCost)); - assert( - compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode)) - ); + assert(compressedCost < unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxCode))); if (basicCost <= repeatCost && basicCost <= compressedCost) { assert(isDefaultAllowed != default); @@ -786,13 +777,13 @@ nuint entropyWorkspaceSize switch (type) { case SymbolEncodingType_e.set_rle: - { - nuint err_code = FSE_buildCTable_rle(nextCTable, (byte)max); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = FSE_buildCTable_rle(nextCTable, (byte)max); + if (ERR_isError(err_code)) + { + return err_code; + } } - } if (dstCapacity == 0) { @@ -805,21 +796,21 @@ nuint entropyWorkspaceSize memcpy(nextCTable, prevCTable, (uint)prevCTableSize); return 0; case SymbolEncodingType_e.set_basic: - { - /* note : could be pre-calculated */ - nuint err_code = FSE_buildCTable_wksp( - nextCTable, - defaultNorm, - defaultMax, - defaultNormLog, - entropyWorkspace, - entropyWorkspaceSize - ); - if (ERR_isError(err_code)) { - return err_code; + /* note : could be pre-calculated */ + nuint err_code = FSE_buildCTable_wksp( + nextCTable, + defaultNorm, + defaultMax, + defaultNormLog, + entropyWorkspace, + entropyWorkspaceSize + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - } return 0; case SymbolEncodingType_e.set_compressed: diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs index bc5dcbdac..8a79453b3 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCompressSuperblock.cs @@ -61,7 +61,7 @@ private static nuint ZSTD_compressSubBlock_literal( assert(litSize > 0); assert( hufMetadata->hType == SymbolEncodingType_e.set_compressed - || hufMetadata->hType == SymbolEncodingType_e.set_repeat + || hufMetadata->hType == SymbolEncodingType_e.set_repeat ); if (writeEntropy != 0 && hufMetadata->hType == SymbolEncodingType_e.set_compressed) { @@ -105,9 +105,7 @@ private static nuint ZSTD_compressSubBlock_literal( if ( lhSize < (nuint)( - 3 - + (cLitSize >= 1 * (1 << 10) ? 1 : 0) - + (cLitSize >= 16 * (1 << 10) ? 1 : 0) + 3 + (cLitSize >= 1 * (1 << 10) ? 1 : 0) + (cLitSize >= 16 * (1 << 10) ? 1 : 0) ) ) { @@ -131,16 +129,14 @@ private static nuint ZSTD_compressSubBlock_literal( case 4: { - uint lhc = - (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); + uint lhc = (uint)(hType + (2 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 18); MEM_writeLE32(ostart, lhc); break; } case 5: { - uint lhc = - (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); + uint lhc = (uint)(hType + (3 << 2)) + ((uint)litSize << 4) + ((uint)cLitSize << 22); MEM_writeLE32(ostart, lhc); ostart[4] = (byte)(cLitSize >> 10); break; @@ -617,7 +613,7 @@ private static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t* fse private static nuint countLiterals(SeqStore_t* seqStore, SeqDef_s* sp, nuint seqCount) { nuint n, - total = 0; + total = 0; assert(sp != null); for (n = 0; n < seqCount; n++) { @@ -637,8 +633,8 @@ int firstSubBlock ) { nuint n, - budget = 0, - inSize = 0; + budget = 0, + inSize = 0; /* generous estimate */ nuint headerSize = (nuint)firstSubBlock * 120 * 256; 
assert(firstSubBlock == 0 || firstSubBlock == 1); @@ -730,8 +726,8 @@ nuint wkspSize ? (ebs.estBlockSize + targetCBlockSize / 2) / targetCBlockSize : 1; nuint n, - avgBlockBudget, - blockBudgetSupp = 0; + avgBlockBudget, + blockBudgetSupp = 0; avgBlockBudget = ebs.estBlockSize * 256 / nbSubBlocks; if (ebs.estBlockSize > srcSize) return 0; @@ -977,4 +973,4 @@ uint lastBlock zc->tmpWkspSize ); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs index 4197aa631..236c94d46 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdCwksp.cs @@ -448,7 +448,7 @@ private static nuint ZSTD_cwksp_sizeof(ZSTD_cwksp* ws) private static nuint ZSTD_cwksp_used(ZSTD_cwksp* ws) { return (nuint)((byte*)ws->tableEnd - (byte*)ws->workspace) - + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); + + (nuint)((byte*)ws->workspaceEnd - (byte*)ws->allocStart); } /** @@ -486,12 +486,7 @@ private static nuint ZSTD_cwksp_create(ZSTD_cwksp* ws, nuint size, ZSTD_customMe return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } - ZSTD_cwksp_init( - ws, - workspace, - size, - ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc - ); + ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_static_alloc_e.ZSTD_cwksp_dynamic_alloc); return 0; } @@ -533,8 +528,8 @@ nuint estimatedSpace return estimatedSpace - ZSTD_cwksp_slack_space_required() <= ZSTD_cwksp_used(ws) && ZSTD_cwksp_used(ws) <= estimatedSpace - ? 1 - : 0; + ? 1 + : 0; } /*-************************************* @@ -564,8 +559,8 @@ private static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, nuint additionalNee return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) != 0 && ws->workspaceOversizedDuration > 128 - ? 1 - : 0; + ? 
1 + : 0; } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -583,4 +578,4 @@ nuint additionalNeededSpace ws->workspaceOversizedDuration = 0; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs index 0a840a2ff..ce79c0f16 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDdict.cs @@ -75,17 +75,13 @@ ZSTD_dictContentType_e dictContentType if (magic != 0xEC30A437) { if (dictContentType == ZSTD_dictContentType_e.ZSTD_dct_fullDict) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); return 0; } } ddict->dictID = MEM_readLE32((sbyte*)ddict->dictContent + 4); - if ( - ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize)) - ) + if (ERR_isError(ZSTD_loadDEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize))) { return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } @@ -102,11 +98,7 @@ private static nuint ZSTD_initDDict_internal( ZSTD_dictContentType_e dictContentType ) { - if ( - dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef - || dict == null - || dictSize == 0 - ) + if (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef || dict == null || dictSize == 0) { ddict->dictBuffer = null; ddict->dictContent = dict; @@ -145,10 +137,7 @@ ZSTD_dictContentType_e dictContentType ZSTD_customMem customMem ) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; { ZSTD_DDict_s* ddict = (ZSTD_DDict_s*)ZSTD_customMalloc( @@ -277,13 +266,10 @@ public static nuint ZSTD_freeDDict(ZSTD_DDict_s* ddict) /*! 
ZSTD_estimateDDictSize() : * Estimate amount of memory that will be needed to create a dictionary for decompression. * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ - public static nuint ZSTD_estimateDDictSize( - nuint dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod - ) + public static nuint ZSTD_estimateDDictSize(nuint dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) { return (nuint)sizeof(ZSTD_DDict_s) - + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); + + (dictLoadMethod == ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef ? 0 : dictSize); } public static nuint ZSTD_sizeof_DDict(ZSTD_DDict_s* ddict) diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs index 8e629a5c6..ab20f04c1 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompress.cs @@ -91,10 +91,7 @@ ZSTD_customMem customMem /* Fetches a DDict with the given dictID * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. */ - private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict( - ZSTD_DDictHashSet* hashSet, - uint dictID - ) + private static ZSTD_DDict_s* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, uint dictID) { nuint idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); nuint idxRangeMask = hashSet->ddictPtrTableSize - 1; @@ -145,10 +142,7 @@ uint dictID /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. * Note: The ZSTD_DDict* within the table are NOT freed. 
*/ - private static void ZSTD_freeDDictHashSet( - ZSTD_DDictHashSet* hashSet, - ZSTD_customMem customMem - ) + private static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { if (hashSet != null && hashSet->ddictPtrTable != null) { @@ -198,9 +192,9 @@ public static nuint ZSTD_sizeof_DCtx(ZSTD_DCtx_s* dctx) if (dctx == null) return 0; return (nuint)sizeof(ZSTD_DCtx_s) - + ZSTD_sizeof_DDict(dctx->ddictLocal) - + dctx->inBuffSize - + dctx->outBuffSize; + + ZSTD_sizeof_DDict(dctx->ddictLocal) + + dctx->inBuffSize + + dctx->outBuffSize; } public static nuint ZSTD_estimateDCtxSize() @@ -212,8 +206,7 @@ private static nuint ZSTD_startingInputLength(ZSTD_format_e format) { nuint startingInputLength = (nuint)(format == ZSTD_format_e.ZSTD_f_zstd1 ? 5 : 1); assert( - format == ZSTD_format_e.ZSTD_f_zstd1 - || format == ZSTD_format_e.ZSTD_f_zstd1_magicless + format == ZSTD_format_e.ZSTD_f_zstd1 || format == ZSTD_format_e.ZSTD_f_zstd1_magicless ); return startingInputLength; } @@ -264,10 +257,7 @@ private static void ZSTD_initDCtx_internal(ZSTD_DCtx_s* dctx) private static ZSTD_DCtx_s* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { - if ( - ((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) - != 0 - ) + if (((customMem.customAlloc == null ? 1 : 0) ^ (customMem.customFree == null ? 1 : 0)) != 0) return null; { ZSTD_DCtx_s* dctx = (ZSTD_DCtx_s*)ZSTD_customMalloc( @@ -419,10 +409,10 @@ ZSTD_format_e format uint singleSegment = (uint)(fhd >> 5 & 1); uint fcsId = (uint)(fhd >> 6); return minInputSize - + (nuint)(singleSegment == 0 ? 1 : 0) - + ZSTD_did_fieldSize[dictID] - + ZSTD_fcs_fieldSize[fcsId] - + (nuint)(singleSegment != 0 && fcsId == 0 ? 1 : 0); + + (nuint)(singleSegment == 0 ? 1 : 0) + + ZSTD_did_fieldSize[dictID] + + ZSTD_fcs_fieldSize[fcsId] + + (nuint)(singleSegment != 0 && fcsId == 0 ? 
1 : 0); } } @@ -477,9 +467,7 @@ ZSTD_format_e format memcpy(hbuf, src, (uint)toCopy); if ((MEM_readLE32(hbuf) & 0xFFFFFFF0) != 0x184D2A50) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_prefix_unknown)); } } } @@ -643,9 +631,7 @@ private static nuint readSkippableFrameSize(void* src, nuint srcSize) sizeU32 = MEM_readLE32((byte*)src + 4); if (sizeU32 + 8 < sizeU32) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_unsupported)); } { @@ -778,12 +764,7 @@ public static ulong ZSTD_getDecompressedSize(void* src, nuint srcSize) * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ private static nuint ZSTD_decodeFrameHeader(ZSTD_DCtx_s* dctx, void* src, nuint headerSize) { - nuint result = ZSTD_getFrameHeader_advanced( - &dctx->fParams, - src, - headerSize, - dctx->format - ); + nuint result = ZSTD_getFrameHeader_advanced(&dctx->fParams, src, headerSize, dctx->format); if (ERR_isError(result)) return result; if (result > 0) @@ -838,8 +819,7 @@ ZSTD_format_e format { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert( - ERR_isError(frameSizeInfo.compressedSize) - || frameSizeInfo.compressedSize <= srcSize + ERR_isError(frameSizeInfo.compressedSize) || frameSizeInfo.compressedSize <= srcSize ); return frameSizeInfo; } @@ -997,8 +977,7 @@ public static nuint ZSTD_decompressionMargin(void* src, nuint srcSize) margin += zfh.headerSize; margin += (nuint)(zfh.checksumFlag != 0 ? 4 : 0); margin += 3 * frameSizeInfo.nbBlocks; - maxBlockSize = - maxBlockSize > zfh.blockSizeMax ? maxBlockSize : zfh.blockSizeMax; + maxBlockSize = maxBlockSize > zfh.blockSizeMax ? 
maxBlockSize : zfh.blockSizeMax; } else { @@ -1024,12 +1003,7 @@ public static nuint ZSTD_insertBlock(ZSTD_DCtx_s* dctx, void* blockStart, nuint return blockSize; } - private static nuint ZSTD_copyRawBlock( - void* dst, - nuint dstCapacity, - void* src, - nuint srcSize - ) + private static nuint ZSTD_copyRawBlock(void* dst, nuint dstCapacity, void* src, nuint srcSize) { if (srcSize > dstCapacity) { @@ -1174,9 +1148,7 @@ private static nuint ZSTD_decompressFrame( break; case blockType_e.bt_reserved: default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } { @@ -1539,17 +1511,17 @@ nuint srcSize assert(src != null); memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, (uint)srcSize); - { - nuint err_code = ZSTD_decodeFrameHeader( - dctx, - dctx->headerBuffer, - dctx->headerSize - ); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_decodeFrameHeader( + dctx, + dctx->headerBuffer, + dctx->headerSize + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - } dctx->expected = ZSTD_blockHeaderSize; dctx->stage = ZSTD_dStage.ZSTDds_decodeBlockHeader; @@ -1562,9 +1534,7 @@ nuint srcSize return cBlockSize; if (cBlockSize > dctx->fParams.blockSizeMax) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } dctx->expected = cBlockSize; @@ -1623,13 +1593,13 @@ nuint srcSize assert(srcSize <= dctx->expected); rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); - { - nuint err_code = rSize; - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = rSize; + if (ERR_isError(err_code)) + { + return err_code; + } } - } assert(rSize == srcSize); dctx->expected -= rSize; @@ -1655,9 +1625,7 @@ nuint srcSize if (rSize > dctx->fParams.blockSizeMax) { - return unchecked( - 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } dctx->decodedSize += rSize; @@ -1705,24 +1673,24 @@ nuint srcSize case ZSTD_dStage.ZSTDds_checkChecksum: assert(srcSize == 4); - { - if (dctx->validateChecksum != 0) { - uint h32 = (uint)ZSTD_XXH64_digest(&dctx->xxhState); - uint check32 = MEM_readLE32(src); - if (check32 != h32) + if (dctx->validateChecksum != 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong) - ); + uint h32 = (uint)ZSTD_XXH64_digest(&dctx->xxhState); + uint check32 = MEM_readLE32(src); + if (check32 != h32) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_checksum_wrong) + ); + } } - } - ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); - dctx->expected = 0; - dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; - return 0; - } + ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, 1); + dctx->expected = 0; + dctx->stage = ZSTD_dStage.ZSTDds_getFrameHeaderSize; + return 0; + } case ZSTD_dStage.ZSTDds_decodeSkippableHeader: assert(src != null); @@ -1797,7 +1765,7 @@ nuint dictSize { short* offcodeNCount = stackalloc short[32]; uint offcodeMaxValue = 31, - offcodeLog; + offcodeLog; nuint offcodeHeaderSize = FSE_readNCount( offcodeNCount, &offcodeMaxValue, @@ -1837,7 +1805,7 @@ nuint dictSize { short* matchlengthNCount = stackalloc short[53]; uint matchlengthMaxValue = 52, - matchlengthLog; + matchlengthLog; nuint matchlengthHeaderSize = FSE_readNCount( matchlengthNCount, &matchlengthMaxValue, @@ -1877,7 +1845,7 @@ nuint dictSize { short* litlengthNCount = stackalloc short[36]; uint litlengthMaxValue = 35, - litlengthLog; + litlengthLog; nuint litlengthHeaderSize = FSE_readNCount( litlengthNCount, &litlengthMaxValue, @@ -1928,9 +1896,7 @@ nuint dictSize dictPtr += 4; if (rep == 0 || rep > dictContentSize) { - return unchecked( - 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionary_corrupted)); } entropy->rep[i] = rep; @@ -2463,11 +2429,7 @@ public static nuint ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx_s* dctx, nuint maxWindo * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ public static nuint ZSTD_DCtx_setFormat(ZSTD_DCtx_s* dctx, ZSTD_format_e format) { - return ZSTD_DCtx_setParameter( - dctx, - ZSTD_dParameter.ZSTD_d_experimentalParam1, - (int)format - ); + return ZSTD_DCtx_setParameter(dctx, ZSTD_dParameter.ZSTD_d_experimentalParam1, (int)format); } /*! ZSTD_dParam_getBounds() : @@ -2519,9 +2481,7 @@ public static ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) break; } - bounds.error = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported) - ); + bounds.error = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_unsupported)); return bounds; } @@ -2545,11 +2505,7 @@ private static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ - public static nuint ZSTD_DCtx_getParameter( - ZSTD_DCtx_s* dctx, - ZSTD_dParameter param, - int* value - ) + public static nuint ZSTD_DCtx_getParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter param, int* value) { switch (param) { @@ -2588,11 +2544,7 @@ public static nuint ZSTD_DCtx_getParameter( * Setting a parameter is only possible during frame initialization (before starting decompression). * @return : 0, or an error code (which can be tested using ZSTD_isError()). 
*/ - public static nuint ZSTD_DCtx_setParameter( - ZSTD_DCtx_s* dctx, - ZSTD_dParameter dParam, - int value - ) + public static nuint ZSTD_DCtx_setParameter(ZSTD_DCtx_s* dctx, ZSTD_dParameter dParam, int value) { if (dctx->streamStage != ZSTD_dStreamStage.zdss_init) { @@ -2605,85 +2557,74 @@ int value if (value == 0) value = 27; - { - if ( - ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) - == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if (ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_windowLogMax, value) == 0) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } dctx->maxWindowSize = (nuint)1 << value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam1: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam1, - value - ) == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam1, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } dctx->format = (ZSTD_format_e)value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam2: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam2, - value - ) == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam2, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } dctx->outBufferMode = (ZSTD_bufferMode_e)value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam3: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam3, - value - ) == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if ( + 
ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam3, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam4: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam4, - value - ) == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam4, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } if (dctx->staticSize != 0) { @@ -2695,19 +2636,17 @@ int value dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; case ZSTD_dParameter.ZSTD_d_experimentalParam5: - { - if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam5, - value - ) == 0 - ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) - ); + if ( + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam5, value) + == 0 + ) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_parameter_outOfBound) + ); + } } - } dctx->disableHufAsm = value != 0 ? 
1 : 0; return 0; @@ -2715,10 +2654,8 @@ int value if (value != 0) { if ( - ZSTD_dParam_withinBounds( - ZSTD_dParameter.ZSTD_d_experimentalParam6, - value - ) == 0 + ZSTD_dParam_withinBounds(ZSTD_dParameter.ZSTD_d_experimentalParam6, value) + == 0 ) { return unchecked( @@ -2881,11 +2818,7 @@ private static nuint ZSTD_checkOutBuffer(ZSTD_DCtx_s* zds, ZSTD_outBuffer_s* out return 0; if (zds->streamStage == ZSTD_dStreamStage.zdss_init) return 0; - if ( - expect.dst == output->dst - && expect.pos == output->pos - && expect.size == output->size - ) + if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) return 0; return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstBuffer_wrong)); } @@ -3021,78 +2954,75 @@ public static nuint ZSTD_decompressStream( zds->expectedOutBuffer = *output; goto case ZSTD_dStreamStage.zdss_loadHeader; case ZSTD_dStreamStage.zdss_loadHeader: - { - nuint hSize = ZSTD_getFrameHeader_advanced( - &zds->fParams, - zds->headerBuffer, - zds->lhSize, - zds->format - ); - if (zds->refMultipleDDicts != default && zds->ddictSet != null) { - ZSTD_DCtx_selectFrameDDict(zds); - } + nuint hSize = ZSTD_getFrameHeader_advanced( + &zds->fParams, + zds->headerBuffer, + zds->lhSize, + zds->format + ); + if (zds->refMultipleDDicts != default && zds->ddictSet != null) + { + ZSTD_DCtx_selectFrameDDict(zds); + } - if (ERR_isError(hSize)) - { - return hSize; - } + if (ERR_isError(hSize)) + { + return hSize; + } - if (hSize != 0) - { - /* if hSize!=0, hSize > zds->lhSize */ - nuint toLoad = hSize - zds->lhSize; - nuint remainingInput = (nuint)(iend - ip); - assert(iend >= ip); - if (toLoad > remainingInput) + if (hSize != 0) { - if (remainingInput > 0) + /* if hSize!=0, hSize > zds->lhSize */ + nuint toLoad = hSize - zds->lhSize; + nuint remainingInput = (nuint)(iend - ip); + assert(iend >= ip); + if (toLoad > remainingInput) { - memcpy( - zds->headerBuffer + zds->lhSize, - ip, - (uint)remainingInput - ); - zds->lhSize += 
remainingInput; - } + if (remainingInput > 0) + { + memcpy( + zds->headerBuffer + zds->lhSize, + ip, + (uint)remainingInput + ); + zds->lhSize += remainingInput; + } - input->pos = input->size; - { - /* check first few bytes */ - nuint err_code = ZSTD_getFrameHeader_advanced( - &zds->fParams, - zds->headerBuffer, - zds->lhSize, - zds->format - ); - if (ERR_isError(err_code)) + input->pos = input->size; { - return err_code; + /* check first few bytes */ + nuint err_code = ZSTD_getFrameHeader_advanced( + &zds->fParams, + zds->headerBuffer, + zds->lhSize, + zds->format + ); + if (ERR_isError(err_code)) + { + return err_code; + } } + + return ( + (nuint)(zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2) + > hSize + ? (nuint)( + zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 + ) + : hSize + ) + - zds->lhSize + + ZSTD_blockHeaderSize; } - return ( - (nuint)( - zds->format == ZSTD_format_e.ZSTD_f_zstd1 ? 6 : 2 - ) > hSize - ? (nuint)( - zds->format == ZSTD_format_e.ZSTD_f_zstd1 - ? 6 - : 2 - ) - : hSize - ) - - zds->lhSize - + ZSTD_blockHeaderSize; + assert(ip != null); + memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)toLoad); + zds->lhSize = hSize; + ip += toLoad; + break; } - - assert(ip != null); - memcpy(zds->headerBuffer + zds->lhSize, ip, (uint)toLoad); - zds->lhSize = hSize; - ip += toLoad; - break; } - } if ( zds->fParams.frameContentSize != unchecked(0UL - 1) @@ -3135,21 +3065,16 @@ public static nuint ZSTD_decompressStream( && (nuint)(oend - op) < zds->fParams.frameContentSize ) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } - { - nuint err_code = ZSTD_decompressBegin_usingDDict( - zds, - ZSTD_getDDict(zds) - ); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)); + if (ERR_isError(err_code)) + { + return err_code; + } } - } if ( zds->format == 
ZSTD_format_e.ZSTD_f_zstd1 @@ -3182,9 +3107,7 @@ public static nuint ZSTD_decompressStream( if (zds->fParams.windowSize > zds->maxWindowSize) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_frameParameter_windowTooLarge) ); } @@ -3194,112 +3117,103 @@ public static nuint ZSTD_decompressStream( ? zds->fParams.blockSizeMax : (uint)zds->maxBlockSizeParam; - { - /* frame checksum */ - nuint neededInBuffSize = - zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; - nuint neededOutBuffSize = - zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered - ? ZSTD_decodingBufferSize_internal( - zds->fParams.windowSize, - zds->fParams.frameContentSize, - zds->fParams.blockSizeMax - ) - : 0; - ZSTD_DCtx_updateOversizedDuration( - zds, - neededInBuffSize, - neededOutBuffSize - ); { - int tooSmall = - zds->inBuffSize < neededInBuffSize - || zds->outBuffSize < neededOutBuffSize - ? 1 + /* frame checksum */ + nuint neededInBuffSize = + zds->fParams.blockSizeMax > 4 ? zds->fParams.blockSizeMax : 4; + nuint neededOutBuffSize = + zds->outBufferMode == ZSTD_bufferMode_e.ZSTD_bm_buffered + ? ZSTD_decodingBufferSize_internal( + zds->fParams.windowSize, + zds->fParams.frameContentSize, + zds->fParams.blockSizeMax + ) : 0; - int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); - if (tooSmall != 0 || tooLarge != 0) + ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); { - nuint bufferSize = neededInBuffSize + neededOutBuffSize; - if (zds->staticSize != 0) + int tooSmall = + zds->inBuffSize < neededInBuffSize + || zds->outBuffSize < neededOutBuffSize + ? 
1 + : 0; + int tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); + if (tooSmall != 0 || tooLarge != 0) { - assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); - if ( - bufferSize - > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s) - ) + nuint bufferSize = neededInBuffSize + neededOutBuffSize; + if (zds->staticSize != 0) { - return unchecked( - (nuint)( - -(int) - ZSTD_ErrorCode.ZSTD_error_memory_allocation - ) - ); + assert(zds->staticSize >= (nuint)sizeof(ZSTD_DCtx_s)); + if (bufferSize > zds->staticSize - (nuint)sizeof(ZSTD_DCtx_s)) + { + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) + ); + } } - } - else - { - ZSTD_customFree(zds->inBuff, zds->customMem); - zds->inBuffSize = 0; - zds->outBuffSize = 0; - zds->inBuff = (sbyte*)ZSTD_customMalloc( - bufferSize, - zds->customMem - ); - if (zds->inBuff == null) + else { - return unchecked( - (nuint)( - -(int) - ZSTD_ErrorCode.ZSTD_error_memory_allocation - ) + ZSTD_customFree(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (sbyte*)ZSTD_customMalloc( + bufferSize, + zds->customMem ); + if (zds->inBuff == null) + { + return unchecked( + (nuint)( + -(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation + ) + ); + } } - } - zds->inBuffSize = neededInBuffSize; - zds->outBuff = zds->inBuff + zds->inBuffSize; - zds->outBuffSize = neededOutBuffSize; + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; + } } } - } zds->streamStage = ZSTD_dStreamStage.zdss_read; goto case ZSTD_dStreamStage.zdss_read; case ZSTD_dStreamStage.zdss_read: - { - nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize( - zds, - (nuint)(iend - ip) - ); - if (neededInSize == 0) { - zds->streamStage = ZSTD_dStreamStage.zdss_init; - someMoreWork = 0; - break; - } + nuint neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize( + zds, + (nuint)(iend - ip) + ); + if (neededInSize == 0) + { + 
zds->streamStage = ZSTD_dStreamStage.zdss_init; + someMoreWork = 0; + break; + } - if ((nuint)(iend - ip) >= neededInSize) - { + if ((nuint)(iend - ip) >= neededInSize) { - nuint err_code = ZSTD_decompressContinueStream( - zds, - &op, - oend, - ip, - neededInSize - ); - if (ERR_isError(err_code)) { - return err_code; + nuint err_code = ZSTD_decompressContinueStream( + zds, + &op, + oend, + ip, + neededInSize + ); + if (ERR_isError(err_code)) + { + return err_code; + } } - } - assert(ip != null); - ip += neededInSize; - break; + assert(ip != null); + ip += neededInSize; + break; + } } - } if (ip == iend) { @@ -3317,10 +3231,7 @@ public static nuint ZSTD_decompressStream( nuint loadedSize; assert( neededInSize - == ZSTD_nextSrcSizeToDecompressWithInputSize( - zds, - (nuint)(iend - ip) - ) + == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (nuint)(iend - ip)) ); if (isSkipFrame != 0) { @@ -3374,30 +3285,30 @@ public static nuint ZSTD_decompressStream( } case ZSTD_dStreamStage.zdss_flush: - { - nuint toFlushSize = zds->outEnd - zds->outStart; - nuint flushedSize = ZSTD_limitCopy( - op, - (nuint)(oend - op), - zds->outBuff + zds->outStart, - toFlushSize - ); - op = op != null ? op + flushedSize : op; - zds->outStart += flushedSize; - if (flushedSize == toFlushSize) { - zds->streamStage = ZSTD_dStreamStage.zdss_read; - if ( - zds->outBuffSize < zds->fParams.frameContentSize - && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize - ) + nuint toFlushSize = zds->outEnd - zds->outStart; + nuint flushedSize = ZSTD_limitCopy( + op, + (nuint)(oend - op), + zds->outBuff + zds->outStart, + toFlushSize + ); + op = op != null ? 
op + flushedSize : op; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { - zds->outStart = zds->outEnd = 0; - } + zds->streamStage = ZSTD_dStreamStage.zdss_read; + if ( + zds->outBuffSize < zds->fParams.frameContentSize + && zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize + ) + { + zds->outStart = zds->outEnd = 0; + } - break; + break; + } } - } someMoreWork = 0; break; @@ -3468,9 +3379,7 @@ public static nuint ZSTD_decompressStream( nextSrcSizeHint += ZSTD_blockHeaderSize - * (nuint)( - ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0 - ); + * (nuint)(ZSTD_nextInputType(zds) == ZSTD_nextInputType_e.ZSTDnit_block ? 1 : 0); assert(zds->inPos <= nextSrcSizeHint); nextSrcSizeHint -= zds->inPos; return nextSrcSizeHint; @@ -3508,4 +3417,4 @@ public static nuint ZSTD_decompressStream_simpleArgs( return cErr; } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs index 380f9525a..5fd65243e 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressBlock.cs @@ -20,8 +20,7 @@ private static void ZSTD_copy4(void* dst, void* src) ***************************************************************/ private static nuint ZSTD_blockSizeMax(ZSTD_DCtx_s* dctx) { - nuint blockSizeMax = - dctx->isFrameDecompression != 0 ? dctx->fParams.blockSizeMax : 1 << 17; + nuint blockSizeMax = dctx->isFrameDecompression != 0 ? 
dctx->fParams.blockSizeMax : 1 << 17; assert(blockSizeMax <= 1 << 17); return blockSizeMax; } @@ -65,9 +64,7 @@ uint splitImmediately { nuint blockSizeMax = ZSTD_blockSizeMax(dctx); assert(litSize <= blockSizeMax); - assert( - dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming - ); + assert(dctx->isFrameDecompression != 0 || streaming == streaming_operation.not_streaming); assert(expectedWriteSize <= blockSizeMax); if ( streaming == streaming_operation.not_streaming @@ -148,206 +145,194 @@ streaming_operation streaming ); } - { - nuint lhSize, - litSize, - litCSize; - uint singleStream = 0; - uint lhlCode = (uint)(istart[0] >> 2 & 3); - uint lhc = MEM_readLE32(istart); - nuint hufSuccess; - nuint expectedWriteSize = - blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; - int flags = - 0 - | ( - ZSTD_DCtx_get_bmi2(dctx) != 0 - ? (int)HUF_flags_e.HUF_flags_bmi2 - : 0 - ) - | ( - dctx->disableHufAsm != 0 - ? (int)HUF_flags_e.HUF_flags_disableAsm - : 0 - ); - switch (lhlCode) { - case 0: - case 1: - default: - singleStream = lhlCode == 0 ? 1U : 0U; - lhSize = 3; - litSize = lhc >> 4 & 0x3FF; - litCSize = lhc >> 14 & 0x3FF; - break; - case 2: - lhSize = 4; - litSize = lhc >> 4 & 0x3FFF; - litCSize = lhc >> 18; - break; - case 3: - lhSize = 5; - litSize = lhc >> 4 & 0x3FFFF; - litCSize = (lhc >> 22) + ((nuint)istart[4] << 10); - break; - } + nuint lhSize, + litSize, + litCSize; + uint singleStream = 0; + uint lhlCode = (uint)(istart[0] >> 2 & 3); + uint lhc = MEM_readLE32(istart); + nuint hufSuccess; + nuint expectedWriteSize = + blockSizeMax < dstCapacity ? blockSizeMax : dstCapacity; + int flags = + 0 + | (ZSTD_DCtx_get_bmi2(dctx) != 0 ? (int)HUF_flags_e.HUF_flags_bmi2 : 0) + | ( + dctx->disableHufAsm != 0 ? (int)HUF_flags_e.HUF_flags_disableAsm : 0 + ); + switch (lhlCode) + { + case 0: + case 1: + default: + singleStream = lhlCode == 0 ? 
1U : 0U; + lhSize = 3; + litSize = lhc >> 4 & 0x3FF; + litCSize = lhc >> 14 & 0x3FF; + break; + case 2: + lhSize = 4; + litSize = lhc >> 4 & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + lhSize = 5; + litSize = lhc >> 4 & 0x3FFFF; + litCSize = (lhc >> 22) + ((nuint)istart[4] << 10); + break; + } - if (litSize > 0 && dst == null) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); - } + if (litSize > 0 && dst == null) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - if (litSize > blockSizeMax) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + if (litSize > blockSizeMax) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) + ); + } + + if (singleStream == 0) + if (litSize < 6) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong) + ); + } - if (singleStream == 0) - if (litSize < 6) + if (litCSize + lhSize > srcSize) { return unchecked( - (nuint)( - -(int)ZSTD_ErrorCode.ZSTD_error_literals_headerWrong - ) + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); } - if (litCSize + lhSize > srcSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); - } + if (expectedWriteSize < litSize) + { + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ); + } - if (expectedWriteSize < litSize) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) + ZSTD_allocateLiteralsBuffer( + dctx, + dst, + dstCapacity, + litSize, + streaming, + expectedWriteSize, + 0 ); - } - - ZSTD_allocateLiteralsBuffer( - dctx, - dst, - dstCapacity, - litSize, - streaming, - expectedWriteSize, - 0 - ); - if (dctx->ddictIsCold != 0 && litSize > 768) - { - sbyte* _ptr = (sbyte*)dctx->HUFptr; - const nuint _size = sizeof(uint) * 4097; - nuint _pos; - for (_pos = 0; _pos < _size; _pos += 64) + if 
(dctx->ddictIsCold != 0 && litSize > 768) { + sbyte* _ptr = (sbyte*)dctx->HUFptr; + const nuint _size = sizeof(uint) * 4097; + nuint _pos; + for (_pos = 0; _pos < _size; _pos += 64) + { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } #endif + } } - } - if (litEncType == SymbolEncodingType_e.set_repeat) - { - if (singleStream != 0) + if (litEncType == SymbolEncodingType_e.set_repeat) { - hufSuccess = HUF_decompress1X_usingDTable( - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->HUFptr, - flags - ); + if (singleStream != 0) + { + hufSuccess = HUF_decompress1X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags + ); + } + else + { + assert(litSize >= 6); + hufSuccess = HUF_decompress4X_usingDTable( + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->HUFptr, + flags + ); + } } else { - assert(litSize >= 6); - hufSuccess = HUF_decompress4X_usingDTable( - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->HUFptr, - flags - ); + if (singleStream != 0) + { + hufSuccess = HUF_decompress1X1_DCtx_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags + ); + } + else + { + hufSuccess = HUF_decompress4X_hufOnly_wksp( + dctx->entropy.hufTable, + dctx->litBuffer, + litSize, + istart + lhSize, + litCSize, + dctx->workspace, + sizeof(uint) * 640, + flags + ); + } } - } - else - { - if (singleStream != 0) + + if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { - hufSuccess = HUF_decompress1X1_DCtx_wksp( - dctx->entropy.hufTable, + assert(litSize > 1 << 16); + memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - (1 << 16), 1 << 16); + memmove( + dctx->litBuffer + (1 << 16) 
- 32, dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->workspace, - sizeof(uint) * 640, - flags + litSize - (1 << 16) ); + dctx->litBuffer += (1 << 16) - 32; + dctx->litBufferEnd -= 32; + assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); } - else + + if (ERR_isError(hufSuccess)) { - hufSuccess = HUF_decompress4X_hufOnly_wksp( - dctx->entropy.hufTable, - dctx->litBuffer, - litSize, - istart + lhSize, - litCSize, - dctx->workspace, - sizeof(uint) * 640, - flags + return unchecked( + (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) ); } - } - - if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) - { - assert(litSize > 1 << 16); - memcpy( - dctx->litExtraBuffer, - dctx->litBufferEnd - (1 << 16), - 1 << 16 - ); - memmove( - dctx->litBuffer + (1 << 16) - 32, - dctx->litBuffer, - litSize - (1 << 16) - ); - dctx->litBuffer += (1 << 16) - 32; - dctx->litBufferEnd -= 32; - assert(dctx->litBufferEnd <= (byte*)dst + blockSizeMax); - } - if (ERR_isError(hufSuccess)) - { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType == SymbolEncodingType_e.set_compressed) + dctx->HUFptr = dctx->entropy.hufTable; + return litCSize + lhSize; } - dctx->litPtr = dctx->litBuffer; - dctx->litSize = litSize; - dctx->litEntropy = 1; - if (litEncType == SymbolEncodingType_e.set_compressed) - dctx->HUFptr = dctx->entropy.hufTable; - return litCSize + lhSize; - } - case SymbolEncodingType_e.set_basic: { nuint litSize, - lhSize; + lhSize; uint lhlCode = (uint)(istart[0] >> 2 & 3); nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; @@ -378,9 +363,7 @@ streaming_operation streaming if (litSize > 0 && dst == null) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } if (litSize > blockSizeMax) @@ -392,9 +375,7 @@ streaming_operation streaming if (expectedWriteSize < litSize) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } ZSTD_allocateLiteralsBuffer( @@ -417,11 +398,7 @@ streaming_operation streaming if (dctx->litBufferLocation == ZSTD_litLocation_e.ZSTD_split) { - memcpy( - dctx->litBuffer, - istart + lhSize, - (uint)(litSize - (1 << 16)) - ); + memcpy(dctx->litBuffer, istart + lhSize, (uint)(litSize - (1 << 16))); memcpy( dctx->litExtraBuffer, istart + lhSize + litSize - (1 << 16), @@ -449,7 +426,7 @@ streaming_operation streaming { uint lhlCode = (uint)(istart[0] >> 2 & 3); nuint litSize, - lhSize; + lhSize; nuint expectedWriteSize = blockSizeMax < dstCapacity ? 
blockSizeMax : dstCapacity; switch (lhlCode) @@ -486,9 +463,7 @@ streaming_operation streaming if (litSize > 0 && dst == null) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } if (litSize > blockSizeMax) @@ -500,9 +475,7 @@ streaming_operation streaming if (expectedWriteSize < litSize) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); } ZSTD_allocateLiteralsBuffer( @@ -530,9 +503,7 @@ streaming_operation streaming } default: - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } } } @@ -635,94 +606,34 @@ nuint dstCapacity new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 9, nbBits: 5, baseValue: 509), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 15, nbBits: 5, baseValue: 32765), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 21, - nbBits: 5, - baseValue: 2097149 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 21, nbBits: 5, baseValue: 2097149), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 3, nbBits: 5, baseValue: 5), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 12, nbBits: 5, baseValue: 4093), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 18, - nbBits: 5, - baseValue: 262141 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 23, - nbBits: 5, - baseValue: 8388605 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 18, nbBits: 5, baseValue: 262141), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 23, nbBits: 5, baseValue: 8388605), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 5, 
nbBits: 5, baseValue: 29), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 14, nbBits: 5, baseValue: 16381), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 20, - nbBits: 5, - baseValue: 1048573 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 20, nbBits: 5, baseValue: 1048573), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 2, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 7, nbBits: 4, baseValue: 125), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 11, nbBits: 5, baseValue: 2045), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 17, - nbBits: 5, - baseValue: 131069 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 22, - nbBits: 5, - baseValue: 4194301 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 17, nbBits: 5, baseValue: 131069), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 22, nbBits: 5, baseValue: 4194301), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 4, nbBits: 5, baseValue: 13), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 8, nbBits: 4, baseValue: 253), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 13, nbBits: 5, baseValue: 8189), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 19, - nbBits: 5, - baseValue: 524285 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 19, nbBits: 5, baseValue: 524285), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 1, nbBits: 5, baseValue: 1), new ZSTD_seqSymbol(nextState: 16, nbAdditionalBits: 6, nbBits: 4, baseValue: 61), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 10, nbBits: 5, baseValue: 1021), new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 16, nbBits: 5, baseValue: 65533), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 28, - nbBits: 5, - baseValue: 268435453 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 27, - nbBits: 5, - baseValue: 134217725 - ), - new 
ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 26, - nbBits: 5, - baseValue: 67108861 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 25, - nbBits: 5, - baseValue: 33554429 - ), - new ZSTD_seqSymbol( - nextState: 0, - nbAdditionalBits: 24, - nbBits: 5, - baseValue: 16777213 - ), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 28, nbBits: 5, baseValue: 268435453), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 27, nbBits: 5, baseValue: 134217725), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 26, nbBits: 5, baseValue: 67108861), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 25, nbBits: 5, baseValue: 33554429), + new ZSTD_seqSymbol(nextState: 0, nbAdditionalBits: 24, nbBits: 5, baseValue: 16777213), } ); private static readonly ZSTD_seqSymbol* ML_defaultDTable = GetArrayPointer( @@ -796,11 +707,7 @@ nuint dstCapacity } ); - private static void ZSTD_buildSeqTable_rle( - ZSTD_seqSymbol* dt, - uint baseValue, - byte nbAddBits - ) + private static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, uint baseValue, byte nbAddBits) { void* ptr = dt; ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; @@ -916,7 +823,7 @@ nuint wkspSize uint tableMask = tableSize - 1; uint step = (tableSize >> 1) + (tableSize >> 3) + 3; uint s, - position = 0; + position = 0; for (s = 0; s < maxSV1; s++) { int i; @@ -1039,17 +946,15 @@ int bmi2 if (*(byte*)src > max) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } - { - uint symbol = *(byte*)src; - uint baseline = baseValue[symbol]; - byte nbBits = nbAdditionalBits[symbol]; - ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); - } + { + uint symbol = *(byte*)src; + uint baseline = baseValue[symbol]; + byte nbBits = nbAdditionalBits[symbol]; + ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); + } *DTablePtr = DTableSpace; return 1; @@ -1059,9 +964,7 @@ int bmi2 
case SymbolEncodingType_e.set_repeat: if (flagRepeatTable == 0) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } if (ddictIsCold != 0 && nbSeq > 24) @@ -1075,10 +978,10 @@ int bmi2 for (_pos = 0; _pos < _size; _pos += 64) { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } #endif } } @@ -1092,16 +995,12 @@ int bmi2 nuint headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); if (ERR_isError(headerSize)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } if (tableLog > maxLog) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } ZSTD_buildFSETable( @@ -1215,9 +1114,7 @@ nuint srcSize ); if (ERR_isError(llhSize)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } ip += llhSize; @@ -1244,9 +1141,7 @@ nuint srcSize ); if (ERR_isError(ofhSize)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } ip += ofhSize; @@ -1273,9 +1168,7 @@ nuint srcSize ); if (ERR_isError(mlhSize)) { - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_corruption_detected)); } ip += mlhSize; @@ -1286,12 +1179,12 @@ nuint srcSize } #if NET7_0_OR_GREATER - private static 
ReadOnlySpan Span_dec32table => new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 }; - private static uint* dec32table => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_dec32table) - ); + private static ReadOnlySpan Span_dec32table => new uint[8] { 0, 1, 2, 1, 4, 4, 4, 4 }; + private static uint* dec32table => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dec32table) + ); #else private static readonly uint* dec32table = GetArrayPointer( @@ -1299,12 +1192,12 @@ ref MemoryMarshal.GetReference(Span_dec32table) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_dec64table => new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 }; - private static int* dec64table => - (int*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_dec64table) - ); + private static ReadOnlySpan Span_dec64table => new int[8] { 8, 8, 8, 7, 8, 9, 10, 11 }; + private static int* dec64table => + (int*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dec64table) + ); #else private static readonly int* dec64table = GetArrayPointer( @@ -1365,9 +1258,8 @@ ZSTD_overlap_e ovtype nint diff = (nint)(op - ip); byte* oend = op + length; assert( - ovtype == ZSTD_overlap_e.ZSTD_no_overlap - && (diff <= -8 || diff >= 8 || op >= oend_w) - || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0 + ovtype == ZSTD_overlap_e.ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w) + || ovtype == ZSTD_overlap_e.ZSTD_overlap_src_before_dst && diff >= 0 ); if (length < 8) { @@ -1672,12 +1564,7 @@ private static nuint ZSTD_execSequence( assert(sequence_matchLength >= 1); if (sequence_offset >= 16) { - ZSTD_wildcopy( - op, - match, - (nint)sequence_matchLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); + ZSTD_wildcopy(op, match, (nint)sequence_matchLength, ZSTD_overlap_e.ZSTD_no_overlap); return sequenceLength; } @@ -1783,12 +1670,7 @@ 
private static nuint ZSTD_execSequenceSplitLitBuffer( assert(sequence.matchLength >= 1); if (sequence.offset >= 16) { - ZSTD_wildcopy( - op, - match, - (nint)sequence.matchLength, - ZSTD_overlap_e.ZSTD_no_overlap - ); + ZSTD_wildcopy(op, match, (nint)sequence.matchLength, ZSTD_overlap_e.ZSTD_no_overlap); return sequenceLength; } @@ -2189,10 +2071,8 @@ ZSTD_longOffset_e isLongOffset { uint i; for (i = 0; i < 3; i++) - System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - (int)i - ) = dctx->entropy.rep[i]; + System.Runtime.CompilerServices.Unsafe.Add(ref seqState.prevOffset.e0, (int)i) = + dctx->entropy.rep[i]; } if (ERR_isError(BIT_initDStream(ref seqState.DStream, ip, (nuint)(iend - ip)))) @@ -2298,11 +2178,10 @@ ZSTD_longOffset_e isLongOffset ref seqState.prevOffset.e0, (int)ll0 ); - seqState.prevOffset.e1 = - System.Runtime.CompilerServices.Unsafe.Add( - ref seqState.prevOffset.e0, - ll0 == 0 ? 1 : 0 - ); + seqState.prevOffset.e1 = System.Runtime.CompilerServices.Unsafe.Add( + ref seqState.prevOffset.e0, + ll0 == 0 ? 
1 : 0 + ); seqState.prevOffset.e0 = offset; } else @@ -2631,11 +2510,11 @@ private static nuint ZSTD_prefetchMatch( (nint)sequence.offset ); #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch0(match); - System.Runtime.Intrinsics.X86.Sse.Prefetch0(match + 64); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(match); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(match + 64); + } #endif } @@ -2788,12 +2667,7 @@ ZSTD_longOffset_e isLongOffset ); if (ERR_isError(oneSeqSize)) return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch( - prefetchPos, - sequence, - prefixStart, - dictEnd - ); + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); sequences[seqNb & 8 - 1] = sequence; op += oneSeqSize; } @@ -3027,11 +2901,7 @@ private static nuint ZSTD_totalHistorySize(byte* op, byte* virtualStart) */ private static ZSTD_OffsetInfo ZSTD_getOffsetInfo(ZSTD_seqSymbol* offTable, int nbSeq) { - ZSTD_OffsetInfo info = new ZSTD_OffsetInfo - { - longOffsetShare = 0, - maxNbAdditionalBits = 0, - }; + ZSTD_OffsetInfo info = new ZSTD_OffsetInfo { longOffsetShare = 0, maxNbAdditionalBits = 0 }; if (nbSeq != 0) { void* ptr = offTable; @@ -3290,11 +3160,7 @@ private static void ZSTD_initFseState( { void* ptr = dt; ZSTD_seqSymbol_header* DTableH = (ZSTD_seqSymbol_header*)ptr; - DStatePtr.state = BIT_readBits( - bitD.bitContainer, - ref bitD.bitsConsumed, - DTableH->tableLog - ); + DStatePtr.state = BIT_readBits(bitD.bitContainer, ref bitD.bitsConsumed, DTableH->tableLog); BIT_reloadDStream( ref bitD.bitContainer, ref bitD.bitsConsumed, @@ -3349,4 +3215,4 @@ private static void ZSTD_overlapCopy8(ref byte* op, ref byte* ip, nuint offset) op += 8; assert(op - ip >= 8); } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs 
b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs index 96b38245e..670f58465 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDecompressInternal.cs @@ -8,51 +8,51 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_base => - new uint[36] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 18, - 20, - 22, - 24, - 28, - 32, - 40, - 48, - 64, - 0x80, - 0x100, - 0x200, - 0x400, - 0x800, - 0x1000, - 0x2000, - 0x4000, - 0x8000, - 0x10000, - }; - private static uint* LL_base => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_LL_base) - ); + private static ReadOnlySpan Span_LL_base => + new uint[36] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 18, + 20, + 22, + 24, + 28, + 32, + 40, + 48, + 64, + 0x80, + 0x100, + 0x200, + 0x400, + 0x800, + 0x1000, + 0x2000, + 0x4000, + 0x8000, + 0x10000, + }; + private static uint* LL_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_base) + ); #else private static readonly uint* LL_base = GetArrayPointer( @@ -98,47 +98,47 @@ ref MemoryMarshal.GetReference(Span_LL_base) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_base => - new uint[32] - { - 0, - 1, - 1, - 5, - 0xD, - 0x1D, - 0x3D, - 0x7D, - 0xFD, - 0x1FD, - 0x3FD, - 0x7FD, - 0xFFD, - 0x1FFD, - 0x3FFD, - 0x7FFD, - 0xFFFD, - 0x1FFFD, - 0x3FFFD, - 0x7FFFD, - 0xFFFFD, - 0x1FFFFD, - 0x3FFFFD, - 0x7FFFFD, - 0xFFFFFD, - 0x1FFFFFD, - 0x3FFFFFD, - 0x7FFFFFD, - 0xFFFFFFD, - 0x1FFFFFFD, - 0x3FFFFFFD, - 0x7FFFFFFD, - }; - private static uint* OF_base => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref 
MemoryMarshal.GetReference(Span_OF_base) - ); + private static ReadOnlySpan Span_OF_base => + new uint[32] + { + 0, + 1, + 1, + 5, + 0xD, + 0x1D, + 0x3D, + 0x7D, + 0xFD, + 0x1FD, + 0x3FD, + 0x7FD, + 0xFFD, + 0x1FFD, + 0x3FFD, + 0x7FFD, + 0xFFFD, + 0x1FFFD, + 0x3FFFD, + 0x7FFFD, + 0xFFFFD, + 0x1FFFFD, + 0x3FFFFD, + 0x7FFFFD, + 0xFFFFFD, + 0x1FFFFFD, + 0x3FFFFFD, + 0x7FFFFFD, + 0xFFFFFFD, + 0x1FFFFFFD, + 0x3FFFFFFD, + 0x7FFFFFFD, + }; + private static uint* OF_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_base) + ); #else private static readonly uint* OF_base = GetArrayPointer( @@ -180,47 +180,47 @@ ref MemoryMarshal.GetReference(Span_OF_base) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_bits => - new byte[32] - { - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - }; - private static byte* OF_bits => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_OF_bits) - ); + private static ReadOnlySpan Span_OF_bits => + new byte[32] + { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + }; + private static byte* OF_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_bits) + ); #else private static readonly byte* OF_bits = GetArrayPointer( @@ -262,68 +262,68 @@ ref MemoryMarshal.GetReference(Span_OF_bits) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_base => - new uint[53] - { - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 37, - 39, - 41, - 43, - 47, - 
51, - 59, - 67, - 83, - 99, - 0x83, - 0x103, - 0x203, - 0x403, - 0x803, - 0x1003, - 0x2003, - 0x4003, - 0x8003, - 0x10003, - }; - private static uint* ML_base => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_ML_base) - ); + private static ReadOnlySpan Span_ML_base => + new uint[53] + { + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 37, + 39, + 41, + 43, + 47, + 51, + 59, + 67, + 83, + 99, + 0x83, + 0x103, + 0x203, + 0x403, + 0x803, + 0x1003, + 0x2003, + 0x4003, + 0x8003, + 0x10003, + }; + private static uint* ML_base => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_base) + ); #else private static readonly uint* ML_base = GetArrayPointer( diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs index bbc06b18d..55d383ecb 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdDoubleFast.cs @@ -99,13 +99,13 @@ ZSTD_tableFillPurpose_e tfp } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_dummy => - new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 }; - private static byte* dummy => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_dummy) - ); + private static ReadOnlySpan Span_dummy => + new byte[10] { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0xe2, 0xb4 }; + private static byte* dummy => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_dummy) + ); #else private static readonly byte* dummy = GetArrayPointer( @@ -138,9 +138,9 @@ uint mls byte* iend = istart + srcSize; byte* ilimit = iend - 8; uint offset_1 = rep[0], 
- offset_2 = rep[1]; + offset_2 = rep[1]; uint offsetSaved1 = 0, - offsetSaved2 = 0; + offsetSaved2 = 0; nuint mLength; uint offset; uint curr; @@ -252,11 +252,11 @@ uint mls if (ip1 >= nextStep) { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); - System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 64); + System.Runtime.Intrinsics.X86.Sse.Prefetch0(ip1 + 128); + } #endif step++; @@ -319,9 +319,7 @@ uint mls hashSmall[ZSTD_hashPtr(ip - 1, hBitsS, mls)] = (uint)(ip - 1 - @base); } - while ( - ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) - ) + while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) { /* store sequence */ nuint rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; @@ -368,7 +366,7 @@ uint mls byte* iend = istart + srcSize; byte* ilimit = iend - 8; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; ZSTD_MatchState_t* dms = ms->dictMatchState; ZSTD_compressionParameters* dictCParams = &dms->cParams; uint* dictHashLong = dms->hashTable; @@ -393,10 +391,10 @@ uint mls for (_pos = 0; _pos < _size; _pos += 64) { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } #endif } } @@ -408,10 +406,10 @@ uint mls for (_pos = 0; _pos < _size; _pos += 64) { #if NETCOREAPP3_0_OR_GREATER - if (System.Runtime.Intrinsics.X86.Sse.IsSupported) - { - System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); - } + if (System.Runtime.Intrinsics.X86.Sse.IsSupported) + { + System.Runtime.Intrinsics.X86.Sse.Prefetch1(_ptr + _pos); + } #endif } } @@ -450,13 +448,8 @@ uint mls { 
byte* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = - ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixLowest - ) + 4; + ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixLowest) + + 4; ip++; assert(1 >= 1); assert(1 <= 3); @@ -486,13 +479,8 @@ uint mls if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { mLength = - ZSTD_count_2segments( - ip + 8, - dictMatchL + 8, - iend, - dictEnd, - prefixLowest - ) + 8; + ZSTD_count_2segments(ip + 8, dictMatchL + 8, iend, dictEnd, prefixLowest) + + 8; offset = curr - dictMatchIndexL - dictIndexDelta; while (ip > anchor && dictMatchL > dictStart && ip[-1] == dictMatchL[-1]) { @@ -538,10 +526,7 @@ uint mls ); byte* matchL3 = @base + matchIndexL3; hashLong[hl3] = curr + 1; - if ( - matchIndexL3 >= prefixLowestIndex - && MEM_read64(matchL3) == MEM_read64(ip + 1) - ) + if (matchIndexL3 >= prefixLowestIndex && MEM_read64(matchL3) == MEM_read64(ip + 1)) { mLength = ZSTD_count(ip + 9, matchL3 + 8, iend) + 8; ip++; @@ -561,10 +546,7 @@ uint mls uint dictMatchIndexL3 = dictMatchIndexAndTagL3 >> 8; byte* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); - if ( - dictMatchL3 > dictStart - && MEM_read64(dictMatchL3) == MEM_read64(ip + 1) - ) + if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip + 1)) { mLength = ZSTD_count_2segments( @@ -576,9 +558,7 @@ uint mls ) + 8; ip++; offset = curr + 1 - dictMatchIndexL3 - dictIndexDelta; - while ( - ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1] - ) + while (ip > anchor && dictMatchL3 > dictStart && ip[-1] == dictMatchL3[-1]) { ip--; dictMatchL3--; @@ -592,8 +572,7 @@ uint mls if (matchIndexS < prefixLowestIndex) { - mLength = - ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; + mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, dictEnd, prefixLowest) + 4; offset = curr - matchIndexS; while (ip > anchor 
&& match > dictStart && ip[-1] == match[-1]) { @@ -647,13 +626,8 @@ uint mls { byte* repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; nuint repLength2 = - ZSTD_count_2segments( - ip + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixLowest - ) + 4; + ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixLowest) + + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -894,7 +868,7 @@ uint mls byte* dictStart = dictBase + dictStartIndex; byte* dictEnd = dictBase + prefixStartIndex; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); while (ip < ilimit) @@ -924,13 +898,8 @@ uint mls { byte* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = - ZSTD_count_2segments( - ip + 1 + 4, - repMatch + 4, - iend, - repMatchEnd, - prefixStart - ) + 4; + ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + + 4; ip++; assert(1 >= 1); assert(1 <= 3); @@ -941,8 +910,7 @@ uint mls if (matchLongIndex > dictStartIndex && MEM_read64(matchLong) == MEM_read64(ip)) { byte* matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = - matchLongIndex < prefixStartIndex ? dictStart : prefixStart; + byte* lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; uint offset; mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, prefixStart) @@ -975,22 +943,14 @@ uint mls byte* match3 = match3Base + matchIndex3; uint offset; hashLong[h3] = curr + 1; - if ( - matchIndex3 > dictStartIndex - && MEM_read64(match3) == MEM_read64(ip + 1) - ) + if (matchIndex3 > dictStartIndex && MEM_read64(match3) == MEM_read64(ip + 1)) { byte* matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; byte* lowMatchPtr = matchIndex3 < prefixStartIndex ? 
dictStart : prefixStart; mLength = - ZSTD_count_2segments( - ip + 9, - match3 + 8, - iend, - matchEnd, - prefixStart - ) + 8; + ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, prefixStart) + + 8; ip++; offset = curr + 1 - matchIndex3; while (ip > anchor && match3 > lowMatchPtr && ip[-1] == match3[-1]) @@ -1003,8 +963,7 @@ uint mls else { byte* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - byte* lowMatchPtr = - matchIndex < prefixStartIndex ? dictStart : prefixStart; + byte* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, prefixStart) + 4; @@ -1064,13 +1023,8 @@ uint mls { byte* repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; nuint repLength2 = - ZSTD_count_2segments( - ip + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixStart - ) + 4; + ZSTD_count_2segments(ip + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -1103,14 +1057,7 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_4( nuint srcSize ) { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 4 - ); + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); } private static nuint ZSTD_compressBlock_doubleFast_extDict_5( @@ -1121,14 +1068,7 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_5( nuint srcSize ) { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 5 - ); + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); } private static nuint ZSTD_compressBlock_doubleFast_extDict_6( @@ -1139,14 +1079,7 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_6( nuint srcSize ) { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 6 - ); + return 
ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); } private static nuint ZSTD_compressBlock_doubleFast_extDict_7( @@ -1157,14 +1090,7 @@ private static nuint ZSTD_compressBlock_doubleFast_extDict_7( nuint srcSize ) { - return ZSTD_compressBlock_doubleFast_extDict_generic( - ms, - seqStore, - rep, - src, - srcSize, - 7 - ); + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); } private static nuint ZSTD_compressBlock_doubleFast_extDict( diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs index bfe73ae03..518954b6b 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdFast.cs @@ -216,7 +216,7 @@ int useCmov uint rep_offset1 = rep[0]; uint rep_offset2 = rep[1]; uint offsetSaved1 = 0, - offsetSaved2 = 0; + offsetSaved2 = 0; /* hash for ip0 */ nuint hash0; /* hash for ip1 */ @@ -564,7 +564,7 @@ uint hasStep byte* iend = istart + srcSize; byte* ilimit = iend - 8; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; ZSTD_MatchState_t* dms = ms->dictMatchState; ZSTD_compressionParameters* dictCParams = &dms->cParams; uint* dictHashTable = dms->hashTable; @@ -653,10 +653,7 @@ uint hasStep /* Found a possible dict match */ uint dictMatchIndex = dictMatchIndexAndTag >> 8; byte* dictMatch = dictBase + dictMatchIndex; - if ( - dictMatchIndex > dictStartIndex - && MEM_read32(dictMatch) == MEM_read32(ip0) - ) + if (dictMatchIndex > dictStartIndex && MEM_read32(dictMatch) == MEM_read32(ip0)) { if (matchIndex <= prefixStartIndex) { @@ -670,9 +667,7 @@ uint hasStep prefixStart ) + 4; while ( - ip0 > anchor - && dictMatch > dictStart - && ip0[-1] == dictMatch[-1] + ip0 > anchor && dictMatch > dictStart && ip0[-1] == dictMatch[-1] ) { ip0--; @@ -762,13 +757,8 @@ uint hasStep { byte* repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; nuint repLength2 = - ZSTD_count_2segments( - ip0 + 4, - repMatch2 + 4, - iend, - repEnd2, - prefixStart - ) + 4; + ZSTD_count_2segments(ip0 + 4, repMatch2 + 4, iend, repEnd2, prefixStart) + + 4; /* swap offset_2 <=> offset_1 */ uint tmpOffset = offset_2; offset_2 = offset_1; @@ -886,37 +876,13 @@ nuint srcSize { default: case 4: - return ZSTD_compressBlock_fast_dictMatchState_4_0( - ms, - seqStore, - rep, - src, - srcSize - ); + return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); case 5: - return ZSTD_compressBlock_fast_dictMatchState_5_0( - ms, - seqStore, - rep, - src, - srcSize - ); + return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); case 6: - return ZSTD_compressBlock_fast_dictMatchState_6_0( - ms, - seqStore, - rep, - src, - srcSize - ); + return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); case 7: - return ZSTD_compressBlock_fast_dictMatchState_7_0( - ms, - seqStore, - rep, - src, - srcSize - ); + return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); } } @@ -950,9 +916,9 @@ uint hasStep byte* iend = istart + srcSize; byte* ilimit = iend - 8; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; uint offsetSaved1 = 0, - offsetSaved2 = 0; + offsetSaved2 = 0; byte* ip0 = istart; byte* ip1; byte* ip2; @@ -1043,8 +1009,7 @@ uint hasStep } { - uint mval = - idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + uint mval = idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; if (MEM_read32(ip0) == mval) { goto _offset; @@ -1061,8 +1026,7 @@ uint hasStep current0 = (uint)(ip0 - @base); hashTable[hash0] = current0; { - uint mval = - idx >= dictStartIndex ? MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; + uint mval = idx >= dictStartIndex ? 
MEM_read32(idxBase + idx) : MEM_read32(ip0) ^ 1; if (MEM_read32(ip0) == mval) { goto _offset; @@ -1143,10 +1107,8 @@ uint hasStep byte* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : @base + repIndex2; if ( - ( - ZSTD_index_overlap_check(prefixStartIndex, repIndex2) - & (offset_2 > 0 ? 1 : 0) - ) != 0 + (ZSTD_index_overlap_check(prefixStartIndex, repIndex2) & (offset_2 > 0 ? 1 : 0)) + != 0 && MEM_read32(repMatch2) == MEM_read32(ip0) ) { @@ -1244,4 +1206,4 @@ nuint srcSize return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); } } -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs index 16f4c5135..bea3765fa 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdInternal.cs @@ -14,12 +14,12 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_repStartValue => new uint[3] { 1, 4, 8 }; - private static uint* repStartValue => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_repStartValue) - ); + private static ReadOnlySpan Span_repStartValue => new uint[3] { 1, 4, 8 }; + private static uint* repStartValue => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_repStartValue) + ); #else private static readonly uint* repStartValue = GetArrayPointer(new uint[3] { 1, 4, 8 }); @@ -32,51 +32,51 @@ ref MemoryMarshal.GetReference(Span_repStartValue) ); private const uint ZSTD_blockHeaderSize = 3; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_bits => - new byte[36] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 6, - 7, - 8, - 9, - 10, - 11, - 
12, - 13, - 14, - 15, - 16, - }; - private static byte* LL_bits => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_LL_bits) - ); + private static ReadOnlySpan Span_LL_bits => + new byte[36] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + }; + private static byte* LL_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_bits) + ); #else private static readonly byte* LL_bits = GetArrayPointer( @@ -122,51 +122,51 @@ ref MemoryMarshal.GetReference(Span_LL_bits) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_LL_defaultNorm => - new short[36] - { - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1, - }; - private static short* LL_defaultNorm => - (short*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_LL_defaultNorm) - ); + private static ReadOnlySpan Span_LL_defaultNorm => + new short[36] + { + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + }; + private static short* LL_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_LL_defaultNorm) + ); #else private static readonly short* LL_defaultNorm = GetArrayPointer( @@ -213,68 +213,68 @@ ref MemoryMarshal.GetReference(Span_LL_defaultNorm) #endif private const uint LL_defaultNormLog = 6; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_bits => - new byte[53] - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - }; - private static byte* ML_bits => - (byte*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_ML_bits) - ); + private static ReadOnlySpan Span_ML_bits => + new byte[53] + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + }; + private static byte* ML_bits => + (byte*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_bits) + ); #else private static readonly byte* ML_bits = GetArrayPointer( @@ -337,68 +337,68 @@ ref MemoryMarshal.GetReference(Span_ML_bits) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ML_defaultNorm => - new short[53] - { - 1, - 4, - 3, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1, - -1, - -1, - -1, - }; - private static short* ML_defaultNorm => - (short*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_ML_defaultNorm) - ); + private static ReadOnlySpan Span_ML_defaultNorm => + new short[53] + { + 1, + 4, + 3, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + }; + private static short* ML_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ML_defaultNorm) + ); #else 
private static readonly short* ML_defaultNorm = GetArrayPointer( @@ -462,44 +462,44 @@ ref MemoryMarshal.GetReference(Span_ML_defaultNorm) #endif private const uint ML_defaultNormLog = 6; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_OF_defaultNorm => - new short[29] - { - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - -1, - -1, - -1, - -1, - -1, - }; - private static short* OF_defaultNorm => - (short*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_OF_defaultNorm) - ); + private static ReadOnlySpan Span_OF_defaultNorm => + new short[29] + { + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + -1, + -1, + -1, + -1, + -1, + }; + private static short* OF_defaultNorm => + (short*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_OF_defaultNorm) + ); #else private static readonly short* OF_defaultNorm = GetArrayPointer( @@ -555,24 +555,22 @@ the dst buffer. 
In circumstances where the op "catches up" to where the private static void ZSTD_copy16(void* dst, void* src) { #if NET5_0_OR_GREATER - if (AdvSimd.IsSupported) - { - AdvSimd.Store((byte*)dst, AdvSimd.LoadVector128((byte*)src)); - } - else + if (AdvSimd.IsSupported) + { + AdvSimd.Store((byte*)dst, AdvSimd.LoadVector128((byte*)src)); + } + else #endif #if NETCOREAPP3_0_OR_GREATER - if (Sse2.IsSupported) - { - Sse2.Store((byte*)dst, Sse2.LoadVector128((byte*)src)); - } - else + if (Sse2.IsSupported) + { + Sse2.Store((byte*)dst, Sse2.LoadVector128((byte*)src)); + } + else #endif { var v1 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src); - var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned( - (ulong*)src + 1 - ); + var v2 = System.Runtime.CompilerServices.Unsafe.ReadUnaligned((ulong*)src + 1); System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst, v1); System.Runtime.CompilerServices.Unsafe.WriteUnaligned((ulong*)dst + 1, v2); } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs index e75c84765..2256ffbfe 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLazy.cs @@ -1,6 +1,7 @@ using System; using System.Numerics; using System.Runtime.CompilerServices; +using static SharpCompress.Compressors.ZStandard.UnsafeHelper; #if NETCOREAPP3_0_OR_GREATER using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; @@ -8,7 +9,6 @@ #if NET5_0_OR_GREATER using System.Runtime.Intrinsics.Arm; #endif -using static SharpCompress.Compressors.ZStandard.UnsafeHelper; namespace SharpCompress.Compressors.ZStandard.Unsafe; @@ -63,7 +63,7 @@ ZSTD_dictMode_e dictMode uint btLog = cParams->chainLog - 1; uint btMask = (uint)((1 << (int)btLog) - 1); nuint commonLengthSmaller = 0, - commonLengthLarger = 0; + commonLengthLarger = 0; byte* @base = ms->window.@base; byte* dictBase = 
ms->window.dictBase; uint dictLimit = ms->window.dictLimit; @@ -88,9 +88,7 @@ ZSTD_dictMode_e dictMode uint* nextPtr = bt + 2 * (matchIndex & btMask); /* guaranteed minimum nb of common bytes */ nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; + commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; assert(matchIndex < curr); if ( dictMode != ZSTD_dictMode_e.ZSTD_extDict @@ -185,19 +183,16 @@ ZSTD_dictMode_e dictMode uint* dictBt = dms->chainTable; uint btLog = dmsCParams->chainLog - 1; uint btMask = (uint)((1 << (int)btLog) - 1); - uint btLow = - btMask >= dictHighLimit - dictLowLimit ? dictLowLimit : dictHighLimit - btMask; + uint btLow = btMask >= dictHighLimit - dictLowLimit ? dictLowLimit : dictHighLimit - btMask; nuint commonLengthSmaller = 0, - commonLengthLarger = 0; + commonLengthLarger = 0; assert(dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState); for (; nbCompares != 0 && dictMatchIndex > dictLowLimit; --nbCompares) { uint* nextPtr = dictBt + 2 * (dictMatchIndex & btMask); /* guaranteed minimum nb of common bytes */ nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; + commonLengthSmaller < commonLengthLarger ? 
commonLengthSmaller : commonLengthLarger; byte* match = dictBase + dictMatchIndex; matchLength += ZSTD_count_2segments( ip + matchLength, @@ -317,7 +312,7 @@ ZSTD_dictMode_e dictMode { nuint commonLengthSmaller = 0, - commonLengthLarger = 0; + commonLengthLarger = 0; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; byte* dictEnd = dictBase + dictLimit; @@ -618,10 +613,10 @@ nuint ddsIdx for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]); + } #endif } @@ -629,10 +624,10 @@ nuint ddsIdx uint chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1]; uint chainIndex = chainPackedPointer >> 8; #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(&dms->chainTable[chainIndex]); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(&dms->chainTable[chainIndex]); + } #endif } @@ -677,10 +672,10 @@ nuint ddsIdx for (chainAttempt = 0; chainAttempt < chainLimit; chainAttempt++) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(ddsBase + dms->chainTable[chainIndex + chainAttempt]); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(ddsBase + dms->chainTable[chainIndex + chainAttempt]); + } #endif } @@ -695,8 +690,7 @@ nuint ddsIdx if (MEM_read32(match) == MEM_read32(ip)) { currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) - + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, ddsEnd, prefixStart) + 4; } if (currentMl > ml) @@ -793,10 +787,10 @@ ZSTD_dictMode_e dictMode { uint* entry = &dms->hashTable[ddsIdx]; #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(entry); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(entry); + } #endif } @@ -823,8 +817,7 @@ ZSTD_dictMode_e dictMode assert(match + 4 <= dictEnd); if (MEM_read32(match) == 
MEM_read32(ip)) currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) - + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + 4; } if (currentMl > ml) @@ -876,8 +869,7 @@ ZSTD_dictMode_e dictMode assert(match + 4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) currentMl = - ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) - + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + 4; if (currentMl > ml) { ml = currentMl; @@ -935,44 +927,39 @@ private static int ZSTD_isAligned(void* ptr, nuint align) * Performs prefetching for the hashTable and tagTable at a given row. */ [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void ZSTD_row_prefetch( - uint* hashTable, - byte* tagTable, - uint relRow, - uint rowLog - ) + private static void ZSTD_row_prefetch(uint* hashTable, byte* tagTable, uint relRow, uint rowLog) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(hashTable + relRow); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(hashTable + relRow); + } #endif if (rowLog >= 5) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(hashTable + relRow + 16); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(hashTable + relRow + 16); + } #endif } #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(tagTable + relRow); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(tagTable + relRow); + } #endif if (rowLog == 6) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(tagTable + relRow + 32); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(tagTable + relRow + 32); + } #endif } @@ -1080,13 +1067,7 @@ uint useCache byte* tagRow = tagTable + relRow; uint pos = ZSTD_row_nextIndex(tagRow, rowMask); assert( - hash - == ZSTD_hashPtrSalted( - @base + updateStartIdx, - hashLog + 8, - mls, - ms->hashSalt - ) + hash == ZSTD_hashPtrSalted(@base + updateStartIdx, hashLog + 8, mls, ms->hashSalt) 
); tagRow[pos] = (byte)(hash & (1U << 8) - 1); row[pos] = updateStartIdx; @@ -1155,71 +1136,71 @@ private static uint ZSTD_row_matchMaskGroupWidth(uint rowEntries) assert(rowEntries == 16 || rowEntries == 32 || rowEntries == 64); assert(rowEntries <= 64); #if NET5_0_OR_GREATER - if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) - { - if (rowEntries == 16) - return 4; + if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) + { + if (rowEntries == 16) + return 4; #if NET9_0_OR_GREATER - if (AdvSimd.Arm64.IsSupported) - { - if (rowEntries == 32) - return 2; - if (rowEntries == 64) - return 1; - } -#endif + if (AdvSimd.Arm64.IsSupported) + { + if (rowEntries == 32) + return 2; + if (rowEntries == 64) + return 1; } +#endif + } #endif return 1; } #if NETCOREAPP3_0_OR_GREATER - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint head) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ulong ZSTD_row_getSSEMask(int nbChunks, byte* src, byte tag, uint head) + { + Vector128 comparisonMask = Vector128.Create(tag); + assert(nbChunks is 1 or 2 or 4); + if (nbChunks == 1) { - Vector128 comparisonMask = Vector128.Create(tag); - assert(nbChunks is 1 or 2 or 4); - if (nbChunks == 1) - { - Vector128 chunk0 = Sse2.LoadVector128(src); - Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); - int matches0 = Sse2.MoveMask(equalMask0); - return BitOperations.RotateRight((ushort)matches0, (int)head); - } + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + return BitOperations.RotateRight((ushort)matches0, (int)head); + } - if (nbChunks == 2) - { - Vector128 chunk0 = Sse2.LoadVector128(src); - Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); - int matches0 = Sse2.MoveMask(equalMask0); - Vector128 chunk1 = Sse2.LoadVector128(src + 16); - Vector128 
equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); - int matches1 = Sse2.MoveMask(equalMask1); - return BitOperations.RotateRight((uint)matches1 << 16 | (uint)matches0, (int)head); - } + if (nbChunks == 2) + { + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + Vector128 chunk1 = Sse2.LoadVector128(src + 16); + Vector128 equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); + int matches1 = Sse2.MoveMask(equalMask1); + return BitOperations.RotateRight((uint)matches1 << 16 | (uint)matches0, (int)head); + } - { - Vector128 chunk0 = Sse2.LoadVector128(src); - Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); - int matches0 = Sse2.MoveMask(equalMask0); - Vector128 chunk1 = Sse2.LoadVector128(src + 16 * 1); - Vector128 equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); - int matches1 = Sse2.MoveMask(equalMask1); - Vector128 chunk2 = Sse2.LoadVector128(src + 16 * 2); - Vector128 equalMask2 = Sse2.CompareEqual(chunk2, comparisonMask); - int matches2 = Sse2.MoveMask(equalMask2); - Vector128 chunk3 = Sse2.LoadVector128(src + 16 * 3); - Vector128 equalMask3 = Sse2.CompareEqual(chunk3, comparisonMask); - int matches3 = Sse2.MoveMask(equalMask3); - return BitOperations.RotateRight( - (ulong)matches3 << 48 - | (ulong)matches2 << 32 - | (ulong)matches1 << 16 - | (uint)matches0, - (int)head - ); - } + { + Vector128 chunk0 = Sse2.LoadVector128(src); + Vector128 equalMask0 = Sse2.CompareEqual(chunk0, comparisonMask); + int matches0 = Sse2.MoveMask(equalMask0); + Vector128 chunk1 = Sse2.LoadVector128(src + 16 * 1); + Vector128 equalMask1 = Sse2.CompareEqual(chunk1, comparisonMask); + int matches1 = Sse2.MoveMask(equalMask1); + Vector128 chunk2 = Sse2.LoadVector128(src + 16 * 2); + Vector128 equalMask2 = Sse2.CompareEqual(chunk2, comparisonMask); + int matches2 = Sse2.MoveMask(equalMask2); + Vector128 chunk3 = Sse2.LoadVector128(src + 16 * 3); + 
Vector128 equalMask3 = Sse2.CompareEqual(chunk3, comparisonMask); + int matches3 = Sse2.MoveMask(equalMask3); + return BitOperations.RotateRight( + (ulong)matches3 << 48 + | (ulong)matches2 << 32 + | (ulong)matches1 << 16 + | (uint)matches0, + (int)head + ); } + } #endif /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by @@ -1241,88 +1222,87 @@ uint rowEntries assert(rowEntries <= 64); assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ulong) * 8); #if NETCOREAPP3_0_OR_GREATER - if (Sse2.IsSupported) - { - return ZSTD_row_getSSEMask((int)(rowEntries / 16), src, tag, headGrouped); - } + if (Sse2.IsSupported) + { + return ZSTD_row_getSSEMask((int)(rowEntries / 16), src, tag, headGrouped); + } #endif #if NET5_0_OR_GREATER - if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) + if (AdvSimd.IsSupported && BitConverter.IsLittleEndian) + { + if (rowEntries == 16) { - if (rowEntries == 16) + /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. + * After that groups of 4 bits represent the equalMask. We lower + * all bits except the highest in these groups by doing AND with + * 0x88 = 0b10001000. + */ + Vector128 chunk = AdvSimd.LoadVector128(src); + Vector128 equalMask = AdvSimd + .CompareEqual(chunk, AdvSimd.DuplicateToVector128(tag)) + .As(); + Vector64 res = AdvSimd.ShiftRightLogicalNarrowingLower(equalMask, 4); + ulong matches = res.As().GetElement(0); + return BitOperations.RotateRight(matches, (int)headGrouped) & 0x8888888888888888; + } + else if (rowEntries == 32) + { +#if NET9_0_OR_GREATER + if (AdvSimd.Arm64.IsSupported) { - /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. - * After that groups of 4 bits represent the equalMask. We lower - * all bits except the highest in these groups by doing AND with - * 0x88 = 0b10001000. + /* Same idea as with rowEntries == 16 but doing AND with + * 0x55 = 0b01010101. 
*/ - Vector128 chunk = AdvSimd.LoadVector128(src); - Vector128 equalMask = AdvSimd - .CompareEqual(chunk, AdvSimd.DuplicateToVector128(tag)) - .As(); - Vector64 res = AdvSimd.ShiftRightLogicalNarrowingLower(equalMask, 4); + (Vector128 chunk0, Vector128 chunk1) = + AdvSimd.Arm64.Load2xVector128AndUnzip((ushort*)src); + Vector128 dup = AdvSimd.DuplicateToVector128(tag); + Vector64 t0 = AdvSimd.ShiftRightLogicalNarrowingLower( + AdvSimd.CompareEqual(chunk0.As(), dup).As(), + 6 + ); + Vector64 t1 = AdvSimd.ShiftRightLogicalNarrowingLower( + AdvSimd.CompareEqual(chunk1.As(), dup).As(), + 6 + ); + Vector64 res = AdvSimd.ShiftLeftAndInsert(t0, t1, 4); ulong matches = res.As().GetElement(0); return BitOperations.RotateRight(matches, (int)headGrouped) - & 0x8888888888888888; + & 0x5555555555555555; } - else if (rowEntries == 32) - { -#if NET9_0_OR_GREATER - if (AdvSimd.Arm64.IsSupported) - { - /* Same idea as with rowEntries == 16 but doing AND with - * 0x55 = 0b01010101. - */ - (Vector128 chunk0, Vector128 chunk1) = - AdvSimd.Arm64.Load2xVector128AndUnzip((ushort*)src); - Vector128 dup = AdvSimd.DuplicateToVector128(tag); - Vector64 t0 = AdvSimd.ShiftRightLogicalNarrowingLower( - AdvSimd.CompareEqual(chunk0.As(), dup).As(), - 6 - ); - Vector64 t1 = AdvSimd.ShiftRightLogicalNarrowingLower( - AdvSimd.CompareEqual(chunk1.As(), dup).As(), - 6 - ); - Vector64 res = AdvSimd.ShiftLeftAndInsert(t0, t1, 4); - ulong matches = res.As().GetElement(0); - return BitOperations.RotateRight(matches, (int)headGrouped) - & 0x5555555555555555; - } #endif - } - else - { /* rowEntries == 64 */ + } + else + { /* rowEntries == 64 */ #if NET9_0_OR_GREATER - if (AdvSimd.Arm64.IsSupported) - { - ( - Vector128 chunk0, - Vector128 chunk1, - Vector128 chunk2, - Vector128 chunk3 - ) = AdvSimd.Arm64.Load4xVector128AndUnzip(src); - Vector128 dup = AdvSimd.DuplicateToVector128(tag); - Vector128 cmp0 = AdvSimd.CompareEqual(chunk0, dup); - Vector128 cmp1 = AdvSimd.CompareEqual(chunk1, dup); - Vector128 cmp2 
= AdvSimd.CompareEqual(chunk2, dup); - Vector128 cmp3 = AdvSimd.CompareEqual(chunk3, dup); - - Vector128 t0 = AdvSimd.ShiftRightAndInsert(cmp1, cmp0, 1); - Vector128 t1 = AdvSimd.ShiftRightAndInsert(cmp3, cmp2, 1); - Vector128 t2 = AdvSimd.ShiftRightAndInsert(t1, t0, 2); - Vector128 t3 = AdvSimd.ShiftRightAndInsert(t2, t2, 4); - Vector64 t4 = AdvSimd.ShiftRightLogicalNarrowingLower( - t3.As(), - 4 - ); - ulong matches = t4.As().GetElement(0); - return BitOperations.RotateRight(matches, (int)headGrouped); - } -#endif + if (AdvSimd.Arm64.IsSupported) + { + ( + Vector128 chunk0, + Vector128 chunk1, + Vector128 chunk2, + Vector128 chunk3 + ) = AdvSimd.Arm64.Load4xVector128AndUnzip(src); + Vector128 dup = AdvSimd.DuplicateToVector128(tag); + Vector128 cmp0 = AdvSimd.CompareEqual(chunk0, dup); + Vector128 cmp1 = AdvSimd.CompareEqual(chunk1, dup); + Vector128 cmp2 = AdvSimd.CompareEqual(chunk2, dup); + Vector128 cmp3 = AdvSimd.CompareEqual(chunk3, dup); + + Vector128 t0 = AdvSimd.ShiftRightAndInsert(cmp1, cmp0, 1); + Vector128 t1 = AdvSimd.ShiftRightAndInsert(cmp3, cmp2, 1); + Vector128 t2 = AdvSimd.ShiftRightAndInsert(t1, t0, 2); + Vector128 t3 = AdvSimd.ShiftRightAndInsert(t2, t2, 4); + Vector64 t4 = AdvSimd.ShiftRightLogicalNarrowingLower( + t3.As(), + 4 + ); + ulong matches = t4.As().GetElement(0); + return BitOperations.RotateRight(matches, (int)headGrouped); } +#endif } + } #endif { @@ -1446,10 +1426,10 @@ uint rowLog { ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << 2; #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(&dms->hashTable[ddsIdx]); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(&dms->hashTable[ddsIdx]); + } #endif } @@ -1504,8 +1484,7 @@ uint rowLog ulong matches = ZSTD_row_getMatchMask(tagRow, (byte)tag, headGrouped, rowEntries); for (; matches > 0 && nbAttempts > 0; matches &= matches - 1) { - uint matchPos = - (headGrouped + ZSTD_VecMask_next(matches)) / groupWidth & rowMask; + uint matchPos = (headGrouped + 
ZSTD_VecMask_next(matches)) / groupWidth & rowMask; uint matchIndex = row[matchPos]; if (matchPos == 0) continue; @@ -1515,19 +1494,19 @@ uint rowLog if (dictMode != ZSTD_dictMode_e.ZSTD_extDict || matchIndex >= dictLimit) { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(@base + matchIndex); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(@base + matchIndex); + } #endif } else { #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(dictBase + matchIndex); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(dictBase + matchIndex); + } #endif } @@ -1560,13 +1539,8 @@ uint rowLog assert(match + 4 <= dictEnd); if (MEM_read32(match) == MEM_read32(ip)) currentMl = - ZSTD_count_2segments( - ip + 4, - match + 4, - iLimit, - dictEnd, - prefixStart - ) + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dictEnd, prefixStart) + + 4; } if (currentMl > ml) @@ -1625,10 +1599,10 @@ uint rowLog if (matchIndex < dmsLowestIndex) break; #if NETCOREAPP3_0_OR_GREATER - if (Sse.IsSupported) - { - Sse.Prefetch0(dmsBase + matchIndex); - } + if (Sse.IsSupported) + { + Sse.Prefetch0(dmsBase + matchIndex); + } #endif matchBuffer[numMatches++] = matchIndex; @@ -1646,13 +1620,8 @@ uint rowLog assert(match + 4 <= dmsEnd); if (MEM_read32(match) == MEM_read32(ip)) currentMl = - ZSTD_count_2segments( - ip + 4, - match + 4, - iLimit, - dmsEnd, - prefixStart - ) + 4; + ZSTD_count_2segments(ip + 4, match + 4, iLimit, dmsEnd, prefixStart) + + 4; } if (currentMl > ml) @@ -1693,15 +1662,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_4_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 4); } private static nuint ZSTD_RowFindBestMatch_noDict_4_5( @@ -1725,15 +1686,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_4_5( : ms->cParams.searchLog ) == 5 ); - return 
ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 5); } private static nuint ZSTD_RowFindBestMatch_noDict_4_6( @@ -1757,15 +1710,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_4_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_noDict, 6); } private static nuint ZSTD_RowFindBestMatch_noDict_5_4( @@ -1789,15 +1734,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_5_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 4); } private static nuint ZSTD_RowFindBestMatch_noDict_5_5( @@ -1821,15 +1758,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_5_5( : ms->cParams.searchLog ) == 5 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 5); } private static nuint ZSTD_RowFindBestMatch_noDict_5_6( @@ -1853,15 +1782,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_5_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_noDict, 6); } private static nuint ZSTD_RowFindBestMatch_noDict_6_4( @@ -1885,15 +1806,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_6_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 4 - ); + return 
ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 4); } private static nuint ZSTD_RowFindBestMatch_noDict_6_5( @@ -1917,15 +1830,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_6_5( : ms->cParams.searchLog ) == 5 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 5); } private static nuint ZSTD_RowFindBestMatch_noDict_6_6( @@ -1949,15 +1854,7 @@ private static nuint ZSTD_RowFindBestMatch_noDict_6_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_noDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_noDict, 6); } private static nuint ZSTD_RowFindBestMatch_extDict_4_4( @@ -1981,15 +1878,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_4_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 4); } private static nuint ZSTD_RowFindBestMatch_extDict_4_5( @@ -2013,15 +1902,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_4_5( : ms->cParams.searchLog ) == 5 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 5); } private static nuint ZSTD_RowFindBestMatch_extDict_4_6( @@ -2045,15 +1926,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_4_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMode_e.ZSTD_extDict, 6); } private static nuint 
ZSTD_RowFindBestMatch_extDict_5_4( @@ -2077,15 +1950,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_5_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 4); } private static nuint ZSTD_RowFindBestMatch_extDict_5_5( @@ -2109,15 +1974,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_5_5( : ms->cParams.searchLog ) == 5 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 5); } private static nuint ZSTD_RowFindBestMatch_extDict_5_6( @@ -2141,15 +1998,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_5_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMode_e.ZSTD_extDict, 6); } private static nuint ZSTD_RowFindBestMatch_extDict_6_4( @@ -2173,15 +2022,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_6_4( : ms->cParams.searchLog ) == 4 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 4 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 4); } private static nuint ZSTD_RowFindBestMatch_extDict_6_5( @@ -2205,15 +2046,7 @@ private static nuint ZSTD_RowFindBestMatch_extDict_6_5( : ms->cParams.searchLog ) == 5 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 5 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 5); } private static nuint ZSTD_RowFindBestMatch_extDict_6_6( @@ -2237,15 +2070,7 @@ private static nuint 
ZSTD_RowFindBestMatch_extDict_6_6( : ms->cParams.searchLog ) == 6 ); - return ZSTD_RowFindBestMatch( - ms, - ip, - iLimit, - offsetPtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict, - 6 - ); + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMode_e.ZSTD_extDict, 6); } private static nuint ZSTD_RowFindBestMatch_dictMatchState_4_4( @@ -2890,14 +2715,7 @@ private static nuint ZSTD_BtFindBestMatch_extDict_4( : ms->cParams.minMatch ) == 4 ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 4, - ZSTD_dictMode_e.ZSTD_extDict - ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 4, ZSTD_dictMode_e.ZSTD_extDict); } private static nuint ZSTD_BtFindBestMatch_extDict_5( @@ -2914,14 +2732,7 @@ private static nuint ZSTD_BtFindBestMatch_extDict_5( : ms->cParams.minMatch ) == 5 ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 5, - ZSTD_dictMode_e.ZSTD_extDict - ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 5, ZSTD_dictMode_e.ZSTD_extDict); } private static nuint ZSTD_BtFindBestMatch_extDict_6( @@ -2938,14 +2749,7 @@ private static nuint ZSTD_BtFindBestMatch_extDict_6( : ms->cParams.minMatch ) == 6 ); - return ZSTD_BtFindBestMatch( - ms, - ip, - iLimit, - offBasePtr, - 6, - ZSTD_dictMode_e.ZSTD_extDict - ); + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, 6, ZSTD_dictMode_e.ZSTD_extDict); } private static nuint ZSTD_BtFindBestMatch_dictMatchState_4( @@ -3477,57 +3281,27 @@ ZSTD_dictMode_e dictMode if (mls == 4) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_4_4( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_4_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_4_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_4_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dictMatchState_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) - return 
ZSTD_RowFindBestMatch_dictMatchState_5_4( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_5_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_5_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_5_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dictMatchState_5_6(ms, ip, iend, offsetPtr); } if (mls == 6) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dictMatchState_6_4( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_6_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dictMatchState_6_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dictMatchState_6_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dictMatchState_6_6(ms, ip, iend, offsetPtr); } } @@ -3554,57 +3328,27 @@ ZSTD_dictMode_e dictMode if (mls == 4) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dedicatedDictSearch_4_6(ms, ip, iend, offsetPtr); } if (mls == 5) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dedicatedDictSearch_5_6(ms, ip, iend, offsetPtr); } if (mls == 6) { if (rowLog == 4) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4( - ms, - ip, - iend, - offsetPtr - 
); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_4(ms, ip, iend, offsetPtr); if (rowLog == 5) - return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5( - ms, - ip, - iend, - offsetPtr - ); + return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_5(ms, ip, iend, offsetPtr); return ZSTD_RowFindBestMatch_dedicatedDictSearch_6_6(ms, ip, iend, offsetPtr); } } @@ -3658,9 +3402,9 @@ ZSTD_dictMode_e dictMode : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; uint offsetSaved1 = 0, - offsetSaved2 = 0; + offsetSaved2 = 0; int isDMS = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? 1 : 0; int isDDS = dictMode == ZSTD_dictMode_e.ZSTD_dedicatedDictSearch ? 1 : 0; int isDxS = isDMS != 0 || isDDS != 0 ? 1 : 0; @@ -3827,9 +3571,7 @@ ZSTD_dictMode_e dictMode prefixLowest ) + 4; int gain2 = (int)(mlRep * 3); - int gain1 = (int)( - matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1 - ); + int gain1 = (int)(matchLength * 3 - ZSTD_highbit32((uint)offBase) + 1); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -3877,9 +3619,7 @@ ZSTD_dictMode_e dictMode { nuint mlRep = ZSTD_count(ip + 4, ip + 4 - offset_1, iend) + 4; int gain2 = (int)(mlRep * 4); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1 - ); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 1); if (mlRep >= 4 && gain2 > gain1) { matchLength = mlRep; @@ -3902,8 +3642,7 @@ ZSTD_dictMode_e dictMode && MEM_read32(repMatch) == MEM_read32(ip) ) { - byte* repMatchEnd = - repIndex < prefixLowestIndex ? dictEnd : iend; + byte* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; nuint mlRep = ZSTD_count_2segments( ip + 4, @@ -3941,9 +3680,7 @@ ZSTD_dictMode_e dictMode ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 - ); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -4030,13 +3767,8 @@ ZSTD_dictMode_e dictMode { byte* repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repEnd2, - prefixLowest - ) + 4; + ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd2, prefixLowest) + + 4; offBase = offset_2; offset_2 = offset_1; offset_1 = (uint)offBase; @@ -4054,9 +3786,7 @@ ZSTD_dictMode_e dictMode if (dictMode == ZSTD_dictMode_e.ZSTD_noDict) { - while ( - ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2) - ) + while (ip <= ilimit && offset_2 > 0 && MEM_read32(ip) == MEM_read32(ip - offset_2)) { matchLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4; offBase = offset_2; @@ -4510,7 +4240,7 @@ uint depth : ms->cParams.searchLog <= 6 ? ms->cParams.searchLog : 6; uint offset_1 = rep[0], - offset_2 = rep[1]; + offset_2 = rep[1]; ms->lazySkipping = 0; ip += ip == prefixStart ? 1 : 0; if (searchMethod == searchMethod_e.search_rowHash) @@ -4707,9 +4437,7 @@ uint depth ); /* raw approx */ int gain2 = (int)(ml2 * 4 - ZSTD_highbit32((uint)ofbCandidate)); - int gain1 = (int)( - matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7 - ); + int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((uint)offBase) + 7); if (ml2 >= 4 && gain2 > gain1) { matchLength = ml2; @@ -4727,8 +4455,7 @@ uint depth { assert(offBase > 3); uint matchIndex = (uint)((nuint)(start - @base) - (offBase - 3)); - byte* match = - matchIndex < dictLimit ? dictBase + matchIndex : @base + matchIndex; + byte* match = matchIndex < dictLimit ? 
dictBase + matchIndex : @base + matchIndex; byte* mStart = matchIndex < dictLimit ? dictStart : prefixStart; while (start > anchor && match > mStart && start[-1] == match[-1]) { @@ -4777,13 +4504,8 @@ uint depth /* repcode detected we should take it */ byte* repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = - ZSTD_count_2segments( - ip + 4, - repMatch + 4, - iend, - repEnd, - prefixStart - ) + 4; + ZSTD_count_2segments(ip + 4, repMatch + 4, iend, repEnd, prefixStart) + + 4; offBase = offset_2; offset_2 = offset_1; offset_1 = (uint)offBase; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs index 6b8c3471b..eb6349798 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdm.cs @@ -86,7 +86,7 @@ private static nuint ZSTD_ldm_gear_feed( { nuint n; ulong hash, - mask; + mask; hash = state->rolling; mask = state->stopMask; n = 0; @@ -180,13 +180,13 @@ private static void ZSTD_ldm_adjustParameters( { assert( @params->hashLog - <= (uint)( - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30 - ) + <= (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ) ); if (@params->windowLog > @params->hashLog) { @@ -204,22 +204,22 @@ private static void ZSTD_ldm_adjustParameters( { @params->hashLog = @params->windowLog - @params->hashRateLog <= 6 ? 6 - : @params->windowLog - @params->hashRateLog - <= (uint)( - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30 - ) - ? @params->windowLog - @params->hashRateLog - : (uint)( - (sizeof(nuint) == 4 ? 30 : 31) < 30 - ? sizeof(nuint) == 4 - ? 30 - : 31 - : 30 - ); + : @params->windowLog - @params->hashRateLog + <= (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ) + ? 
@params->windowLog - @params->hashRateLog + : (uint)( + (sizeof(nuint) == 4 ? 30 : 31) < 30 + ? sizeof(nuint) == 4 + ? 30 + : 31 + : 30 + ); } if (@params->minMatchLength == 0) @@ -239,9 +239,7 @@ private static void ZSTD_ldm_adjustParameters( } @params->bucketSizeLog = - @params->bucketSizeLog < @params->hashLog - ? @params->bucketSizeLog - : @params->hashLog; + @params->bucketSizeLog < @params->hashLog ? @params->bucketSizeLog : @params->hashLog; } /** ZSTD_ldm_getTableSize() : @@ -492,13 +490,7 @@ nuint srcSize nuint hashed; uint n; numSplits = 0; - hashed = ZSTD_ldm_gear_feed( - &hashState, - ip, - (nuint)(ilimit - ip), - splits, - &numSplits - ); + hashed = ZSTD_ldm_gear_feed(&hashState, ip, (nuint)(ilimit - ip), splits, &numSplits); for (n = 0; n < numSplits; n++) { byte* split = ip + splits[n] - minMatchLength; @@ -507,11 +499,7 @@ nuint srcSize candidates[n].split = split; candidates[n].hash = hash; candidates[n].checksum = (uint)(xxhash >> 32); - candidates[n].bucket = ZSTD_ldm_getBucket( - ldmState, - hash, - @params->bucketSizeLog - ); + candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, @params->bucketSizeLog); #if NETCOREAPP3_0_OR_GREATER if (System.Runtime.Intrinsics.X86.Sse.IsSupported) { @@ -523,9 +511,9 @@ nuint srcSize for (n = 0; n < numSplits; n++) { nuint forwardMatchLength = 0, - backwardMatchLength = 0, - bestMatchLength = 0, - mLength; + backwardMatchLength = 0, + bestMatchLength = 0, + mLength; uint offset; byte* split = candidates[n].split; uint checksum = candidates[n].checksum; @@ -545,8 +533,8 @@ nuint srcSize for (cur = bucket; cur < bucket + entsPerBucket; cur++) { nuint curForwardMatchLength, - curBackwardMatchLength, - curTotalMatchLength; + curBackwardMatchLength, + curTotalMatchLength; if (cur->checksum != checksum || cur->offset <= lowestIndex) { continue; @@ -617,9 +605,7 @@ nuint srcSize { rawSeq* seq = rawSeqStore->seq + rawSeqStore->size; if (rawSeqStore->size == rawSeqStore->capacity) - return unchecked( - 
(nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)); seq->litLength = (uint)(split - backwardMatchLength - anchor); seq->matchLength = (uint)mLength; seq->offset = offset; @@ -682,13 +668,12 @@ nuint srcSize byte* istart = (byte*)src; byte* iend = istart + srcSize; const nuint kMaxChunkSize = 1 << 20; - nuint nbChunks = - srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); + nuint nbChunks = srcSize / kMaxChunkSize + (nuint)(srcSize % kMaxChunkSize != 0 ? 1 : 0); nuint chunk; nuint leftoverSize = 0; assert( unchecked((uint)-1) - (MEM_64bits ? 3500U * (1 << 20) : 2000U * (1 << 20)) - >= kMaxChunkSize + >= kMaxChunkSize ); assert(ldmState->window.nextSrc >= (byte*)src + srcSize); assert(sequences->pos <= sequences->size); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs index a828beb2a..5edf11974 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdLdmGeartab.cs @@ -7,271 +7,271 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public static unsafe partial class Methods { #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_ZSTD_ldm_gearTab => - new ulong[256] - { - 0xf5b8f72c5f77775c, - 0x84935f266b7ac412, - 0xb647ada9ca730ccc, - 0xb065bb4b114fb1de, - 0x34584e7e8c3a9fd0, - 0x4e97e17c6ae26b05, - 0x3a03d743bc99a604, - 0xcecd042422c4044f, - 0x76de76c58524259e, - 0x9c8528f65badeaca, - 0x86563706e2097529, - 0x2902475fa375d889, - 0xafb32a9739a5ebe6, - 0xce2714da3883e639, - 0x21eaf821722e69e, - 0x37b628620b628, - 0x49a8d455d88caf5, - 0x8556d711e6958140, - 0x4f7ae74fc605c1f, - 0x829f0c3468bd3a20, - 0x4ffdc885c625179e, - 0x8473de048a3daf1b, - 0x51008822b05646b2, - 0x69d75d12b2d1cc5f, - 0x8c9d4a19159154bc, - 0xc3cc10f4abbd4003, - 0xd06ddc1cecb97391, - 0xbe48e6e7ed80302e, - 
0x3481db31cee03547, - 0xacc3f67cdaa1d210, - 0x65cb771d8c7f96cc, - 0x8eb27177055723dd, - 0xc789950d44cd94be, - 0x934feadc3700b12b, - 0x5e485f11edbdf182, - 0x1e2e2a46fd64767a, - 0x2969ca71d82efa7c, - 0x9d46e9935ebbba2e, - 0xe056b67e05e6822b, - 0x94d73f55739d03a0, - 0xcd7010bdb69b5a03, - 0x455ef9fcd79b82f4, - 0x869cb54a8749c161, - 0x38d1a4fa6185d225, - 0xb475166f94bbe9bb, - 0xa4143548720959f1, - 0x7aed4780ba6b26ba, - 0xd0ce264439e02312, - 0x84366d746078d508, - 0xa8ce973c72ed17be, - 0x21c323a29a430b01, - 0x9962d617e3af80ee, - 0xab0ce91d9c8cf75b, - 0x530e8ee6d19a4dbc, - 0x2ef68c0cf53f5d72, - 0xc03a681640a85506, - 0x496e4e9f9c310967, - 0x78580472b59b14a0, - 0x273824c23b388577, - 0x66bf923ad45cb553, - 0x47ae1a5a2492ba86, - 0x35e304569e229659, - 0x4765182a46870b6f, - 0x6cbab625e9099412, - 0xddac9a2e598522c1, - 0x7172086e666624f2, - 0xdf5003ca503b7837, - 0x88c0c1db78563d09, - 0x58d51865acfc289d, - 0x177671aec65224f1, - 0xfb79d8a241e967d7, - 0x2be1e101cad9a49a, - 0x6625682f6e29186b, - 0x399553457ac06e50, - 0x35dffb4c23abb74, - 0x429db2591f54aade, - 0xc52802a8037d1009, - 0x6acb27381f0b25f3, - 0xf45e2551ee4f823b, - 0x8b0ea2d99580c2f7, - 0x3bed519cbcb4e1e1, - 0xff452823dbb010a, - 0x9d42ed614f3dd267, - 0x5b9313c06257c57b, - 0xa114b8008b5e1442, - 0xc1fe311c11c13d4b, - 0x66e8763ea34c5568, - 0x8b982af1c262f05d, - 0xee8876faaa75fbb7, - 0x8a62a4d0d172bb2a, - 0xc13d94a3b7449a97, - 0x6dbbba9dc15d037c, - 0xc786101f1d92e0f1, - 0xd78681a907a0b79b, - 0xf61aaf2962c9abb9, - 0x2cfd16fcd3cb7ad9, - 0x868c5b6744624d21, - 0x25e650899c74ddd7, - 0xba042af4a7c37463, - 0x4eb1a539465a3eca, - 0xbe09dbf03b05d5ca, - 0x774e5a362b5472ba, - 0x47a1221229d183cd, - 0x504b0ca18ef5a2df, - 0xdffbdfbde2456eb9, - 0x46cd2b2fbee34634, - 0xf2aef8fe819d98c3, - 0x357f5276d4599d61, - 0x24a5483879c453e3, - 0x88026889192b4b9, - 0x28da96671782dbec, - 0x4ef37c40588e9aaa, - 0x8837b90651bc9fb3, - 0xc164f741d3f0e5d6, - 0xbc135a0a704b70ba, - 0x69cd868f7622ada, - 0xbc37ba89e0b9c0ab, - 0x47c14a01323552f6, - 0x4f00794bacee98bb, - 
0x7107de7d637a69d5, - 0x88af793bb6f2255e, - 0xf3c6466b8799b598, - 0xc288c616aa7f3b59, - 0x81ca63cf42fca3fd, - 0x88d85ace36a2674b, - 0xd056bd3792389e7, - 0xe55c396c4e9dd32d, - 0xbefb504571e6c0a6, - 0x96ab32115e91e8cc, - 0xbf8acb18de8f38d1, - 0x66dae58801672606, - 0x833b6017872317fb, - 0xb87c16f2d1c92864, - 0xdb766a74e58b669c, - 0x89659f85c61417be, - 0xc8daad856011ea0c, - 0x76a4b565b6fe7eae, - 0xa469d085f6237312, - 0xaaf0365683a3e96c, - 0x4dbb746f8424f7b8, - 0x638755af4e4acc1, - 0x3d7807f5bde64486, - 0x17be6d8f5bbb7639, - 0x903f0cd44dc35dc, - 0x67b672eafdf1196c, - 0xa676ff93ed4c82f1, - 0x521d1004c5053d9d, - 0x37ba9ad09ccc9202, - 0x84e54d297aacfb51, - 0xa0b4b776a143445, - 0x820d471e20b348e, - 0x1874383cb83d46dc, - 0x97edeec7a1efe11c, - 0xb330e50b1bdc42aa, - 0x1dd91955ce70e032, - 0xa514cdb88f2939d5, - 0x2791233fd90db9d3, - 0x7b670a4cc50f7a9b, - 0x77c07d2a05c6dfa5, - 0xe3778b6646d0a6fa, - 0xb39c8eda47b56749, - 0x933ed448addbef28, - 0xaf846af6ab7d0bf4, - 0xe5af208eb666e49, - 0x5e6622f73534cd6a, - 0x297daeca42ef5b6e, - 0x862daef3d35539a6, - 0xe68722498f8e1ea9, - 0x981c53093dc0d572, - 0xfa09b0bfbf86fbf5, - 0x30b1e96166219f15, - 0x70e7d466bdc4fb83, - 0x5a66736e35f2a8e9, - 0xcddb59d2b7c1baef, - 0xd6c7d247d26d8996, - 0xea4e39eac8de1ba3, - 0x539c8bb19fa3aff2, - 0x9f90e4c5fd508d8, - 0xa34e5956fbaf3385, - 0x2e2f8e151d3ef375, - 0x173691e9b83faec1, - 0xb85a8d56bf016379, - 0x8382381267408ae3, - 0xb90f901bbdc0096d, - 0x7c6ad32933bcec65, - 0x76bb5e2f2c8ad595, - 0x390f851a6cf46d28, - 0xc3e6064da1c2da72, - 0xc52a0c101cfa5389, - 0xd78eaf84a3fbc530, - 0x3781b9e2288b997e, - 0x73c2f6dea83d05c4, - 0x4228e364c5b5ed7, - 0x9d7a3edf0da43911, - 0x8edcfeda24686756, - 0x5e7667a7b7a9b3a1, - 0x4c4f389fa143791d, - 0xb08bc1023da7cddc, - 0x7ab4be3ae529b1cc, - 0x754e6132dbe74ff9, - 0x71635442a839df45, - 0x2f6fb1643fbe52de, - 0x961e0a42cf7a8177, - 0xf3b45d83d89ef2ea, - 0xee3de4cf4a6e3e9b, - 0xcd6848542c3295e7, - 0xe4cee1664c78662f, - 0x9947548b474c68c4, - 0x25d73777a5ed8b0b, - 0xc915b1d636b7fc, - 
0x21c2ba75d9b0d2da, - 0x5f6b5dcf608a64a1, - 0xdcf333255ff9570c, - 0x633b922418ced4ee, - 0xc136dde0b004b34a, - 0x58cc83b05d4b2f5a, - 0x5eb424dda28e42d2, - 0x62df47369739cd98, - 0xb4e0b42485e4ce17, - 0x16e1f0c1f9a8d1e7, - 0x8ec3916707560ebf, - 0x62ba6e2df2cc9db3, - 0xcbf9f4ff77d83a16, - 0x78d9d7d07d2bbcc4, - 0xef554ce1e02c41f4, - 0x8d7581127eccf94d, - 0xa9b53336cb3c8a05, - 0x38c42c0bf45c4f91, - 0x640893cdf4488863, - 0x80ec34bc575ea568, - 0x39f324f5b48eaa40, - 0xe9d9ed1f8eff527f, - 0x9224fc058cc5a214, - 0xbaba00b04cfe7741, - 0x309a9f120fcf52af, - 0xa558f3ec65626212, - 0x424bec8b7adabe2f, - 0x41622513a6aea433, - 0xb88da2d5324ca798, - 0xd287733b245528a4, - 0x9a44697e6d68aec3, - 0x7b1093be2f49bb28, - 0x50bbec632e3d8aad, - 0x6cd90723e1ea8283, - 0x897b9e7431b02bf3, - 0x219efdcb338a7047, - 0x3b0311f0a27c0656, - 0xdb17bf91c0db96e7, - 0x8cd4fd6b4e85a5b2, - 0xfab071054ba6409d, - 0x40d6fe831fa9dfd9, - 0xaf358debad7d791e, - 0xeb8d0e25a65e3e58, - 0xbbcbd3df14e08580, - 0xcf751f27ecdab2b, - 0x2b4da14f2613d8f4, - }; - private static ulong* ZSTD_ldm_gearTab => - (ulong*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab) - ); + private static ReadOnlySpan Span_ZSTD_ldm_gearTab => + new ulong[256] + { + 0xf5b8f72c5f77775c, + 0x84935f266b7ac412, + 0xb647ada9ca730ccc, + 0xb065bb4b114fb1de, + 0x34584e7e8c3a9fd0, + 0x4e97e17c6ae26b05, + 0x3a03d743bc99a604, + 0xcecd042422c4044f, + 0x76de76c58524259e, + 0x9c8528f65badeaca, + 0x86563706e2097529, + 0x2902475fa375d889, + 0xafb32a9739a5ebe6, + 0xce2714da3883e639, + 0x21eaf821722e69e, + 0x37b628620b628, + 0x49a8d455d88caf5, + 0x8556d711e6958140, + 0x4f7ae74fc605c1f, + 0x829f0c3468bd3a20, + 0x4ffdc885c625179e, + 0x8473de048a3daf1b, + 0x51008822b05646b2, + 0x69d75d12b2d1cc5f, + 0x8c9d4a19159154bc, + 0xc3cc10f4abbd4003, + 0xd06ddc1cecb97391, + 0xbe48e6e7ed80302e, + 0x3481db31cee03547, + 0xacc3f67cdaa1d210, + 0x65cb771d8c7f96cc, + 0x8eb27177055723dd, + 0xc789950d44cd94be, + 0x934feadc3700b12b, 
+ 0x5e485f11edbdf182, + 0x1e2e2a46fd64767a, + 0x2969ca71d82efa7c, + 0x9d46e9935ebbba2e, + 0xe056b67e05e6822b, + 0x94d73f55739d03a0, + 0xcd7010bdb69b5a03, + 0x455ef9fcd79b82f4, + 0x869cb54a8749c161, + 0x38d1a4fa6185d225, + 0xb475166f94bbe9bb, + 0xa4143548720959f1, + 0x7aed4780ba6b26ba, + 0xd0ce264439e02312, + 0x84366d746078d508, + 0xa8ce973c72ed17be, + 0x21c323a29a430b01, + 0x9962d617e3af80ee, + 0xab0ce91d9c8cf75b, + 0x530e8ee6d19a4dbc, + 0x2ef68c0cf53f5d72, + 0xc03a681640a85506, + 0x496e4e9f9c310967, + 0x78580472b59b14a0, + 0x273824c23b388577, + 0x66bf923ad45cb553, + 0x47ae1a5a2492ba86, + 0x35e304569e229659, + 0x4765182a46870b6f, + 0x6cbab625e9099412, + 0xddac9a2e598522c1, + 0x7172086e666624f2, + 0xdf5003ca503b7837, + 0x88c0c1db78563d09, + 0x58d51865acfc289d, + 0x177671aec65224f1, + 0xfb79d8a241e967d7, + 0x2be1e101cad9a49a, + 0x6625682f6e29186b, + 0x399553457ac06e50, + 0x35dffb4c23abb74, + 0x429db2591f54aade, + 0xc52802a8037d1009, + 0x6acb27381f0b25f3, + 0xf45e2551ee4f823b, + 0x8b0ea2d99580c2f7, + 0x3bed519cbcb4e1e1, + 0xff452823dbb010a, + 0x9d42ed614f3dd267, + 0x5b9313c06257c57b, + 0xa114b8008b5e1442, + 0xc1fe311c11c13d4b, + 0x66e8763ea34c5568, + 0x8b982af1c262f05d, + 0xee8876faaa75fbb7, + 0x8a62a4d0d172bb2a, + 0xc13d94a3b7449a97, + 0x6dbbba9dc15d037c, + 0xc786101f1d92e0f1, + 0xd78681a907a0b79b, + 0xf61aaf2962c9abb9, + 0x2cfd16fcd3cb7ad9, + 0x868c5b6744624d21, + 0x25e650899c74ddd7, + 0xba042af4a7c37463, + 0x4eb1a539465a3eca, + 0xbe09dbf03b05d5ca, + 0x774e5a362b5472ba, + 0x47a1221229d183cd, + 0x504b0ca18ef5a2df, + 0xdffbdfbde2456eb9, + 0x46cd2b2fbee34634, + 0xf2aef8fe819d98c3, + 0x357f5276d4599d61, + 0x24a5483879c453e3, + 0x88026889192b4b9, + 0x28da96671782dbec, + 0x4ef37c40588e9aaa, + 0x8837b90651bc9fb3, + 0xc164f741d3f0e5d6, + 0xbc135a0a704b70ba, + 0x69cd868f7622ada, + 0xbc37ba89e0b9c0ab, + 0x47c14a01323552f6, + 0x4f00794bacee98bb, + 0x7107de7d637a69d5, + 0x88af793bb6f2255e, + 0xf3c6466b8799b598, + 0xc288c616aa7f3b59, + 0x81ca63cf42fca3fd, + 0x88d85ace36a2674b, + 
0xd056bd3792389e7, + 0xe55c396c4e9dd32d, + 0xbefb504571e6c0a6, + 0x96ab32115e91e8cc, + 0xbf8acb18de8f38d1, + 0x66dae58801672606, + 0x833b6017872317fb, + 0xb87c16f2d1c92864, + 0xdb766a74e58b669c, + 0x89659f85c61417be, + 0xc8daad856011ea0c, + 0x76a4b565b6fe7eae, + 0xa469d085f6237312, + 0xaaf0365683a3e96c, + 0x4dbb746f8424f7b8, + 0x638755af4e4acc1, + 0x3d7807f5bde64486, + 0x17be6d8f5bbb7639, + 0x903f0cd44dc35dc, + 0x67b672eafdf1196c, + 0xa676ff93ed4c82f1, + 0x521d1004c5053d9d, + 0x37ba9ad09ccc9202, + 0x84e54d297aacfb51, + 0xa0b4b776a143445, + 0x820d471e20b348e, + 0x1874383cb83d46dc, + 0x97edeec7a1efe11c, + 0xb330e50b1bdc42aa, + 0x1dd91955ce70e032, + 0xa514cdb88f2939d5, + 0x2791233fd90db9d3, + 0x7b670a4cc50f7a9b, + 0x77c07d2a05c6dfa5, + 0xe3778b6646d0a6fa, + 0xb39c8eda47b56749, + 0x933ed448addbef28, + 0xaf846af6ab7d0bf4, + 0xe5af208eb666e49, + 0x5e6622f73534cd6a, + 0x297daeca42ef5b6e, + 0x862daef3d35539a6, + 0xe68722498f8e1ea9, + 0x981c53093dc0d572, + 0xfa09b0bfbf86fbf5, + 0x30b1e96166219f15, + 0x70e7d466bdc4fb83, + 0x5a66736e35f2a8e9, + 0xcddb59d2b7c1baef, + 0xd6c7d247d26d8996, + 0xea4e39eac8de1ba3, + 0x539c8bb19fa3aff2, + 0x9f90e4c5fd508d8, + 0xa34e5956fbaf3385, + 0x2e2f8e151d3ef375, + 0x173691e9b83faec1, + 0xb85a8d56bf016379, + 0x8382381267408ae3, + 0xb90f901bbdc0096d, + 0x7c6ad32933bcec65, + 0x76bb5e2f2c8ad595, + 0x390f851a6cf46d28, + 0xc3e6064da1c2da72, + 0xc52a0c101cfa5389, + 0xd78eaf84a3fbc530, + 0x3781b9e2288b997e, + 0x73c2f6dea83d05c4, + 0x4228e364c5b5ed7, + 0x9d7a3edf0da43911, + 0x8edcfeda24686756, + 0x5e7667a7b7a9b3a1, + 0x4c4f389fa143791d, + 0xb08bc1023da7cddc, + 0x7ab4be3ae529b1cc, + 0x754e6132dbe74ff9, + 0x71635442a839df45, + 0x2f6fb1643fbe52de, + 0x961e0a42cf7a8177, + 0xf3b45d83d89ef2ea, + 0xee3de4cf4a6e3e9b, + 0xcd6848542c3295e7, + 0xe4cee1664c78662f, + 0x9947548b474c68c4, + 0x25d73777a5ed8b0b, + 0xc915b1d636b7fc, + 0x21c2ba75d9b0d2da, + 0x5f6b5dcf608a64a1, + 0xdcf333255ff9570c, + 0x633b922418ced4ee, + 0xc136dde0b004b34a, + 0x58cc83b05d4b2f5a, + 
0x5eb424dda28e42d2, + 0x62df47369739cd98, + 0xb4e0b42485e4ce17, + 0x16e1f0c1f9a8d1e7, + 0x8ec3916707560ebf, + 0x62ba6e2df2cc9db3, + 0xcbf9f4ff77d83a16, + 0x78d9d7d07d2bbcc4, + 0xef554ce1e02c41f4, + 0x8d7581127eccf94d, + 0xa9b53336cb3c8a05, + 0x38c42c0bf45c4f91, + 0x640893cdf4488863, + 0x80ec34bc575ea568, + 0x39f324f5b48eaa40, + 0xe9d9ed1f8eff527f, + 0x9224fc058cc5a214, + 0xbaba00b04cfe7741, + 0x309a9f120fcf52af, + 0xa558f3ec65626212, + 0x424bec8b7adabe2f, + 0x41622513a6aea433, + 0xb88da2d5324ca798, + 0xd287733b245528a4, + 0x9a44697e6d68aec3, + 0x7b1093be2f49bb28, + 0x50bbec632e3d8aad, + 0x6cd90723e1ea8283, + 0x897b9e7431b02bf3, + 0x219efdcb338a7047, + 0x3b0311f0a27c0656, + 0xdb17bf91c0db96e7, + 0x8cd4fd6b4e85a5b2, + 0xfab071054ba6409d, + 0x40d6fe831fa9dfd9, + 0xaf358debad7d791e, + 0xeb8d0e25a65e3e58, + 0xbbcbd3df14e08580, + 0xcf751f27ecdab2b, + 0x2b4da14f2613d8f4, + }; + private static ulong* ZSTD_ldm_gearTab => + (ulong*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_ZSTD_ldm_gearTab) + ); #else private static readonly ulong* ZSTD_ldm_gearTab = GetArrayPointer( diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs index 7c8a98cae..4b05bb17c 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdOpt.cs @@ -42,9 +42,7 @@ private static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) { if (ZSTD_compressedLiterals(optPtr) != 0) optPtr->litSumBasePrice = - optLevel != 0 - ? ZSTD_fracWeight(optPtr->litSum) - : ZSTD_bitWeight(optPtr->litSum); + optLevel != 0 ? ZSTD_fracWeight(optPtr->litSum) : ZSTD_bitWeight(optPtr->litSum); optPtr->litLengthSumBasePrice = optLevel != 0 ? 
ZSTD_fracWeight(optPtr->litLengthSum) @@ -79,7 +77,7 @@ base_directive_e base1 ) { uint s, - sum = 0; + sum = 0; assert(shift < 30); for (s = 0; s < lastEltIndex + 1; s++) { @@ -115,51 +113,51 @@ private static uint ZSTD_scaleStats(uint* table, uint lastEltIndex, uint logTarg } #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_baseLLfreqs => - new uint[36] - { - 4, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - }; - private static uint* baseLLfreqs => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_baseLLfreqs) - ); + private static ReadOnlySpan Span_baseLLfreqs => + new uint[36] + { + 4, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + }; + private static uint* baseLLfreqs => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_baseLLfreqs) + ); #else private static readonly uint* baseLLfreqs = GetArrayPointer( @@ -205,47 +203,47 @@ ref MemoryMarshal.GetReference(Span_baseLLfreqs) ); #endif #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_baseOFCfreqs => - new uint[32] - { - 6, - 2, - 1, - 1, - 2, - 3, - 4, - 4, - 4, - 3, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - }; - private static uint* baseOFCfreqs => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_baseOFCfreqs) - ); + private static ReadOnlySpan Span_baseOFCfreqs => + new uint[32] + { + 6, + 2, + 1, + 1, + 2, + 3, + 4, + 4, + 4, + 3, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + }; + private static uint* baseOFCfreqs => + (uint*) + 
System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_baseOFCfreqs) + ); #else private static readonly uint* baseOFCfreqs = GetArrayPointer( @@ -481,12 +479,12 @@ private static uint ZSTD_litLengthPrice(uint litLength, optState_t* optPtr, int { uint llCode = ZSTD_LLcode(litLength); return (uint)(LL_bits[llCode] * (1 << 8)) - + optPtr->litLengthSumBasePrice - - ( - optLevel != 0 - ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) - : ZSTD_bitWeight(optPtr->litLengthFreq[llCode]) - ); + + optPtr->litLengthSumBasePrice + - ( + optLevel != 0 + ? ZSTD_fracWeight(optPtr->litLengthFreq[llCode]) + : ZSTD_bitWeight(optPtr->litLengthFreq[llCode]) + ); } } @@ -510,7 +508,7 @@ int optLevel assert(matchLength >= 3); if (optPtr->priceType == ZSTD_OptPrice_e.zop_predef) return (optLevel != 0 ? ZSTD_fracWeight(mlBase) : ZSTD_bitWeight(mlBase)) - + (16 + offCode) * (1 << 8); + + (16 + offCode) * (1 << 8); price = offCode * (1 << 8) + ( @@ -649,7 +647,7 @@ int extDict uint btMask = (uint)((1 << (int)btLog) - 1); uint matchIndex = hashTable[h]; nuint commonLengthSmaller = 0, - commonLengthLarger = 0; + commonLengthLarger = 0; byte* @base = ms->window.@base; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; @@ -678,9 +676,7 @@ int extDict uint* nextPtr = bt + 2 * (matchIndex & btMask); /* guaranteed minimum nb of common bytes */ nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; + commonLengthSmaller < commonLengthLarger ? 
commonLengthSmaller : commonLengthLarger; assert(matchIndex < curr); if (extDict == 0 || matchIndex + matchLength >= dictLimit) { @@ -786,13 +782,7 @@ ZSTD_dictMode_e dictMode /* used in ZSTD_loadDictionaryContent() */ private static void ZSTD_updateTree(ZSTD_MatchState_t* ms, byte* ip, byte* iend) { - ZSTD_updateTree_internal( - ms, - ip, - iend, - ms->cParams.minMatch, - ZSTD_dictMode_e.ZSTD_noDict - ); + ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_dictMode_e.ZSTD_noDict); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -823,7 +813,7 @@ uint mls uint btLog = cParams->chainLog - 1; uint btMask = (1U << (int)btLog) - 1; nuint commonLengthSmaller = 0, - commonLengthLarger = 0; + commonLengthLarger = 0; byte* dictBase = ms->window.dictBase; uint dictLimit = ms->window.dictLimit; byte* dictEnd = dictBase + dictLimit; @@ -843,10 +833,8 @@ uint mls dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? ms->dictMatchState : null; ZSTD_compressionParameters* dmsCParams = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? &dms->cParams : null; - byte* dmsBase = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; - byte* dmsEnd = - dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; + byte* dmsBase = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.@base : null; + byte* dmsEnd = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? dms->window.nextSrc : null; uint dmsHighLimit = dictMode == ZSTD_dictMode_e.ZSTD_dictMatchState ? (uint)(dmsEnd - dmsBase) : 0; uint dmsLowLimit = @@ -880,7 +868,7 @@ uint mls if ( repIndex >= windowLow && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(ip - repOffset, minMatch) + == ZSTD_readMINMATCH(ip - repOffset, minMatch) ) { repLen = @@ -901,8 +889,7 @@ uint mls (repOffset - 1 < curr - windowLow ? 
1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex) ) != 0 - && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(repMatch, minMatch) + && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch) ) { repLen = @@ -921,8 +908,7 @@ uint mls (repOffset - 1 < curr - (dmsLowLimit + dmsIndexDelta) ? 1 : 0) & ZSTD_index_overlap_check(dictLimit, repIndex) ) != 0 - && ZSTD_readMINMATCH(ip, minMatch) - == ZSTD_readMINMATCH(repMatch, minMatch) + && ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch) ) { repLen = @@ -998,9 +984,7 @@ uint mls byte* match; /* guaranteed minimum nb of common bytes */ nuint matchLength = - commonLengthSmaller < commonLengthLarger - ? commonLengthSmaller - : commonLengthLarger; + commonLengthSmaller < commonLengthLarger ? commonLengthSmaller : commonLengthLarger; assert(curr > matchIndex); if ( dictMode == ZSTD_dictMode_e.ZSTD_noDict @@ -1480,31 +1464,32 @@ uint lengthToBeat ); } - private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = - new ZSTD_getAllMatchesFn[3][] + private static readonly ZSTD_getAllMatchesFn[][] getAllMatchesFns = new ZSTD_getAllMatchesFn[ + 3 + ][] + { + new ZSTD_getAllMatchesFn[4] { - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_noDict_3, - ZSTD_btGetAllMatches_noDict_4, - ZSTD_btGetAllMatches_noDict_5, - ZSTD_btGetAllMatches_noDict_6, - }, - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_extDict_3, - ZSTD_btGetAllMatches_extDict_4, - ZSTD_btGetAllMatches_extDict_5, - ZSTD_btGetAllMatches_extDict_6, - }, - new ZSTD_getAllMatchesFn[4] - { - ZSTD_btGetAllMatches_dictMatchState_3, - ZSTD_btGetAllMatches_dictMatchState_4, - ZSTD_btGetAllMatches_dictMatchState_5, - ZSTD_btGetAllMatches_dictMatchState_6, - }, - }; + ZSTD_btGetAllMatches_noDict_3, + ZSTD_btGetAllMatches_noDict_4, + ZSTD_btGetAllMatches_noDict_5, + ZSTD_btGetAllMatches_noDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_extDict_3, + ZSTD_btGetAllMatches_extDict_4, + 
ZSTD_btGetAllMatches_extDict_5, + ZSTD_btGetAllMatches_extDict_6, + }, + new ZSTD_getAllMatchesFn[4] + { + ZSTD_btGetAllMatches_dictMatchState_3, + ZSTD_btGetAllMatches_dictMatchState_4, + ZSTD_btGetAllMatches_dictMatchState_5, + ZSTD_btGetAllMatches_dictMatchState_6, + }, + }; private static ZSTD_getAllMatchesFn ZSTD_selectBtGetAllMatches( ZSTD_MatchState_t* ms, @@ -1524,10 +1509,7 @@ ZSTD_dictMode_e dictMode * Moves forward in @rawSeqStore by @nbBytes, * which will update the fields 'pos' and 'posInSequence'. */ - private static void ZSTD_optLdm_skipRawSeqStoreBytes( - RawSeqStore_t* rawSeqStore, - nuint nbBytes - ) + private static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, nuint nbBytes) { uint currPos = (uint)(rawSeqStore->posInSequence + nbBytes); while (currPos != 0 && rawSeqStore->pos < rawSeqStore->size) @@ -1581,8 +1563,7 @@ uint blockBytesRemaining : 0; matchBytesRemaining = literalsBytesRemaining == 0 - ? currSeq.matchLength - - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) + ? currSeq.matchLength - ((uint)optLdm->seqStore.posInSequence - currSeq.litLength) : currSeq.matchLength; if (literalsBytesRemaining >= blockBytesRemaining) { @@ -1598,10 +1579,7 @@ uint blockBytesRemaining if (optLdm->endPosInBlock > currBlockEndPos) { optLdm->endPosInBlock = currBlockEndPos; - ZSTD_optLdm_skipRawSeqStoreBytes( - &optLdm->seqStore, - currBlockEndPos - currPosInBlock - ); + ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock); } else { @@ -1724,7 +1702,7 @@ ZSTD_dictMode_e dictMode while (ip < ilimit) { uint cur, - last_pos = 0; + last_pos = 0; { uint litlen = (uint)(ip - anchor); uint ll0 = litlen == 0 ? 
1U : 0U; @@ -1798,8 +1776,7 @@ ZSTD_dictMode_e dictMode opt[pos].off = offBase; opt[pos].litlen = 0; opt[pos].price = - sequencePrice - + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); + sequencePrice + (int)ZSTD_litLengthPrice(0, optStatePtr, optLevel); } } @@ -1832,8 +1809,8 @@ ZSTD_dictMode_e dictMode optLevel >= 1 && prevMatch.litlen == 0 && (int)ZSTD_litLengthPrice(1, optStatePtr, optLevel) - - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) - < 0 + - (int)ZSTD_litLengthPrice(1 - 1, optStatePtr, optLevel) + < 0 && ip + cur < iend ) { @@ -2143,14 +2120,7 @@ nuint srcSize assert(seqStore->sequences == seqStore->sequencesStart); assert(ms->window.dictLimit == ms->window.lowLimit); assert(ms->window.dictLimit - ms->nextToUpdate <= 1); - ZSTD_compressBlock_opt2( - ms, - seqStore, - tmpRep, - src, - srcSize, - ZSTD_dictMode_e.ZSTD_noDict - ); + ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_dictMode_e.ZSTD_noDict); ZSTD_resetSeqStore(seqStore); ms->window.@base -= srcSize; ms->window.dictLimit += (uint)srcSize; diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs index 51f0fd229..e0d139218 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdPresplit.cs @@ -163,12 +163,12 @@ private static void removeEvents(Fingerprint* acc, Fingerprint* slice) (delegate* managed)(&ZSTD_recordFingerprint_1), }; #if NET7_0_OR_GREATER - private static ReadOnlySpan Span_hashParams => new uint[4] { 8, 9, 10, 10 }; - private static uint* hashParams => - (uint*) - System.Runtime.CompilerServices.Unsafe.AsPointer( - ref MemoryMarshal.GetReference(Span_hashParams) - ); + private static ReadOnlySpan Span_hashParams => new uint[4] { 8, 9, 10, 10 }; + private static uint* hashParams => + (uint*) + System.Runtime.CompilerServices.Unsafe.AsPointer( + ref MemoryMarshal.GetReference(Span_hashParams) + 
); #else private static readonly uint* hashParams = GetArrayPointer(new uint[4] { 8, 9, 10, 10 }); @@ -245,8 +245,7 @@ nuint wkspSize ) { FPStats* fpstats = (FPStats*)workspace; - Fingerprint* middleEvents = (Fingerprint*) - (void*)((sbyte*)workspace + 512 * sizeof(uint)); + Fingerprint* middleEvents = (Fingerprint*)(void*)((sbyte*)workspace + 512 * sizeof(uint)); assert(blockSize == 128 << 10); assert(workspace != null); assert((nuint)workspace % (nuint)Math.Max(sizeof(uint), sizeof(ulong)) == 0); diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs index 7d955bb50..2a21c3238 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ZstdmtCompress.cs @@ -193,10 +193,7 @@ private static void ZSTDMT_setNbSeq(ZSTDMT_bufferPool_s* seqPool, nuint nbSeq) ZSTDMT_setBufferSize(seqPool, nbSeq * (nuint)sizeof(rawSeq)); } - private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool( - uint nbWorkers, - ZSTD_customMem cMem - ) + private static ZSTDMT_bufferPool_s* ZSTDMT_createSeqPool(uint nbWorkers, ZSTD_customMem cMem) { ZSTDMT_bufferPool_s* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); if (seqPool == null) @@ -270,10 +267,7 @@ private static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) return cctxPool; } - private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool( - ZSTDMT_CCtxPool* srcPool, - int nbWorkers - ) + private static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, int nbWorkers) { if (srcPool == null) return null; @@ -381,19 +375,13 @@ ZSTD_dictContentType_e dictContentType ) { ZSTD_customFree(serialState->ldmState.hashTable, cMem); - serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc( - hashSize, - cMem - ); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); } if (serialState->ldmState.bucketOffsets == null || prevBucketLog < 
bucketLog) { ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); - serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc( - numBuckets, - cMem - ); + serialState->ldmState.bucketOffsets = (byte*)ZSTD_customMalloc(numBuckets, cMem); } if ( @@ -473,9 +461,9 @@ uint jobID nuint error; assert( seqStore->seq != null - && seqStore->pos == 0 - && seqStore->size == 0 - && seqStore->capacity > 0 + && seqStore->pos == 0 + && seqStore->size == 0 + && seqStore->capacity > 0 ); assert(src.size <= serialState->@params.jobSize); ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, 0); @@ -510,9 +498,7 @@ private static void ZSTDMT_serialState_applySequences( { if (seqStore->size > 0) { - assert( - serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable - ); + assert(serialState->@params.ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable); assert(jobCCtx != null); ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); } @@ -565,9 +551,7 @@ private static void ZSTDMT_compressionJob(void* jobDescription) if (dstBuff.start == null) { SynchronizationWrapper.Enter(&job->job_mutex); - job->cSize = unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + job->cSize = unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); SynchronizationWrapper.Exit(&job->job_mutex); goto _endJob; } @@ -734,18 +718,10 @@ private static void ZSTDMT_compressionJob(void* jobDescription) { nuint lastBlockSize1 = job->src.size & chunkSize - 1; nuint lastBlockSize = - lastBlockSize1 == 0 && job->src.size >= chunkSize - ? chunkSize - : lastBlockSize1; + lastBlockSize1 == 0 && job->src.size >= chunkSize ? chunkSize : lastBlockSize1; nuint cSize = job->lastJob != 0 - ? ZSTD_compressEnd_public( - cctx, - op, - (nuint)(oend - op), - ip, - lastBlockSize - ) + ? 
ZSTD_compressEnd_public(cctx, op, (nuint)(oend - op), ip, lastBlockSize) : ZSTD_compressContinue_public( cctx, op, @@ -863,10 +839,7 @@ private static nuint ZSTDMT_expandJobsTable(ZSTDMT_CCtx_s* mtctx, uint nbWorkers /* ZSTDMT_CCtxParam_setNbWorkers(): * Internal use only */ - private static nuint ZSTDMT_CCtxParam_setNbWorkers( - ZSTD_CCtx_params_s* @params, - uint nbWorkers - ) + private static nuint ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params_s* @params, uint nbWorkers) { return ZSTD_CCtxParams_setParameter( @params, @@ -961,11 +934,7 @@ private static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx_s* mtctx) void* mutex = mtctx->jobs[jobID].job_mutex; void* cond = mtctx->jobs[jobID].job_cond; ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); - mtctx->jobs[jobID] = new ZSTDMT_jobDescription - { - job_mutex = mutex, - job_cond = cond, - }; + mtctx->jobs[jobID] = new ZSTDMT_jobDescription { job_mutex = mutex, job_cond = cond }; } mtctx->inBuff.buffer = g_nullBuffer; @@ -1013,13 +982,13 @@ private static nuint ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx_s* mtctx) if (mtctx == null) return 0; return (nuint)sizeof(ZSTDMT_CCtx_s) - + POOL_sizeof(mtctx->factory) - + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) - + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) - + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) - + ZSTDMT_sizeof_seqPool(mtctx->seqPool) - + ZSTD_sizeof_CDict(mtctx->cdictLocal) - + mtctx->roundBuff.capacity; + + POOL_sizeof(mtctx->factory) + + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + + (mtctx->jobIDMask + 1) * (uint)sizeof(ZSTDMT_jobDescription) + + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + + ZSTD_sizeof_CDict(mtctx->cdictLocal) + + mtctx->roundBuff.capacity; } /* ZSTDMT_resize() : @@ -1206,9 +1175,7 @@ private static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) private static nuint ZSTDMT_computeOverlapSize(ZSTD_CCtx_params_s* @params) { int overlapRLog = 9 - 
ZSTDMT_overlapLog(@params->overlapLog, @params->cParams.strategy); - int ovLog = (int)( - overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog - ); + int ovLog = (int)(overlapRLog >= 8 ? 0 : @params->cParams.windowLog - (uint)overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); if (@params->ldmParams.enableLdm == ZSTD_paramSwitch_e.ZSTD_ps_enable) { @@ -1288,9 +1255,7 @@ ulong pledgedSrcSize mtctx->targetSectionSize = @params.jobSize; if (mtctx->targetSectionSize == 0) { - mtctx->targetSectionSize = (nuint)( - 1UL << (int)ZSTDMT_computeTargetJobLog(&@params) - ); + mtctx->targetSectionSize = (nuint)(1UL << (int)ZSTDMT_computeTargetJobLog(&@params)); } assert( @@ -1326,12 +1291,9 @@ ulong pledgedSrcSize nuint nbSlackBuffers = (nuint)(2 + (mtctx->targetPrefixSize > 0 ? 1 : 0)); nuint slackSize = mtctx->targetSectionSize * nbSlackBuffers; /* Compute the total size, and always have enough slack */ - nuint nbWorkers = (nuint)( - mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1 - ); + nuint nbWorkers = (nuint)(mtctx->@params.nbWorkers > 1 ? mtctx->@params.nbWorkers : 1); nuint sectionsSize = mtctx->targetSectionSize * nbWorkers; - nuint capacity = - (windowSize > sectionsSize ? windowSize : sectionsSize) + slackSize; + nuint capacity = (windowSize > sectionsSize ? 
windowSize : sectionsSize) + slackSize; if (mtctx->roundBuff.capacity < capacity) { if (mtctx->roundBuff.buffer != null) @@ -1340,9 +1302,7 @@ ulong pledgedSrcSize if (mtctx->roundBuff.buffer == null) { mtctx->roundBuff.capacity = 0; - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } mtctx->roundBuff.capacity = capacity; @@ -1381,9 +1341,7 @@ ulong pledgedSrcSize ); mtctx->cdict = mtctx->cdictLocal; if (mtctx->cdictLocal == null) - return unchecked( - (nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation) - ); + return unchecked((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)); } } else @@ -1441,9 +1399,7 @@ ZSTD_EndDirective endOp int endFrame = endOp == ZSTD_EndDirective.ZSTD_e_end ? 1 : 0; if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { - assert( - (mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask) - ); + assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); return 0; } @@ -1468,9 +1424,7 @@ ZSTD_EndDirective endOp mtctx->jobs[jobID].firstJob = mtctx->nextJobID == 0 ? 1U : 0U; mtctx->jobs[jobID].lastJob = (uint)endFrame; mtctx->jobs[jobID].frameChecksumNeeded = - mtctx->@params.fParams.checksumFlag != 0 - && endFrame != 0 - && mtctx->nextJobID > 0 + mtctx->@params.fParams.checksumFlag != 0 && endFrame != 0 && mtctx->nextJobID > 0 ? 
1U : 0U; mtctx->jobs[jobID].dstFlushed = 0; @@ -1593,8 +1547,7 @@ ZSTD_EndDirective end { memcpy( (sbyte*)output->dst + output->pos, - (sbyte*)mtctx->jobs[wJobID].dstBuff.start - + mtctx->jobs[wJobID].dstFlushed, + (sbyte*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, (uint)toFlush ); } @@ -1694,11 +1647,9 @@ private static int ZSTDMT_doesOverlapWindow(buffer_s buffer, ZSTD_window_t windo extDict.size = window.dictLimit - window.lowLimit; prefix.start = window.@base + window.dictLimit; prefix.size = (nuint)(window.nextSrc - (window.@base + window.dictLimit)); - return - ZSTDMT_isOverlapped(buffer, extDict) != 0 - || ZSTDMT_isOverlapped(buffer, prefix) != 0 - ? 1 - : 0; + return ZSTDMT_isOverlapped(buffer, extDict) != 0 || ZSTDMT_isOverlapped(buffer, prefix) != 0 + ? 1 + : 0; } private static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx_s* mtctx, buffer_s buffer) @@ -1770,10 +1721,7 @@ private static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx_s* mtctx) * Otherwise, we will load as many bytes as possible and instruct the caller * to continue as normal. 
*/ - private static SyncPoint findSynchronizationPoint( - ZSTDMT_CCtx_s* mtctx, - ZSTD_inBuffer_s input - ) + private static SyncPoint findSynchronizationPoint(ZSTDMT_CCtx_s* mtctx, ZSTD_inBuffer_s input) { byte* istart = (byte*)input.src + input.pos; ulong primePower = mtctx->rsync.primePower; @@ -1906,8 +1854,8 @@ ZSTD_EndDirective endOp { assert( mtctx->inBuff.filled == 0 - || mtctx->inBuff.filled == mtctx->targetSectionSize - || mtctx->@params.rsyncable != 0 + || mtctx->inBuff.filled == mtctx->targetSectionSize + || mtctx->@params.rsyncable != 0 ); endOp = ZSTD_EndDirective.ZSTD_e_flush; } diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs index df3642fa9..1ea5fe48a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/inBuff_t.cs @@ -9,4 +9,4 @@ public struct InBuff_t public Range prefix; public buffer_s buffer; public nuint filled; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs index c27abfcae..73d11fd48 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmRollingHashState_t.cs @@ -4,4 +4,4 @@ public struct ldmRollingHashState_t { public ulong rolling; public ulong stopMask; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs index 8ee9d9a71..5596880d7 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/ldmState_t.cs @@ -15,11 +15,11 @@ public unsafe struct ldmState_t public _matchCandidates_e__FixedBuffer matchCandidates; #if NET8_0_OR_GREATER - [InlineArray(64)] - public unsafe struct 
_splitIndices_e__FixedBuffer - { - public nuint e0; - } + [InlineArray(64)] + public unsafe struct _splitIndices_e__FixedBuffer + { + public nuint e0; + } #else public unsafe struct _splitIndices_e__FixedBuffer @@ -92,11 +92,11 @@ public unsafe struct _splitIndices_e__FixedBuffer #endif #if NET8_0_OR_GREATER - [InlineArray(64)] - public unsafe struct _matchCandidates_e__FixedBuffer - { - public ldmMatchCandidate_t e0; - } + [InlineArray(64)] + public unsafe struct _matchCandidates_e__FixedBuffer + { + public ldmMatchCandidate_t e0; + } #else public unsafe struct _matchCandidates_e__FixedBuffer diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs index 6f24d4f65..9a5c80981 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/offsetCount_t.cs @@ -4,4 +4,4 @@ public struct offsetCount_t { public uint offset; public uint count; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs index ea52c69fa..94eea418a 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/rankValCol_t.cs @@ -3,4 +3,4 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public unsafe struct rankValCol_t { public fixed uint Body[13]; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs index a9624f7cd..f9b97bda6 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/repcodes_s.cs @@ -3,4 +3,4 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public unsafe struct repcodes_s { public fixed uint rep[3]; -} \ No newline at end of file +} diff --git 
a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs index f80373a18..acfe3dd90 100644 --- a/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs +++ b/src/SharpCompress/Compressors/ZStandard/Unsafe/sortedSymbol_t.cs @@ -3,4 +3,4 @@ namespace SharpCompress.Compressors.ZStandard.Unsafe; public struct sortedSymbol_t { public byte symbol; -} \ No newline at end of file +} diff --git a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs index d2ea3575e..b145099b3 100644 --- a/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs +++ b/src/SharpCompress/Compressors/ZStandard/UnsafeHelper.cs @@ -19,12 +19,12 @@ public static unsafe class UnsafeHelper #if NET6_0_OR_GREATER var ptr = NativeMemory.Alloc((nuint)size); #else - var ptr = (void*)Marshal.AllocHGlobal((nint)size); + var ptr = (void*)Marshal.AllocHGlobal((nint)size); #endif #if DEBUG return PoisonMemory(ptr, size); #else - return ptr; + return ptr; #endif } @@ -34,11 +34,11 @@ public static unsafe class UnsafeHelper #if NET6_0_OR_GREATER return NativeMemory.AllocZeroed((nuint)num, (nuint)size); #else - var total = num * size; - assert(total <= uint.MaxValue); - var destination = (void*)Marshal.AllocHGlobal((nint)total); - memset(destination, 0, (uint)total); - return destination; + var total = num * size; + assert(total <= uint.MaxValue); + var destination = (void*)Marshal.AllocHGlobal((nint)total); + memset(destination, 0, (uint)total); + return destination; #endif } @@ -56,7 +56,7 @@ public static void free(void* ptr) #if NET6_0_OR_GREATER NativeMemory.Free(ptr); #else - Marshal.FreeHGlobal((IntPtr)ptr); + Marshal.FreeHGlobal((IntPtr)ptr); #endif } @@ -66,23 +66,19 @@ public static void free(void* ptr) { var size = (uint)(sizeof(T) * array.Length); #if NET9_0_OR_GREATER - // This function is used to allocate memory for static data blocks. 
- // We have to use AllocateTypeAssociatedMemory and link the memory's - // lifetime to this assembly, in order to prevent memory leaks when - // loading the assembly in an unloadable AssemblyLoadContext. - // While introduced in .NET 5, we call this only in .NET 9+, because - // it's not implemented in the Mono runtime until then. - var destination = (T*) - RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); + // This function is used to allocate memory for static data blocks. + // We have to use AllocateTypeAssociatedMemory and link the memory's + // lifetime to this assembly, in order to prevent memory leaks when + // loading the assembly in an unloadable AssemblyLoadContext. + // While introduced in .NET 5, we call this only in .NET 9+, because + // it's not implemented in the Mono runtime until then. + var destination = (T*) + RuntimeHelpers.AllocateTypeAssociatedMemory(typeof(UnsafeHelper), (int)size); #else var destination = (T*)malloc(size); #endif fixed (void* source = &array[0]) - System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned( - destination, - source, - size - ); + System.Runtime.CompilerServices.Unsafe.CopyBlockUnaligned(destination, source, size); return destination; } diff --git a/src/SharpCompress/Factories/ArcFactory.cs b/src/SharpCompress/Factories/ArcFactory.cs index 040263e27..ea0617eed 100644 --- a/src/SharpCompress/Factories/ArcFactory.cs +++ b/src/SharpCompress/Factories/ArcFactory.cs @@ -42,4 +42,4 @@ public override bool IsArchive( public IReader OpenReader(Stream stream, ReaderOptions? 
options) => ArcReader.Open(stream, options); -} \ No newline at end of file +} diff --git a/src/SharpCompress/IO/IStreamStack.cs b/src/SharpCompress/IO/IStreamStack.cs index c572de1ec..1c7582adc 100644 --- a/src/SharpCompress/IO/IStreamStack.cs +++ b/src/SharpCompress/IO/IStreamStack.cs @@ -42,10 +42,10 @@ public interface IStreamStack void SetPosition(long position); #if DEBUG_STREAMS - /// - /// Gets or sets the unique instance identifier for debugging purposes. - /// - long InstanceId { get; set; } + /// + /// Gets or sets the unique instance identifier for debugging purposes. + /// + long InstanceId { get; set; } #endif } @@ -240,124 +240,118 @@ out int baseReadCount } #if DEBUG_STREAMS - private static long _instanceCounter = 0; + private static long _instanceCounter = 0; - private static string cleansePos(long pos) - { - if (pos < 0) - return ""; - return "Px" + pos.ToString("x"); - } + private static string cleansePos(long pos) + { + if (pos < 0) + return ""; + return "Px" + pos.ToString("x"); + } - /// - /// Gets or creates a unique instance ID for the stream stack for debugging purposes. - /// - /// The stream stack. - /// Reference to the instance ID field. - /// Whether this is being called during construction. - /// The instance ID. - public static long GetInstanceId( - this IStreamStack stream, - ref long instanceId, - bool construct - ) - { - if (instanceId == 0) //will not be equal to 0 when inherited IStackStream types are being used - instanceId = System.Threading.Interlocked.Increment(ref _instanceCounter); - return instanceId; - } + /// + /// Gets or creates a unique instance ID for the stream stack for debugging purposes. + /// + /// The stream stack. + /// Reference to the instance ID field. + /// Whether this is being called during construction. + /// The instance ID. 
+ public static long GetInstanceId(this IStreamStack stream, ref long instanceId, bool construct) + { + if (instanceId == 0) //will not be equal to 0 when inherited IStackStream types are being used + instanceId = System.Threading.Interlocked.Increment(ref _instanceCounter); + return instanceId; + } - /// - /// Writes a debug message for stream construction. - /// - /// The stream stack. - /// The type being constructed. - public static void DebugConstruct(this IStreamStack stream, Type constructing) - { - long id = stream.InstanceId; - stream.InstanceId = GetInstanceId(stream, ref id, true); - var frame = (new StackTrace()).GetFrame(3); - string parentInfo = - frame != null - ? $"{frame.GetMethod()?.DeclaringType?.Name}.{frame.GetMethod()?.Name}()" - : "Unknown"; - if (constructing.FullName == stream.GetType().FullName) //don't debug base IStackStream types - Debug.WriteLine( - $"{GetStreamStackString(stream, true)} : Constructed by [{parentInfo}]" - ); - } + /// + /// Writes a debug message for stream construction. + /// + /// The stream stack. + /// The type being constructed. + public static void DebugConstruct(this IStreamStack stream, Type constructing) + { + long id = stream.InstanceId; + stream.InstanceId = GetInstanceId(stream, ref id, true); + var frame = (new StackTrace()).GetFrame(3); + string parentInfo = + frame != null + ? $"{frame.GetMethod()?.DeclaringType?.Name}.{frame.GetMethod()?.Name}()" + : "Unknown"; + if (constructing.FullName == stream.GetType().FullName) //don't debug base IStackStream types + Debug.WriteLine($"{GetStreamStackString(stream, true)} : Constructed by [{parentInfo}]"); + } - /// - /// Writes a debug message for stream disposal. - /// - /// The stream stack. - /// The type being disposed. - public static void DebugDispose(this IStreamStack stream, Type constructing) - { - var frame = (new StackTrace()).GetFrame(3); - string parentInfo = - frame != null - ? 
$"{frame.GetMethod()?.DeclaringType?.Name}.{frame.GetMethod()?.Name}()" - : "Unknown"; - if (constructing.FullName == stream.GetType().FullName) //don't debug base IStackStream types - Debug.WriteLine($"{GetStreamStackString(stream, false)} : Disposed by [{parentInfo}]"); - } + /// + /// Writes a debug message for stream disposal. + /// + /// The stream stack. + /// The type being disposed. + public static void DebugDispose(this IStreamStack stream, Type constructing) + { + var frame = (new StackTrace()).GetFrame(3); + string parentInfo = + frame != null + ? $"{frame.GetMethod()?.DeclaringType?.Name}.{frame.GetMethod()?.Name}()" + : "Unknown"; + if (constructing.FullName == stream.GetType().FullName) //don't debug base IStackStream types + Debug.WriteLine($"{GetStreamStackString(stream, false)} : Disposed by [{parentInfo}]"); + } - /// - /// Writes a debug trace message for the stream. - /// - /// The stream stack. - /// The debug message to write. - public static void DebugTrace(this IStreamStack stream, string message) - { - Debug.WriteLine( - $"{GetStreamStackString(stream, false)} : [{stream.GetType().Name}]{message}" - ); - } + /// + /// Writes a debug trace message for the stream. + /// + /// The stream stack. + /// The debug message to write. + public static void DebugTrace(this IStreamStack stream, string message) + { + Debug.WriteLine( + $"{GetStreamStackString(stream, false)} : [{stream.GetType().Name}]{message}" + ); + } - /// - /// Returns the full stream chain as a string, including instance IDs and positions. - /// - /// The stream stack to represent. - /// Whether this is being called during construction. - /// A string representation of the entire stream stack. - public static string GetStreamStackString(this IStreamStack stream, bool construct) + /// + /// Returns the full stream chain as a string, including instance IDs and positions. + /// + /// The stream stack to represent. + /// Whether this is being called during construction. 
+ /// A string representation of the entire stream stack. + public static string GetStreamStackString(this IStreamStack stream, bool construct) + { + var sb = new StringBuilder(); + Stream? current = stream as Stream; + while (current != null) { - var sb = new StringBuilder(); - Stream? current = stream as Stream; - while (current != null) + IStreamStack? sStack = current as IStreamStack; + string id = sStack != null ? "#" + sStack.InstanceId.ToString() : ""; + string buffSize = sStack != null ? "Bx" + sStack.BufferSize.ToString("x") : ""; + string defBuffSize = + sStack != null ? "Dx" + sStack.DefaultBufferSize.ToString("x") : ""; + + if (sb.Length > 0) + sb.Insert(0, "/"); + try + { + sb.Insert( + 0, + $"{current.GetType().Name}{id}[{cleansePos(current.Position)}:{buffSize}:{defBuffSize}]" + ); + } + catch { - IStreamStack? sStack = current as IStreamStack; - string id = sStack != null ? "#" + sStack.InstanceId.ToString() : ""; - string buffSize = sStack != null ? "Bx" + sStack.BufferSize.ToString("x") : ""; - string defBuffSize = - sStack != null ? 
"Dx" + sStack.DefaultBufferSize.ToString("x") : ""; - - if (sb.Length > 0) - sb.Insert(0, "/"); - try - { + if (current is SharpCompressStream scs) sb.Insert( 0, - $"{current.GetType().Name}{id}[{cleansePos(current.Position)}:{buffSize}:{defBuffSize}]" + $"{current.GetType().Name}{id}[{cleansePos(scs.InternalPosition)}:{buffSize}:{defBuffSize}]" ); - } - catch - { - if (current is SharpCompressStream scs) - sb.Insert( - 0, - $"{current.GetType().Name}{id}[{cleansePos(scs.InternalPosition)}:{buffSize}:{defBuffSize}]" - ); - else - sb.Insert(0, $"{current.GetType().Name}{id}[:{buffSize}]"); - } - if (sStack != null) - current = sStack.BaseStream(); //current may not be a IStreamStack, allow one more loop else - break; + sb.Insert(0, $"{current.GetType().Name}{id}[:{buffSize}]"); } - return sb.ToString(); + if (sStack != null) + current = sStack.BaseStream(); //current may not be a IStreamStack, allow one more loop + else + break; } + return sb.ToString(); + } #endif } diff --git a/src/SharpCompress/Readers/Arc/ArcReader.cs b/src/SharpCompress/Readers/Arc/ArcReader.cs index 6ad6a3822..760f63454 100644 --- a/src/SharpCompress/Readers/Arc/ArcReader.cs +++ b/src/SharpCompress/Readers/Arc/ArcReader.cs @@ -37,4 +37,4 @@ protected override IEnumerable GetEntries(Stream stream) yield return new ArcEntry(new ArcFilePart(header, stream)); } } -} \ No newline at end of file +}