@@ -1105,9 +1105,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
 static size_t
 HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                                     const void* src, size_t srcSize,
-                                    const HUF_CElt* CTable, const int bmi2)
+                                    const HUF_CElt* CTable, const int flags)
 {
-    if (bmi2) {
+    if (flags & HUF_flags_bmi2) {
         return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
     }
     return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
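With BMI2 support expressed as a bit inside `flags`, callers assemble the flag word once and pass it down. A minimal sketch of how that word might be built, assuming the `ZSTD_cpuid_bmi2(ZSTD_cpuid())` probe from `lib/common/cpu.h`; the `HUF_flags_*` enumerators come from `huf.h`, and their exact bit values are not relied on here:

```c
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi2() */
#include "huf.h"   /* HUF_flags_bmi2, ... */

/* Illustrative only: build the consolidated flag word once at setup time. */
static int HUF_exampleFlags(void)
{
    int flags = 0;
    if (ZSTD_cpuid_bmi2(ZSTD_cpuid()))  /* runtime CPU detection */
        flags |= HUF_flags_bmi2;
    return flags;
}
```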
@@ -1118,23 +1118,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
 static size_t
 HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                                     const void* src, size_t srcSize,
-                                    const HUF_CElt* CTable, const int bmi2)
+                                    const HUF_CElt* CTable, const int flags)
 {
-    (void)bmi2;
+    (void)flags;
     return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
 }

 #endif

-size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
 {
-    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
 }

 static size_t
 HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
                                     const void* src, size_t srcSize,
-                                    const HUF_CElt* CTable, int bmi2)
+                                    const HUF_CElt* CTable, int flags)
 {
     size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
     const BYTE* ip = (const BYTE*) src;
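The `_bmi2` suffix disappears from the public entry point; the boolean it used to take now travels inside `flags`. An illustrative call-site migration (variable declarations assumed to exist around the fragment):

```c
/* before: bmi2 passed as a dedicated boolean argument */
cSize = HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 1);

/* after: the same information is one bit of the flags word */
cSize = HUF_compress1X_usingCTable(dst, dstSize, src, srcSize, CTable, HUF_flags_bmi2);
```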
@@ -1148,23 +1148,23 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
     op += 6;   /* jumpTable */

     assert(op <= oend);
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
         if (cSize == 0 || cSize > 65535) return 0;
         MEM_writeLE16(ostart, (U16)cSize);
         op += cSize;
     }

     ip += segmentSize;
     assert(op <= oend);
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
         if (cSize == 0 || cSize > 65535) return 0;
         MEM_writeLE16(ostart+2, (U16)cSize);
         op += cSize;
     }

     ip += segmentSize;
     assert(op <= oend);
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
         if (cSize == 0 || cSize > 65535) return 0;
         MEM_writeLE16(ostart+4, (U16)cSize);
         op += cSize;
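For orientation, the 4-stream framing visible in this hunk: the sizes of the first three compressed segments are written little-endian into a 6-byte jump table at the start of the output, and the fourth size is implied by the block size. A sketch of the matching read side, using `MEM_readLE16` from `lib/common/mem.h`:

```c
/* Sketch: recovering the three explicit segment sizes from the jump table. */
size_t const cSize1 = MEM_readLE16(ostart);
size_t const cSize2 = MEM_readLE16(ostart + 2);
size_t const cSize3 = MEM_readLE16(ostart + 4);
/* segment 4 spans whatever remains of the compressed block */
```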
@@ -1173,29 +1173,29 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
     ip += segmentSize;
     assert(op <= oend);
     assert(ip <= iend);
-    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
         if (cSize == 0 || cSize > 65535) return 0;
         op += cSize;
     }

     return (size_t)(op-ostart);
 }

-size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
 {
-    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
 }

 typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;

 static size_t HUF_compressCTable_internal(
                 BYTE* const ostart, BYTE* op, BYTE* const oend,
                 const void* src, size_t srcSize,
-                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
 {
     size_t const cSize = (nbStreams==HUF_singleStream) ?
-                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
-                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
+                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
     if (HUF_isError(cSize)) { return cSize; }
     if (cSize==0) { return 0; }   /* uncompressible */
     op += cSize;
@@ -1243,12 +1243,12 @@ unsigned HUF_optimalTableLog(
                             void* workSpace, size_t wkspSize,
                             HUF_CElt* table,
                       const unsigned* count,
-                            HUF_depth_mode depthMode)
+                            int flags)
 {
     assert(srcSize > 1); /* Not supported, RLE should be used instead */
     assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));

-    if (depthMode != HUF_depth_optimal) {
+    if (!(flags & HUF_flags_optimalDepth)) {
         /* cheap evaluation, based on FSE */
         return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
     }
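Note the inverted test: `depthMode != HUF_depth_optimal` becomes `!(flags & HUF_flags_optimalDepth)`, so the cheap FSE-based estimate still runs whenever the optimal-depth search was not requested. A hypothetical shim (not part of zstd) makes the equivalence explicit:

```c
/* Hypothetical helper, for illustration only: maps the retired enum
 * onto the new flag so the two predicates agree. */
static int HUF_depthModeToFlags(HUF_depth_mode depthMode)
{
    return (depthMode == HUF_depth_optimal) ? HUF_flags_optimalDepth : 0;
}
```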
@@ -1300,8 +1300,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
                        unsigned maxSymbolValue, unsigned huffLog,
                        HUF_nbStreams_e nbStreams,
                        void* workSpace, size_t wkspSize,
-                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
-                       const int bmi2, int suspectUncompressible, HUF_depth_mode depthMode)
+                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
 {
     HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
     BYTE* const ostart = (BYTE*)dst;
@@ -1322,15 +1321,15 @@ HUF_compress_internal (void* dst, size_t dstSize,
     if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

     /* Heuristic : If old table is valid, use it for small inputs */
-    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
         return HUF_compressCTable_internal(ostart, op, oend,
                                            src, srcSize,
-                                           nbStreams, oldHufTable, bmi2);
+                                           nbStreams, oldHufTable, flags);
     }

     /* If uncompressible data is suspected, do a smaller sampling first */
     DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
-    if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+    if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
         size_t largestTotal = 0;
         DEBUGLOG(5, "input suspected incompressible : sampling to check");
         {   unsigned maxSymbolValueBegin = maxSymbolValue;
@@ -1358,14 +1357,14 @@ HUF_compress_internal (void* dst, size_t dstSize,
         *repeat = HUF_repeat_none;
     }
     /* Heuristic : use existing table for small inputs */
-    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
         return HUF_compressCTable_internal(ostart, op, oend,
                                            src, srcSize,
-                                           nbStreams, oldHufTable, bmi2);
+                                           nbStreams, oldHufTable, flags);
     }

     /* Build Huffman Tree */
-    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, depthMode);
+    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
     {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
                                                     maxSymbolValue, huffLog,
                                                     &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
@@ -1390,7 +1389,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
         if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
             return HUF_compressCTable_internal(ostart, op, oend,
                                                src, srcSize,
-                                               nbStreams, oldHufTable, bmi2);
+                                               nbStreams, oldHufTable, flags);
     }   }

     /* Use the new huffman table */
@@ -1402,21 +1401,20 @@ HUF_compress_internal (void* dst, size_t dstSize,
     }
     return HUF_compressCTable_internal(ostart, op, oend,
                                        src, srcSize,
-                                       nbStreams, table->CTable, bmi2);
+                                       nbStreams, table->CTable, flags);
 }

 size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
-                      int bmi2, int suspectUncompressible, HUF_depth_mode depthMode)
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
 {
     DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_singleStream,
                                  workSpace, wkspSize, hufTable,
-                                 repeat, preferRepeat, bmi2, suspectUncompressible, depthMode);
+                                 repeat, flags);
 }

 /* HUF_compress4X_repeat():
@@ -1427,12 +1425,11 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       void* workSpace, size_t wkspSize,
-                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2,
-                      int suspectUncompressible, HUF_depth_mode depthMode)
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
 {
     DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
     return HUF_compress_internal(dst, dstSize, src, srcSize,
                                  maxSymbolValue, huffLog, HUF_fourStreams,
                                  workSpace, wkspSize,
-                                 hufTable, repeat, preferRepeat, bmi2, suspectUncompressible, depthMode);
+                                 hufTable, repeat, flags);
 }
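Taken together, the repeat-table entry points now receive every hint through one word. A minimal caller sketch, assuming `dst`, `src`, the workspace, and `hufTable` are already set up; `HUF_TABLELOG_DEFAULT` and the `HUF_flags_*` enumerators come from `huf.h`:

```c
HUF_repeat repeat = HUF_repeat_none;
int flags = 0;
flags |= HUF_flags_bmi2;                   /* CPU has BMI2 (detected elsewhere) */
flags |= HUF_flags_preferRepeat;           /* reuse hufTable while it stays valid */
flags |= HUF_flags_suspectUncompressible;  /* sample before full histogramming */

size_t const cSize = HUF_compress4X_repeat(dst, dstSize, src, srcSize,
                                           255 /* maxSymbolValue */, HUF_TABLELOG_DEFAULT,
                                           workSpace, wkspSize,
                                           hufTable, &repeat, flags);
if (HUF_isError(cSize)) { /* handle error */ }
```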