Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix make clangbuild & add CI #3393

Merged
merged 1 commit into from
Dec 22, 2022
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Fix make clangbuild & add CI
Fix the errors for:
* `-Wdocumentation`
* `-Wconversion` except `-Wsign-conversion`
  • Loading branch information
terrelln committed Dec 22, 2022

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
commit b50ee62cc31534a2af551dbcdb6c2ace74733de5
8 changes: 8 additions & 0 deletions .github/workflows/dev-short-tests.yml
Original file line number Diff line number Diff line change
@@ -498,6 +498,14 @@ jobs:
run: |
make -C tests versionsTest

clangbuild:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # tag=v3
- name: make clangbuild
run: |
make clangbuild


# For reference : icc tests
# icc tests are currently failing on GitHub Actions, likely due to issues during the installation stage
4 changes: 2 additions & 2 deletions contrib/largeNbDicts/largeNbDicts.c
Original file line number Diff line number Diff line change
@@ -856,7 +856,7 @@ int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary,
CONTROL(cTotalSizeNoDict != 0);
DISPLAYLEVEL(3, "compressing at level %u without dictionary : Ratio=%.2f (%u bytes) \n",
clevel,
(double)totalSrcSlicesSize / cTotalSizeNoDict, (unsigned)cTotalSizeNoDict);
(double)totalSrcSlicesSize / (double)cTotalSizeNoDict, (unsigned)cTotalSizeNoDict);

size_t* const cSizes = malloc(nbBlocks * sizeof(size_t));
CONTROL(cSizes != NULL);
@@ -865,7 +865,7 @@ int bench(const char **fileNameTable, unsigned nbFiles, const char *dictionary,
CONTROL(cTotalSize != 0);
DISPLAYLEVEL(3, "compressed using a %u bytes dictionary : Ratio=%.2f (%u bytes) \n",
(unsigned)dictBuffer.size,
(double)totalSrcSlicesSize / cTotalSize, (unsigned)cTotalSize);
(double)totalSrcSlicesSize / (double)cTotalSize, (unsigned)cTotalSize);

/* now dstSlices contain the real compressed size of each block, instead of the maximum capacity */
shrinkSizes(dstSlices, cSizes);
2 changes: 1 addition & 1 deletion lib/compress/zstd_compress.c
Original file line number Diff line number Diff line change
@@ -376,7 +376,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete

/**
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
* @param param Validated zstd parameters.
* @param params Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
4 changes: 2 additions & 2 deletions lib/dictBuilder/cover.c
Original file line number Diff line number Diff line change
@@ -647,7 +647,7 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,

void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
const double ratio = (double)nbDmers / maxDictSize;
const double ratio = (double)nbDmers / (double)maxDictSize;
if (ratio >= 10) {
return;
}
@@ -1040,7 +1040,7 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe
return COVER_dictSelectionError(totalCompressedSize);
}

if (totalCompressedSize <= largestCompressed * regressionTolerance) {
if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) {
COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
free(largestDictbuffer);
return selection;
4 changes: 2 additions & 2 deletions lib/dictBuilder/zdict.c
Original file line number Diff line number Diff line change
@@ -373,7 +373,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
elt = table[u];
/* sort : improve rank */
while ((u>1) && (table[u-1].savings < elt.savings))
table[u] = table[u-1], u--;
table[u] = table[u-1], u--;
table[u] = elt;
return u;
} }
@@ -524,7 +524,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
if (solution.length==0) { cursor++; continue; }
ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
cursor += solution.length;
DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0);
} }

_cleanup:
6 changes: 3 additions & 3 deletions lib/zdict.h
Original file line number Diff line number Diff line change
@@ -212,9 +212,9 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap
const size_t* samplesSizes, unsigned nbSamples);

typedef struct {
int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */
unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value)
int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */
unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value)
* NOTE: The zstd format reserves some dictionary IDs for future use.
* You may use them in private settings, but be warned that they
* may be used by zstd in a public dictionary registry in the future.
8 changes: 4 additions & 4 deletions lib/zstd.h
Original file line number Diff line number Diff line change
@@ -2535,8 +2535,8 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().

It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
@@ -2555,7 +2555,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_

The most memory efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
which can @return an error code if required value is too large for current system (in 32-bits mode).
which can return an error code if required value is too large for current system (in 32-bits mode).
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
@@ -2575,7 +2575,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.

@result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().

6 changes: 3 additions & 3 deletions programs/benchfn.c
Original file line number Diff line number Diff line change
@@ -229,17 +229,17 @@ BMK_runOutcome_t BMK_benchTimedFn(BMK_timedFnState_t* cont,
cont->timeSpent_ns += (unsigned long long)loopDuration_ns;

/* estimate nbLoops for next run to last approximately 1 second */
if (loopDuration_ns > (runBudget_ns / 50)) {
if (loopDuration_ns > ((double)runBudget_ns / 50)) {
double const fastestRun_ns = MIN(bestRunTime.nanoSecPerRun, newRunTime.nanoSecPerRun);
cont->nbLoops = (unsigned)(runBudget_ns / fastestRun_ns) + 1;
cont->nbLoops = (unsigned)((double)runBudget_ns / fastestRun_ns) + 1;
} else {
/* previous run was too short : blindly increase workload by x multiplier */
const unsigned multiplier = 10;
assert(cont->nbLoops < ((unsigned)-1) / multiplier); /* avoid overflow */
cont->nbLoops *= multiplier;
}

if(loopDuration_ns < runTimeMin_ns) {
if(loopDuration_ns < (double)runTimeMin_ns) {
/* don't report results for which benchmark run time was too small : increased risks of rounding errors */
assert(completed == 0);
continue;
12 changes: 6 additions & 6 deletions programs/fileio.c
Original file line number Diff line number Diff line change
@@ -1059,12 +1059,12 @@ FIO_compressGzFrame(const cRess_t* ress, /* buffers & handlers are used, but no
DISPLAYUPDATE_PROGRESS(
"\rRead : %u MB ==> %.2f%% ",
(unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
(double)outFileSize/(double)inFileSize*100)
} else {
DISPLAYUPDATE_PROGRESS(
"\rRead : %u / %u MB ==> %.2f%% ",
(unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
(double)outFileSize/(double)inFileSize*100);
} }

while (1) {
@@ -1157,11 +1157,11 @@ FIO_compressLzmaFrame(cRess_t* ress,
if (srcFileSize == UTIL_FILESIZE_UNKNOWN)
DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%",
(unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
(double)outFileSize/(double)inFileSize*100)
else
DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%",
(unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
(double)outFileSize/(double)inFileSize*100);
if (ret == LZMA_STREAM_END) break;
}

@@ -1241,11 +1241,11 @@ FIO_compressLz4Frame(cRess_t* ress,
if (srcFileSize == UTIL_FILESIZE_UNKNOWN) {
DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%",
(unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
(double)outFileSize/(double)inFileSize*100)
} else {
DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%",
(unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
(double)outFileSize/(double)inFileSize*100);
}

/* Write Block */
1 change: 0 additions & 1 deletion programs/util.h
Original file line number Diff line number Diff line change
@@ -262,7 +262,6 @@ UTIL_mergeFileNamesTable(FileNamesTable* table1, FileNamesTable* table2);
/*! UTIL_expandFNT() :
* read names from @fnt, and expand those corresponding to directories
* update @fnt, now containing only file names,
* @return : 0 in case of success, 1 if error
* note : in case of error, @fnt[0] is NULL
*/
void UTIL_expandFNT(FileNamesTable** fnt, int followLinks);
6 changes: 3 additions & 3 deletions tests/decodecorpus.c
Original file line number Diff line number Diff line change
@@ -136,7 +136,7 @@ static void RAND_genDist(U32* seed, BYTE* dist, double weight)
BYTE step = (BYTE) ((RAND(seed) % 256) | 1); /* force it to be odd so it's relatively prime to 256 */

while (i < DISTSIZE) {
size_t states = ((size_t)(weight * statesLeft)) + 1;
size_t states = ((size_t)(weight * (double)statesLeft)) + 1;
size_t j;
for (j = 0; j < states && i < DISTSIZE; j++, i++) {
dist[i] = symb;
@@ -662,11 +662,11 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
* ensure nice numbers */
U32 matchLen =
MIN_SEQ_LEN +
ROUND(RAND_exp(seed, excessMatch / (double)(numSequences - i)));
ROUND(RAND_exp(seed, (double)excessMatch / (double)(numSequences - i)));
U32 literalLen =
(RAND(seed) & 7)
? ROUND(RAND_exp(seed,
literalsSize /
(double)literalsSize /
(double)(numSequences - i)))
: 0;
/* actual offset, code to send, and point to copy up to when shifting
38 changes: 19 additions & 19 deletions tests/paramgrill.c
Original file line number Diff line number Diff line change
@@ -566,10 +566,10 @@ resultScore(const BMK_benchResult_t res, const size_t srcSize, const constraint_
double cs = 0., ds = 0., rt, cm = 0.;
const double r1 = 1, r2 = 0.1, rtr = 0.5;
double ret;
if(target.cSpeed) { cs = res.cSpeed / (double)target.cSpeed; }
if(target.dSpeed) { ds = res.dSpeed / (double)target.dSpeed; }
if(target.cMem != (U32)-1) { cm = (double)target.cMem / res.cMem; }
rt = ((double)srcSize / res.cSize);
if(target.cSpeed) { cs = (double)res.cSpeed / (double)target.cSpeed; }
if(target.dSpeed) { ds = (double)res.dSpeed / (double)target.dSpeed; }
if(target.cMem != (U32)-1) { cm = (double)target.cMem / (double)res.cMem; }
rt = ((double)srcSize / (double)res.cSize);

ret = (MIN(1, cs) + MIN(1, ds) + MIN(1, cm))*r1 + rt * rtr +
(MAX(0, log(cs))+ MAX(0, log(ds))+ MAX(0, log(cm))) * r2;
@@ -581,8 +581,8 @@ resultScore(const BMK_benchResult_t res, const size_t srcSize, const constraint_
static double
resultDistLvl(const BMK_benchResult_t result1, const BMK_benchResult_t lvlRes)
{
double normalizedCSpeedGain1 = ((double)result1.cSpeed / lvlRes.cSpeed) - 1;
double normalizedRatioGain1 = ((double)lvlRes.cSize / result1.cSize) - 1;
double normalizedCSpeedGain1 = ((double)result1.cSpeed / (double)lvlRes.cSpeed) - 1;
double normalizedRatioGain1 = ((double)lvlRes.cSize / (double)result1.cSize) - 1;
if(normalizedRatioGain1 < 0 || normalizedCSpeedGain1 < 0) {
return 0.0;
}
@@ -854,7 +854,7 @@ BMK_displayOneResult(FILE* f, winnerInfo_t res, const size_t srcSize)
}

{ double const ratio = res.result.cSize ?
(double)srcSize / res.result.cSize : 0;
(double)srcSize / (double)res.result.cSize : 0;
double const cSpeedMBps = (double)res.result.cSpeed / MB_UNIT;
double const dSpeedMBps = (double)res.result.dSpeed / MB_UNIT;

@@ -937,7 +937,7 @@ BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_benchResult_t result, co
}
fprintf(f, "================================\n");
fprintf(f, "Level Bounds: R: > %.3f AND C: < %.1f MB/s \n\n",
(double)srcSize / g_lvltarget.cSize, (double)g_lvltarget.cSpeed / MB_UNIT);
(double)srcSize / (double)g_lvltarget.cSize, (double)g_lvltarget.cSpeed / MB_UNIT);


fprintf(f, "Overall Winner: \n");
@@ -977,7 +977,7 @@ BMK_print_cLevelEntry(FILE* f, const int cLevel,
}
/* print comment */
{ double const ratio = result.cSize ?
(double)srcSize / result.cSize : 0;
(double)srcSize / (double)result.cSize : 0;
double const cSpeedMBps = (double)result.cSpeed / MB_UNIT;
double const dSpeedMBps = (double)result.dSpeed / MB_UNIT;

@@ -1726,19 +1726,19 @@ static int allBench(BMK_benchResult_t* resultPtr,

/* calculate uncertainty in compression / decompression runs */
if (benchres.cSpeed) {
U64 const loopDurationC = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
double const loopDurationC = (double)(((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC);
}

if (benchres.dSpeed) {
U64 const loopDurationD = (((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
double const loopDurationD = (double)(((U64)buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD);
}

/* optimistic assumption of benchres */
{ BMK_benchResult_t resultMax = benchres;
resultMax.cSpeed = (unsigned long long)(resultMax.cSpeed * uncertaintyConstantC * VARIANCE);
resultMax.dSpeed = (unsigned long long)(resultMax.dSpeed * uncertaintyConstantD * VARIANCE);
resultMax.cSpeed = (unsigned long long)((double)resultMax.cSpeed * uncertaintyConstantC * VARIANCE);
resultMax.dSpeed = (unsigned long long)((double)resultMax.dSpeed * uncertaintyConstantD * VARIANCE);

/* disregard infeasible results in feas mode */
/* disregard if resultMax < winner in infeas mode */
@@ -1850,8 +1850,8 @@ static int BMK_seed(winnerInfo_t* winners,

if ((double)testResult.cSize <= ((double)winners[cLevel].result.cSize * (1. + (0.02 / cLevel))) ) {
/* Validate solution is "good enough" */
double W_ratio = (double)buf.srcSize / testResult.cSize;
double O_ratio = (double)buf.srcSize / winners[cLevel].result.cSize;
double W_ratio = (double)buf.srcSize / (double)testResult.cSize;
double O_ratio = (double)buf.srcSize / (double)winners[cLevel].result.cSize;
double W_ratioNote = log (W_ratio);
double O_ratioNote = log (O_ratio);
size_t W_DMemUsed = (1 << params.vals[wlog_ind]) + (16 KB);
@@ -1864,11 +1864,11 @@ static int BMK_seed(winnerInfo_t* winners,
double W_CMemUsed_note = W_ratioNote * ( 50 + 13*cLevel) - log((double)W_CMemUsed);
double O_CMemUsed_note = O_ratioNote * ( 50 + 13*cLevel) - log((double)O_CMemUsed);

double W_CSpeed_note = W_ratioNote * (double)( 30 + 10*cLevel) + log(testResult.cSpeed);
double O_CSpeed_note = O_ratioNote * (double)( 30 + 10*cLevel) + log(winners[cLevel].result.cSpeed);
double W_CSpeed_note = W_ratioNote * (double)( 30 + 10*cLevel) + log((double)testResult.cSpeed);
double O_CSpeed_note = O_ratioNote * (double)( 30 + 10*cLevel) + log((double)winners[cLevel].result.cSpeed);

double W_DSpeed_note = W_ratioNote * (double)( 20 + 2*cLevel) + log(testResult.dSpeed);
double O_DSpeed_note = O_ratioNote * (double)( 20 + 2*cLevel) + log(winners[cLevel].result.dSpeed);
double W_DSpeed_note = W_ratioNote * (double)( 20 + 2*cLevel) + log((double)testResult.dSpeed);
double O_DSpeed_note = O_ratioNote * (double)( 20 + 2*cLevel) + log((double)winners[cLevel].result.dSpeed);

if (W_DMemUsed_note < O_DMemUsed_note) {
/* uses too much Decompression memory for too little benefit */
Loading