We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 778e2de · commit 1d9d0d5 — Copy full SHA for 1d9d0d5
src/llama-vocab.cpp
@@ -3078,7 +3078,7 @@ int32_t llama_vocab::tokenize(
3078
LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size());
3079
return std::numeric_limits<int32_t>::min();
3080
}
3081
-
+
3082
if (n_tokens_max < (int) res.size()) {
3083
// LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
3084
return -((int) res.size());
0 commit comments