From 731ecc0d1be4cb3119b2e371a544914edf95d591 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:45:16 -0300
Subject: [PATCH 1/7] fix typo

---
 examples/server/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 67b0867545574..74126c687ffd3 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -4,7 +4,7 @@ add_executable(${TARGET} server.cpp json.hpp httplib.h)
 target_compile_definitions(${TARGET} PRIVATE
     # single thread
     CPPHTTPLIB_THREAD_POOL_COUNT=1
-    # crash the server in the debug mode, otherwise send http 500 error
+    # crash the server in debug mode, otherwise send an http 500 error
     $<$<CONFIG:Debug>:
         CPPHTTPLIB_NO_EXCEPTIONS=1
     >

From ebfead6e5a16d0c77ab3e42aeb61eb10f5c831f8 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:45:57 -0300
Subject: [PATCH 2/7] remove unused variables

---
 examples/server/server.cpp | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index afa52a28651ef..c7b8158c6d0a2 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -55,8 +55,6 @@ struct llama_server_context
 
     size_t num_tokens_predicted = 0;
     size_t n_past = 0;
-    size_t n_consumed = 0;
-    size_t n_session_consumed = 0;
     size_t n_remain = 0;
 
     std::vector<llama_token> embd;
@@ -87,7 +85,6 @@ struct llama_server_context
 
         n_remain = 0;
         n_past = 0;
-        n_consumed = 0;
     }
 
     bool loadModel(const gpt_params &params_)

From 1488a0f528f338a50be397baea98ac739d174be3 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:47:00 -0300
Subject: [PATCH 3/7] make functions that never return false void

---
 examples/server/server.cpp | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c7b8158c6d0a2..a3b16cad1a34b 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -102,7 +102,7 @@ struct llama_server_context
         return true;
     }
 
-    bool loadPrompt() {
+    void loadPrompt() {
         params.prompt.insert(0, 1, ' '); // always add a first space
         std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
 
@@ -132,7 +132,6 @@ struct llama_server_context
             n_past--;
         }
         has_next_token = true;
-        return true;
     }
 
     void beginCompletion()
@@ -389,7 +388,7 @@ void server_print_usage(int /*argc*/, char **argv, const gpt_params &params, con
     fprintf(stderr, "\n");
 }
 
-bool server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
+void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
 {
     gpt_params default_params;
     server_params default_sparams;
@@ -531,7 +530,6 @@ bool server_params_parse(int argc, char **argv, server_params &sparams, gpt_para
         server_print_usage(argc, argv, default_params, default_sparams);
         exit(1);
     }
-    return true;
 }
 
 json format_generation_settings(llama_server_context &llama) {
@@ -706,10 +704,7 @@ int main(int argc, char **argv)
     llama_server_context llama;
     params.model = "ggml-model.bin";
 
-    if (server_params_parse(argc, argv, sparams, params) == false)
-    {
-        return 1;
-    }
+    server_params_parse(argc, argv, sparams, params);
 
     llama.verbose = sparams.verbose;
     llama.json_indent = sparams.verbose ? 4 : -1;
@@ -757,15 +752,7 @@ int main(int argc, char **argv)
             return;
         }
 
-        if (!llama.loadPrompt()) {
-            json data = {{"status", "error"}, {"reason", "Context too long."}};
-            res.set_content(
-                data.dump(llama.json_indent, ' ', false, json::error_handler_t::replace),
-                "application/json");
-            res.status = 400;
-            return;
-        }
-
+        llama.loadPrompt();
         llama.beginCompletion();
 
         if (!llama.stream) {

From 49dce94885a72d264afc15e9619239ecda214af5 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:51:34 -0300
Subject: [PATCH 4/7] make types match gpt_params exactly

---
 examples/server/server.cpp | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index a3b16cad1a34b..fcdc38e8aa690 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -567,12 +567,12 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.stream = false;
     }
     if (!body["n_predict"].is_null()) {
-        llama.params.n_predict = body["n_predict"].get();
+        llama.params.n_predict = body["n_predict"].get();
     } else {
         llama.params.n_predict = default_params.n_predict;
     }
     if (!body["top_k"].is_null()) {
-        llama.params.top_k = body["top_k"].get();
+        llama.params.top_k = body["top_k"].get();
     } else {
         llama.params.top_k = default_params.top_k;
     }
@@ -592,7 +592,7 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.typical_p = default_params.typical_p;
     }
     if (!body["repeat_last_n"].is_null()) {
-        llama.params.repeat_last_n = body["repeat_last_n"].get();
+        llama.params.repeat_last_n = body["repeat_last_n"].get();
     } else {
         llama.params.repeat_last_n = default_params.repeat_last_n;
     }
@@ -617,7 +617,7 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.frequency_penalty = default_params.frequency_penalty;
     }
     if (!body["mirostat"].is_null()) {
-        llama.params.mirostat = body["mirostat"].get();
+        llama.params.mirostat = body["mirostat"].get();
     } else {
         llama.params.mirostat = default_params.mirostat;
     }
@@ -632,17 +632,17 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.mirostat_eta = default_params.mirostat_eta;
    }
    if (!body["penalize_nl"].is_null()) {
-        llama.params.penalize_nl = body["penalize_nl"].get();
+        llama.params.penalize_nl = body["penalize_nl"].get();
    } else {
        llama.params.penalize_nl = default_params.penalize_nl;
    }
    if (!body["n_keep"].is_null()) {
-        llama.params.n_keep = body["n_keep"].get();
+        llama.params.n_keep = body["n_keep"].get();
    } else {
        llama.params.n_keep = default_params.n_keep;
    }
    if (!body["seed"].is_null()) {
-        llama.params.seed = body["seed"].get();
+        llama.params.seed = body["seed"].get();
    } else {
        llama.params.seed = time(NULL);
    }

From a8a9f1968956ebd65a44ed460d015e4cf60c1d65 Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 05:57:20 -0300
Subject: [PATCH 5/7] small fixes

---
 examples/server/server.cpp | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index fcdc38e8aa690..d195fb1673be7 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -137,8 +137,6 @@ struct llama_server_context
     void beginCompletion()
     {
         // number of tokens to keep when resetting context
-
-
         n_remain = params.n_predict;
         llama_set_rng_seed(ctx, params.seed);
     }
@@ -192,9 +190,8 @@ struct llama_server_context
         auto n_vocab = llama_n_vocab(ctx);
 
         // Apply params.logit_bias map
-        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++)
-        {
-            logits[it->first] += it->second;
+        for (const auto &it : params.logit_bias) {
+            logits[it.first] += it.second;
         }
 
         std::vector<llama_token_data> candidates;
@@ -271,7 +268,7 @@ struct llama_server_context
             return result;
         }
 
-        has_next_token = params.n_predict == -1 ? true : n_remain != 0;
+        has_next_token = params.n_predict == -1 || n_remain != 0;
         return result;
     }
 
@@ -330,7 +327,7 @@ struct llama_server_context
     std::vector<float> embedding(std::string content, int threads) {
         content.insert(0, 1, ' ');
         std::vector<llama_token> tokens = ::llama_tokenize(ctx, content, true);
-        if (tokens.size() > 0)
+        if (!tokens.empty())
         {
             if (llama_eval(ctx, tokens.data(), tokens.size(), 0, threads))
             {
@@ -340,7 +337,7 @@ struct llama_server_context
             }
         }
         const int n_embd = llama_n_embd(ctx);
-        const auto embeddings = llama_get_embeddings(ctx);
+        auto *const embeddings = llama_get_embeddings(ctx);
         std::vector<float> embeddings_(embeddings, embeddings + n_embd);
         return embeddings_;
     }

From 2932db15a30b26c0168dd40e826e58df64cb98be Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 06:55:38 -0300
Subject: [PATCH 6/7] avoid creating element in logit_bias accidentally

---
 examples/server/server.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index d195fb1673be7..44f6b49055084 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -530,7 +530,9 @@ void server_params_parse(int argc, char **argv, server_params &sparams, gpt_para
 }
 
 json format_generation_settings(llama_server_context &llama) {
-    const bool ignore_eos = -INFINITY == llama.params.logit_bias[llama_token_eos()];
+    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
+    const bool ignore_eos =
+        eos_bias != llama.params.logit_bias.end() && -INFINITY == eos_bias->second;
     return json {
         { "seed", llama.params.seed },
         { "temp", llama.params.temp },

From 47efbb5cf379399416f1cee611b7585507f948bb Mon Sep 17 00:00:00 2001
From: anon
Date: Fri, 2 Jun 2023 07:19:21 -0300
Subject: [PATCH 7/7] use std::isinf to check if ignore_eos is active

---
 examples/server/server.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 44f6b49055084..7f287e1c782d7 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -531,8 +531,8 @@ void server_params_parse(int argc, char **argv, server_params &sparams, gpt_para
 
 json format_generation_settings(llama_server_context &llama) {
     const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
-    const bool ignore_eos =
-        eos_bias != llama.params.logit_bias.end() && -INFINITY == eos_bias->second;
+    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
+                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);
     return json {
         { "seed", llama.params.seed },
         { "temp", llama.params.temp },