From 1dde723b3e680c6ff9ef234ac5ba58c31a5d6359 Mon Sep 17 00:00:00 2001
From: Hao Wu
Date: Sun, 8 Sep 2024 10:30:24 +0800
Subject: [PATCH] Remove commented-out token limit check and unused completion
 model addition. (#523)

---
 api/chat_main_handler.go | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/api/chat_main_handler.go b/api/chat_main_handler.go
index ada2e9b3..d77591e2 100644
--- a/api/chat_main_handler.go
+++ b/api/chat_main_handler.go
@@ -250,18 +250,6 @@ func genAnswer(h *ChatHandler, w http.ResponseWriter, chatSessionUuid string, ch
 		return
 	}
 
-	// check if total tokens exceed limit
-	// context window, max token
-	// totalTokens := totalInputToken(msgs)
-	// if totalTokens > chatSession.MaxTokens {
-	// 	RespondWithError(w, http.StatusRequestEntityTooLarge, "error.token_length_exceed_limit",
-	// 		map[string]interface{}{
-	// 			"max_tokens":   chatSession.MaxTokens,
-	// 			"total_tokens": totalTokens,
-	// 		})
-	// 	return
-	// }
-
 	chatStreamFn := h.chooseChatStreamFn(chatSession, msgs)
 
 	answerText, answerID, shouldReturn := chatStreamFn(w, chatSession, msgs, chatUuid, false, streamOutput)
@@ -385,8 +373,7 @@ func (h *ChatHandler) chooseChatStreamFn(chat_session sqlc_queries.ChatSession,
 
 	completionModel := mapset.NewSet[string]()
 
-	completionModel.Add(openai.GPT3TextDavinci003)
-	completionModel.Add(openai.GPT3TextDavinci002)
+	// completionModel.Add(openai.GPT3TextDavinci002)
 
 	isCompletion := completionModel.Contains(model)
 	isCustom := strings.HasPrefix(model, "custom-")
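
A minimal, self-contained sketch of the routing that chooseChatStreamFn is left with after this change, assuming the mapset API (github.com/deckarep/golang-set/v2) already imported by the handler; the pickStreamKind function, the string return values, and the example model names are illustrative and not part of the patch:

package main

import (
	"fmt"
	"strings"

	mapset "github.com/deckarep/golang-set/v2"
)

// pickStreamKind mirrors the selection order in the handler: legacy completion
// models would be looked up in a set (left empty by this patch, so that branch
// is effectively disabled), "custom-" models are detected by prefix, and
// everything else falls through to the chat-completions path.
func pickStreamKind(model string) string {
	completionModel := mapset.NewSet[string]()
	// completionModel.Add("text-davinci-002") // openai.GPT3TextDavinci002 in the handler

	switch {
	case completionModel.Contains(model):
		return "completion"
	case strings.HasPrefix(model, "custom-"):
		return "custom"
	default:
		return "chat"
	}
}

func main() {
	for _, m := range []string{"gpt-4o", "custom-llama3", "text-davinci-002"} {
		fmt.Printf("%-16s -> %s\n", m, pickStreamKind(m))
	}
}

With the set left empty, text-davinci-002 now takes the default chat path; re-enabling the commented Add call would route it back through the legacy completion branch.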