From b5a3f400c58c6f6bf93e86e881b24f10cb6c3414 Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Thu, 30 May 2024 10:30:21 +0800
Subject: [PATCH 1/2] fix data initial bug

---
 api/db/init_data.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/db/init_data.py b/api/db/init_data.py
index 01b019ebbb..89e0cf4702 100644
--- a/api/db/init_data.py
+++ b/api/db/init_data.py
@@ -532,8 +532,8 @@ def init_llm_factory():
         {
             "fid": factory_infos[12]["name"],
             "llm_name": "BAAI/bge-reranker-v2-m3",
-            "tags": "LLM,CHAT,",
-            "max_tokens": 16385,
+            "tags": "RE-RANK,2k",
+            "max_tokens": 2048,
             "model_type": LLMType.RERANK.value
         },
     ]

From ce56d4ae49cfb2fce51e03b2864b6566f1b6129b Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Thu, 30 May 2024 11:26:53 +0800
Subject: [PATCH 2/2] set ollama keep_alive

---
 rag/llm/chat_model.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py
index a9530fe61e..f730c6375e 100644
--- a/rag/llm/chat_model.py
+++ b/rag/llm/chat_model.py
@@ -303,7 +303,8 @@ def chat(self, system, history, gen_conf):
         response = self.client.chat(
             model=self.model_name,
             messages=history,
-            options=options
+            options=options,
+            keep_alive=-1
         )
         ans = response["message"]["content"].strip()
         return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
@@ -325,7 +326,8 @@ def chat_streamly(self, system, history, gen_conf):
             model=self.model_name,
             messages=history,
             stream=True,
-            options=options
+            options=options,
+            keep_alive=-1
         )
         for resp in response:
             if resp["done"]:
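
Reviewer note (not part of the patch): a minimal standalone sketch of what the
added keep_alive=-1 argument does with the ollama Python client. The host URL,
model name, and message below are placeholders, not values from this patch.

    from ollama import Client

    # Placeholder endpoint; point this at your own Ollama server.
    client = Client(host="http://localhost:11434")

    # keep_alive=-1 asks the Ollama server to keep the model resident in
    # memory indefinitely after the call, so the next request skips the
    # model-load latency. The server default is "5m" (unload after five
    # minutes idle); keep_alive=0 unloads the model immediately.
    response = client.chat(
        model="llama3",  # placeholder model name
        messages=[{"role": "user", "content": "hello"}],
        keep_alive=-1,
    )
    print(response["message"]["content"])

The same keep_alive=-1 is passed at both call sites patched above; in
chat_streamly it sits alongside stream=True and behaves identically.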