From cbb986b5a2b65fed8ee257722eb86a088c3b7ff1 Mon Sep 17 00:00:00 2001
From: Ivaylo Gochkov
Date: Mon, 28 Oct 2024 22:19:10 +0100
Subject: [PATCH] fixed warnings

---
 Dockerfile                                  | 7 -------
 gpt_researcher/llm_provider/generic/base.py | 2 +-
 gpt_researcher/memory/embeddings.py         | 2 +-
 gpt_researcher/scraper/arxiv/arxiv.py       | 2 +-
 4 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index ba4e29a32..9cb4fba85 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -29,13 +29,6 @@ RUN pip install --no-cache-dir -r requirements.txt && \
 # Stage 3: Final stage with non-root user and app
 FROM gpt-researcher-install AS gpt-researcher
 
-# Use environment variables for API keys (defaults can be overridden at runtime)
-ARG OPENAI_API_KEY
-ARG TAVILY_API_KEY
-
-ENV OPENAI_API_KEY=${OPENAI_API_KEY}
-ENV TAVILY_API_KEY=${TAVILY_API_KEY}
-
 # Create a non-root user for security
 RUN useradd -ms /bin/bash gpt-researcher && \
     chown -R gpt-researcher:gpt-researcher /usr/src/app
diff --git a/gpt_researcher/llm_provider/generic/base.py b/gpt_researcher/llm_provider/generic/base.py
index 7e64a8254..e7c8e2814 100644
--- a/gpt_researcher/llm_provider/generic/base.py
+++ b/gpt_researcher/llm_provider/generic/base.py
@@ -68,7 +68,7 @@ def from_provider(cls, provider: str, **kwargs: Any):
             llm = ChatFireworks(**kwargs)
         elif provider == "ollama":
             _check_pkg("langchain_community")
-            from langchain_community.chat_models import ChatOllama
+            from langchain_ollama import ChatOllama
 
             llm = ChatOllama(base_url=os.environ["OLLAMA_BASE_URL"], **kwargs)
         elif provider == "together":
diff --git a/gpt_researcher/memory/embeddings.py b/gpt_researcher/memory/embeddings.py
index 0e917ed4e..5bfcc8cb5 100644
--- a/gpt_researcher/memory/embeddings.py
+++ b/gpt_researcher/memory/embeddings.py
@@ -17,7 +17,7 @@ def __init__(self, embedding_provider: str, model: str, **embdding_kwargs: Any):
         _embeddings = None
         match embedding_provider:
             case "ollama":
-                from langchain_community.embeddings import OllamaEmbeddings
+                from langchain_ollama import OllamaEmbeddings
 
                 _embeddings = OllamaEmbeddings(
                     model=model,
diff --git a/gpt_researcher/scraper/arxiv/arxiv.py b/gpt_researcher/scraper/arxiv/arxiv.py
index e3c902830..2af550417 100644
--- a/gpt_researcher/scraper/arxiv/arxiv.py
+++ b/gpt_researcher/scraper/arxiv/arxiv.py
@@ -18,5 +18,5 @@ def scrape(self):
         """
         query = self.link.split("/")[-1]
         retriever = ArxivRetriever(load_max_docs=2, doc_content_chars_max=None)
-        docs = retriever.get_relevant_documents(query=query)
+        docs = retriever.invoke(query)
         return docs[0].page_content