Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions tests/test_litellm/containers/test_container_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,21 @@
)


@pytest.fixture(autouse=True)
def clear_client_cache():
    """
    Flush litellm's in-memory HTTP client cache around each test.

    Setup flushes any previously cached (possibly real) HTTP clients so
    that mocks installed by the test are actually used; teardown flushes
    again so clients created by this test do not leak into later tests.
    """
    client_cache = getattr(litellm, "in_memory_llm_clients_cache", None)

    def _flush():
        # Guarded: the cache attribute may be absent on some litellm builds.
        if client_cache is not None:
            client_cache.flush_cache()

    _flush()
    yield
    _flush()


class TestContainerAPI:
"""Test suite for container API functionality."""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,29 @@

sys.path.insert(0, os.path.abspath("../../.."))

import litellm
from litellm_enterprise.enterprise_callbacks.send_emails.resend_email import (
ResendEmailLogger,
)

# Test file for Resend email integration


@pytest.fixture(autouse=True)
def clear_client_cache():
    """
    Autouse fixture: flush litellm's in-memory HTTP client cache
    before and after every test in this module.

    Prevents a previously cached real client from shadowing the mocks a
    test installs, and keeps this module's clients from polluting others.
    """
    client_cache = getattr(litellm, "in_memory_llm_clients_cache", None)

    def _flush():
        # No-op when the cache attribute is missing (older litellm builds).
        if client_cache is not None:
            client_cache.flush_cache()

    _flush()
    yield
    _flush()


@pytest.fixture
def mock_env_vars():
with mock.patch.dict(os.environ, {"RESEND_API_KEY": "test_api_key"}):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,26 @@

sys.path.insert(0, os.path.abspath("../../.."))

import litellm
from litellm_enterprise.enterprise_callbacks.send_emails.sendgrid_email import (
SendGridEmailLogger,
)


@pytest.fixture(autouse=True)
def clear_client_cache():
    """
    Flush litellm's in-memory HTTP client cache on both sides of a test.

    Pre-test flush ensures mocks are not bypassed by cached real clients;
    post-test flush keeps cached clients from leaking into other tests.
    """
    client_cache = getattr(litellm, "in_memory_llm_clients_cache", None)

    def _flush():
        # Tolerate litellm versions without the cache attribute.
        if client_cache is not None:
            client_cache.flush_cache()

    _flush()
    yield
    _flush()


@pytest.fixture
def mock_env_vars():
# Store original values
Expand Down
14 changes: 14 additions & 0 deletions tests/test_litellm/test_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,20 @@
from litellm import main as litellm_main


@pytest.fixture(autouse=True)
def clear_client_cache():
    """
    Autouse fixture that empties litellm's in-memory HTTP client cache
    before and after each test.

    A stale cached client created by an earlier test would otherwise be
    reused and bypass the mocks this test sets up.
    """
    client_cache = getattr(litellm, "in_memory_llm_clients_cache", None)

    def _flush():
        # Skip silently if this litellm build has no client cache.
        if client_cache is not None:
            client_cache.flush_cache()

    _flush()
    yield
    _flush()


@pytest.fixture(autouse=True)
def add_api_keys_to_env(monkeypatch):
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-1234567890")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,10 @@ def test_vector_store_create_with_simple_provider_name():
)

assert vector_store_provider_config is not None, "Should return a config for OpenAI"
assert isinstance(
vector_store_provider_config, OpenAIVectorStoreConfig
), "Should return OpenAIVectorStoreConfig for OpenAI provider"
# Use type name check instead of isinstance to avoid module identity issues
# caused by sys.path manipulation in test setup
assert type(vector_store_provider_config).__name__ == "OpenAIVectorStoreConfig", \
f"Should return OpenAIVectorStoreConfig for OpenAI provider, got {type(vector_store_provider_config).__name__}"

print("✅ Test passed: Simple provider name 'openai' handled correctly")

Expand Down Expand Up @@ -97,9 +98,9 @@ def test_vector_store_create_with_provider_api_type():
)

assert vector_store_provider_config is not None, "Should return a config for Vertex AI"
assert isinstance(
vector_store_provider_config, VertexVectorStoreConfig
), "Should return VertexVectorStoreConfig for vertex_ai provider with rag_api"
# Use type name check instead of isinstance to avoid module identity issues
assert type(vector_store_provider_config).__name__ == "VertexVectorStoreConfig", \
f"Should return VertexVectorStoreConfig for vertex_ai provider with rag_api, got {type(vector_store_provider_config).__name__}"

print("✅ Test passed: Provider with api_type 'vertex_ai/rag_api' handled correctly")

Expand Down Expand Up @@ -134,9 +135,9 @@ def test_vector_store_create_with_ragflow_provider():
)

assert vector_store_provider_config is not None, "Should return a config for RAGFlow"
assert isinstance(
vector_store_provider_config, RAGFlowVectorStoreConfig
), "Should return RAGFlowVectorStoreConfig for RAGFlow provider"
# Use type name check instead of isinstance to avoid module identity issues
assert type(vector_store_provider_config).__name__ == "RAGFlowVectorStoreConfig", \
f"Should return RAGFlowVectorStoreConfig for RAGFlow provider, got {type(vector_store_provider_config).__name__}"

print("✅ Test passed: RAGFlow provider handled correctly")

14 changes: 14 additions & 0 deletions tests/test_litellm/vector_stores/test_vector_store_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,20 @@
from litellm.vector_stores.vector_store_registry import VectorStoreRegistry


@pytest.fixture(autouse=True)
def clear_client_cache():
    """
    Flush litellm's in-memory HTTP client cache around every test.

    Clearing on setup guarantees each test sees mocked clients rather
    than cached real ones; clearing on teardown isolates later tests.
    """
    client_cache = getattr(litellm, "in_memory_llm_clients_cache", None)

    def _flush():
        # Attribute may be absent on some litellm versions; do nothing then.
        if client_cache is not None:
            client_cache.flush_cache()

    _flush()
    yield
    _flush()


def test_get_credentials_for_vector_store():
"""Test that get_credentials_for_vector_store returns correct credentials"""
# Create test vector stores
Expand Down
Loading