
Commit 11874b9

add common litellm call to remove duplicate code
1 parent 94c8236

File tree: 2 files changed, +77 -0 lines changed


src/lightspeed_evaluation/core/llm/__init__.py
Lines changed: 2 additions & 0 deletions

@@ -1,5 +1,6 @@
 """LLM management for Evaluation Framework."""
 
+from lightspeed_evaluation.core.llm.custom import BaseCustomLLM
 from lightspeed_evaluation.core.llm.deepeval import DeepEvalLLMManager
 from lightspeed_evaluation.core.llm.manager import LLMManager
 from lightspeed_evaluation.core.llm.ragas import RagasLLMManager
@@ -11,6 +12,7 @@
     "LLMConfig",
     "LLMError",
     "LLMManager",
+    "BaseCustomLLM",
     "DeepEvalLLMManager",
     "RagasLLMManager",
     "validate_provider_env",
src/lightspeed_evaluation/core/llm/custom.py
Lines changed: 75 additions & 0 deletions

@@ -0,0 +1,75 @@
+"""Base Custom LLM class for evaluation framework."""
+
+from typing import Any, Optional, Union
+
+import litellm
+
+from lightspeed_evaluation.core.system.exceptions import LLMError
+
+
+class BaseCustomLLM:  # pylint: disable=too-few-public-methods
+    """Base LLM class with core calling functionality."""
+
+    def __init__(self, model_name: str, llm_params: dict[str, Any]):
+        """Initialize with model configuration."""
+        self.model_name = model_name
+        self.llm_params = llm_params
+
+    def call(
+        self,
+        prompt: str,
+        n: int = 1,
+        temperature: Optional[float] = None,
+        return_single: bool = True,
+        **kwargs: Any,
+    ) -> Union[str, list[str]]:
+        """Make LLM call and return response(s).
+
+        Args:
+            prompt: Text prompt to send
+            n: Number of responses to generate (default 1)
+            temperature: Override temperature (uses config default if None)
+            return_single: If True and n=1, return single string. If False, always return list.
+            **kwargs: Additional LLM parameters
+
+        Returns:
+            Single string if return_single=True and n=1, otherwise list of strings
+        """
+        temp = (
+            temperature
+            if temperature is not None
+            else self.llm_params.get("temperature", 0.0)
+        )
+
+        call_params = {
+            "model": self.model_name,
+            "messages": [{"role": "user", "content": prompt}],
+            "temperature": temp,
+            "n": n,
+            "max_tokens": self.llm_params.get("max_tokens"),
+            "timeout": self.llm_params.get("timeout"),
+            "num_retries": self.llm_params.get("num_retries", 3),
+            **kwargs,
+        }
+
+        try:
+            response = litellm.completion(**call_params)
+
+            # Extract content from all choices
+            results = []
+            for choice in response.choices:  # type: ignore
+                content = choice.message.content  # type: ignore
+                if content is None:
+                    content = ""
+                results.append(content.strip())
+
+            # Return format based on parameters
+            if return_single and n == 1:
+                if not results:
+                    raise LLMError("LLM returned empty response")
+                return results[0]
+
+            return results
+
+        except Exception as e:
+            raise LLMError(f"LLM call failed: {str(e)}") from e
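Per the commit message, subclasses can now delegate their litellm plumbing to this shared call method instead of duplicating it. A minimal sketch of a consumer; the SimpleEvalLLM class, model name, and parameter values below are hypothetical illustrations, not part of this commit:

# Hypothetical consumer (illustration only, not part of this commit).
from lightspeed_evaluation.core.llm import BaseCustomLLM


class SimpleEvalLLM(BaseCustomLLM):
    """Example wrapper that reuses the shared litellm call."""

    def judge(self, question: str, answer: str) -> str:
        prompt = f"Rate this answer.\nQ: {question}\nA: {answer}"
        # n=1 with return_single=True (the defaults) yields a single string.
        return self.call(prompt, temperature=0.0)


llm = SimpleEvalLLM("openai/gpt-4o-mini", {"max_tokens": 256, "timeout": 30})
verdict = llm.judge("What is 2 + 2?", "4")
samples = llm.call("Suggest one edge case.", n=3, return_single=False)  # list[str]

Note the design choice: return_single keeps the common single-response path ergonomic (a plain str), callers passing n > 1 get a list, and num_retries defaults to 3 so transient provider failures are retried before an LLMError is raised.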
