Commit 6163277

helicone deeper integration

1 parent 647dcd0 commit 6163277

3 files changed (+117 −5 lines)

docs/howtos/integrations/helicone.ipynb (+3 −2)

@@ -53,10 +53,11 @@
     "from datasets import Dataset\n",
     "from ragas import evaluate\n",
     "from ragas.metrics import faithfulness, answer_relevancy, context_precision\n",
+    "from ragas.integrations.helicone import helicone_config # import helicone_config\n",
+    "\n",
     "\n",
     "# Set up Helicone\n",
-    "HELICONE_API_KEY = \"your_helicone_api_key_here\" # Replace with your actual Helicone API key\n",
-    "os.environ[\"OPENAI_API_BASE\"] = f\"https://oai.helicone.ai/{HELICONE_API_KEY}/v1\"\n",
+    "helicone_config.api_key = \"your_helicone_api_key_here\" # Replace with your actual Helicone API key\n",
     "os.environ[\"OPENAI_API_KEY\"] = \"your_openai_api_key_here\" # Replace with your actual OpenAI API key\n",
     "\n",
     "# Verify Helicone API key is set\n",

src/ragas/integrations/helicone.py (+96, new file)

@@ -0,0 +1,96 @@
+from dataclasses import dataclass, field
+from typing import Any, ClassVar, Dict, Optional
+
+
+@dataclass
+class CacheConfig:
+    ttl: int = 60 * 60 * 24 * 30  # 30 days
+    maxsize: int = 1000
+
+
+@dataclass
+class HeliconeSingleton:
+    api_key: Optional[str] = None
+    base_url: Optional[str] = "https://oai.helicone.ai"
+    cache_config: Optional[CacheConfig] = None
+    _instance: ClassVar[Optional["HeliconeSingleton"]] = None
+
+    # New fields for configurable headers
+    target_url: Optional[str] = None
+    openai_api_base: Optional[str] = None
+    request_id: Optional[str] = None
+    model_override: Optional[str] = None
+    prompt_id: Optional[str] = None
+    user_id: Optional[str] = None
+    fallbacks: Optional[str] = None
+    rate_limit_policy: Optional[str] = None
+    session_id: Optional[str] = None
+    session_path: Optional[str] = None
+    session_name: Optional[str] = None
+    posthog_key: Optional[str] = None
+    posthog_host: Optional[str] = None
+    omit_response: Optional[bool] = None
+    omit_request: Optional[bool] = None
+    cache_enabled: Optional[bool] = None
+    retry_enabled: Optional[bool] = None
+    moderations_enabled: Optional[bool] = None
+    llm_security_enabled: Optional[bool] = None
+    stream_force_format: Optional[bool] = None
+    custom_properties: Dict[str, str] = field(default_factory=dict)
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+        return cls._instance
+
+    def default_headers(self) -> Dict[str, Any]:
+        headers = {"Helicone-Auth": f"Bearer {self.api_key}"}
+
+        if self.target_url:
+            headers["Helicone-Target-URL"] = self.target_url
+        if self.openai_api_base:
+            headers["Helicone-OpenAI-Api-Base"] = self.openai_api_base
+        if self.request_id:
+            headers["Helicone-Request-Id"] = self.request_id
+        if self.model_override:
+            headers["Helicone-Model-Override"] = self.model_override
+        if self.prompt_id:
+            headers["Helicone-Prompt-Id"] = self.prompt_id
+        if self.user_id:
+            headers["Helicone-User-Id"] = self.user_id
+        if self.fallbacks:
+            headers["Helicone-Fallbacks"] = self.fallbacks
+        if self.rate_limit_policy:
+            headers["Helicone-RateLimit-Policy"] = self.rate_limit_policy
+        if self.session_id:
+            headers["Helicone-Session-Id"] = self.session_id
+        if self.session_path:
+            headers["Helicone-Session-Path"] = self.session_path
+        if self.session_name:
+            headers["Helicone-Session-Name"] = self.session_name
+        if self.posthog_key:
+            headers["Helicone-Posthog-Key"] = self.posthog_key
+        if self.posthog_host:
+            headers["Helicone-Posthog-Host"] = self.posthog_host
+
+        # Boolean headers; None means "leave the header unset"
+        for header, value in {
+            "Helicone-Omit-Response": self.omit_response,
+            "Helicone-Omit-Request": self.omit_request,
+            "Helicone-Cache-Enabled": "true" if (self.cache_enabled or self.cache_config) else None,
+            "Helicone-Retry-Enabled": self.retry_enabled,
+            "Helicone-Moderations-Enabled": self.moderations_enabled,
+            "Helicone-LLM-Security-Enabled": self.llm_security_enabled,
+            "Helicone-Stream-Force-Format": self.stream_force_format,
+        }.items():
+            if value is not None:
+                headers[header] = str(value).lower()
+
+        # Custom properties
+        for key, value in self.custom_properties.items():
+            headers[f"Helicone-Property-{key}"] = value
+
+        return headers
+
+
+helicone_config = HeliconeSingleton()
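Since every field defaults to None, default_headers() emits only what has been configured; an unconfigured singleton contributes nothing beyond Helicone-Auth. A small sketch of the resulting header dict (key and property values are placeholders):

from ragas.integrations.helicone import CacheConfig, helicone_config

helicone_config.api_key = "your_helicone_api_key_here"  # placeholder
helicone_config.user_id = "user-123"                    # placeholder
helicone_config.cache_config = CacheConfig()            # opt in to caching
helicone_config.custom_properties = {"app": "ragas-eval"}

print(helicone_config.default_headers())
# {'Helicone-Auth': 'Bearer your_helicone_api_key_here',
#  'Helicone-User-Id': 'user-123',
#  'Helicone-Cache-Enabled': 'true',
#  'Helicone-Property-app': 'ragas-eval'}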

src/ragas/llms/base.py (+18 −3)

@@ -15,6 +15,7 @@
 from langchain_openai.llms import AzureOpenAI, OpenAI
 from langchain_openai.llms.base import BaseOpenAI
 
+from ragas.integrations.helicone import helicone_config
 from ragas.run_config import RunConfig, add_async_retry, add_retry
 
 if t.TYPE_CHECKING:
@@ -103,7 +104,8 @@ async def generate(
             )
         else:
             loop = asyncio.get_event_loop()
-            generate_text_with_retry = add_retry(self.generate_text, self.run_config)
+            generate_text_with_retry = add_retry(
+                self.generate_text, self.run_config)
             generate_text = partial(
                 generate_text_with_retry,
                 prompt=prompt,
@@ -240,7 +242,8 @@ def check_args(
         callbacks: Callbacks,
     ) -> dict[str, t.Any]:
         if n != 1:
-            logger.warning("n values greater than 1 not support for LlamaIndex LLMs")
+            logger.warning(
+                "n values greater than 1 not supported for LlamaIndex LLMs")
         if temperature != 1e-8:
             logger.info("temperature kwarg passed to LlamaIndex LLM")
         if stop is not None:
@@ -294,5 +297,17 @@ def llm_factory(
     timeout = None
     if run_config is not None:
         timeout = run_config.timeout
-    openai_model = ChatOpenAI(model=model, timeout=timeout)
+
+    default_headers = None
+    base_url = None
+    if helicone_config.api_key is not None:
+        default_headers = helicone_config.default_headers()
+        base_url = helicone_config.base_url
+
+    openai_model = ChatOpenAI(
+        model=model,
+        timeout=timeout,
+        default_headers=default_headers,
+        base_url=base_url
+    )
     return LangchainLLMWrapper(openai_model, run_config)
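The net effect on llm_factory(): with no Helicone key set, default_headers and base_url stay None and ChatOpenAI behaves exactly as before; once a key is set on the singleton, every model the factory builds sends its requests through the Helicone proxy at helicone_config.base_url. A short sketch (keys are placeholders):

import os

from ragas.integrations.helicone import helicone_config
from ragas.llms.base import llm_factory

os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here"  # placeholder

# No Helicone key: a plain ChatOpenAI client against api.openai.com.
plain_llm = llm_factory()

# Key set: the same call now attaches Helicone-Auth (plus any other
# configured headers) and points base_url at https://oai.helicone.ai.
helicone_config.api_key = "your_helicone_api_key_here"  # placeholder
proxied_llm = llm_factory()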
