
Commit f7f9b70

Hconeai to helicone (#2140)
* oai.hconeai -> helicone
* more
* revert wrangler
1 parent ea458bf commit f7f9b70
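
The change is a mechanical domain migration: every Helicone proxy base URL in code, docs, and examples moves from `*.hconeai.com` to `*.helicone.ai`. As a minimal sketch of what this means for a downstream caller (assuming the openai-node v4 client used throughout the diffs below; the env-var names are illustrative, not from this commit):

```typescript
import OpenAI from "openai";

// Before this commit, requests were proxied through the old domain:
//   baseURL: "https://oai.hconeai.com/v1"
// After it, only the hostname changes; paths and headers stay the same.
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY, // illustrative env var
  baseURL: "https://oai.helicone.ai/v1",
  defaultHeaders: {
    // illustrative env var; the Helicone-Auth header itself is unchanged
    "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`,
  },
});
```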

77 files changed (+637, -565 lines)


.env.example (+1 -1)

@@ -6,4 +6,4 @@ NEXT_PUBLIC_SUPABASE_URL="http://localhost:54321"
 SUPABASE_SERVICE_KEY="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU"
 SUPABASE_URL="http://localhost:54321"
 NEXT_PUBLIC_HELICONE_RESTRICT_PRO="true"
-NEXT_PUBLIC_BASE_PATH="https://oai.hconeai.com/v1"
+NEXT_PUBLIC_BASE_PATH="https://oai.helicone.ai/v1"

bifrost/components/shared/diffHighlight.tsx (+3 -3)

@@ -12,7 +12,7 @@ export function DiffHighlight(props: DiffHighlightProps) {
 
   const hasBaseURL = (tokens: Token[]) => {
     return tokens.some((token) =>
-      token.content.includes("https://oai.hconeai.com/v1")
+      token.content.includes("https://oai.helicone.ai/v1")
     );
   };
 
@@ -26,8 +26,8 @@ export function DiffHighlight(props: DiffHighlightProps) {
           <span className="text-gray-500 pr-4">{i + 1}</span>
           {line.map((token, key) => {
             if (
-              token.content.includes("https://oai.hconeai.com") ||
-              token.content.includes("https://anthropic.hconeai.com")
+              token.content.includes("https://oai.helicone.ai") ||
+              token.content.includes("https://anthropic.helicone.ai")
             ) {
               return (
                 <span
bifrost/components/templates/landing/integrations.tsx (+11 -11)

@@ -48,7 +48,7 @@ const Integrations = (props: IntegrationsProps) => {
 
 const openai = new OpenAI({
   apiKey: request.env.OPENAI_API_KEY,
-  baseURL: "https://oai.hconeai.com/v1",
+  baseURL: "https://oai.helicone.ai/v1",
   defaultHeaders: {
     "Helicone-Auth": Bearer <HELICONE_API_KEY>,
   },
@@ -60,7 +60,7 @@ const openai = new OpenAI({
 
 client = OpenAI(
   api_key="your-api-key-here",
-  base_url="https://oai.hconeai.com/v1",
+  base_url="https://oai.helicone.ai/v1",
   default_headers= {
     "Helicone-Auth": Bearer <HELICONE_API_KEY>,
   }
@@ -69,7 +69,7 @@ client = OpenAI(
   langchain: {
     language: "python",
     code: `llm = ChatOpenAI(
-  openai_api_base="https://oai.hconeai.com/v1"
+  openai_api_base="https://oai.helicone.ai/v1"
   openai_api_key='<>',
   model_kwargs={
     "extra_headers":{
@@ -83,7 +83,7 @@ client = OpenAI(
     code: `const llm = new OpenAI({
   modelName: "gpt-3.5-turbo",
   configuration: {
-    basePath: "https://oai.hconeai.com/v1",
+    basePath: "https://oai.helicone.ai/v1",
     defaultHeaders: {
       "Helicone-Auth": Bearer <HELICONE_API_KEY>,
     },
@@ -110,7 +110,7 @@ client = OpenAI(
     code: `import OpenAI from "openai";
 
 const openai = new OpenAI({
-  baseURL: "https://oai.hconeai.com/openai/deployments/[DEPLOYMENT_NAME]",
+  baseURL: "https://oai.helicone.ai/openai/deployments/[DEPLOYMENT_NAME]",
   defaultHeaders: {
     "Helicone-Auth": Bearer <HELICONE_API_KEY>,
     "Helicone-OpenAI-API-Base": "https://[AZURE_DOMAIN].openai.azure.com",
@@ -127,7 +127,7 @@ const openai = new OpenAI({
 
 client = OpenAI(
   api_key="[AZURE_OPENAI_API_KEY]",
-  base_url="https://oai.hconeai.com/openai/deployments/[DEPLOYMENT]",
+  base_url="https://oai.helicone.ai/openai/deployments/[DEPLOYMENT]",
   default_headers={
     "Helicone-OpenAI-Api-Base": "https://[AZURE_DOMAIN].openai.azure.com",
     "Helicone-Auth": Bearer <HELICONE_API_KEY>,
@@ -148,7 +148,7 @@ helicone_headers = {
 }
 
 self.model = AzureChatOpenAI(
-  openai_api_base="https://oai.hconeai.com"
+  openai_api_base="https://oai.helicone.ai"
   deployment_name="gpt-35-turbo",
   openai_api_key=<AZURE_OPENAI_API_KEY>,
   openai_api_version="2023-05-15",
@@ -164,7 +164,7 @@ self.model = AzureChatOpenAI(
   azureOpenAIApiKey: "[AZURE_OPENAI_API_KEY]",
   azureOpenAIApiDeploymentName: "openai/deployments/gpt-35-turbo",
   azureOpenAIApiVersion: "2023-03-15-preview",
-  azureOpenAIBasePath: "https://oai.hconeai.com",
+  azureOpenAIBasePath: "https://oai.helicone.ai",
   configuration: {
     organization: "[organization]",
     baseOptions: {
@@ -197,7 +197,7 @@ self.model = AzureChatOpenAI(
     code: `import Anthropic from "@anthropic-ai/sdk";
 
 const anthropic = new Anthropic({
-  baseURL: "https://anthropic.hconeai.com/",
+  baseURL: "https://anthropic.helicone.ai/",
   apiKey: process.env.ANTHROPIC_API_KEY,
   defaultHeaders: {
     "Helicone-Auth": <HELICONE_API_KEY>,
@@ -217,7 +217,7 @@ await anthropic.messages.create({
 
 client = anthropic.Anthropic(
   api_key=os.environ.get("ANTHROPIC_API_KEY"),
-  base_url="https://anthropic.hconeai.com/v1"
+  base_url="https://anthropic.helicone.ai/v1"
   defaultHeaders={
     "Helicone-Auth": <HELICONE_API_KEY>,
   },
@@ -238,7 +238,7 @@ client.messages.create(
   modelName: "claude-2",
   anthropicApiKey: "ANTHROPIC_API_KEY",
   clientOptions: {
-    baseURL: "https://anthropic.hconeai.com/",
+    baseURL: "https://anthropic.helicone.ai/",
     defaultHeaders: {
       "Helicone-Auth": Bearer <HELICONE_API_KEY>,
     },

docs/custom-parameters/overview.mdx (+3 -2)

@@ -1,6 +1,7 @@
 ---
 title: "Custom Parameters"
-description: "Custom parameters allow users to add extra metadata that is specific to their
+description:
+  "Custom parameters allow users to add extra metadata that is specific to their
   workflow which is common among LLM developers. Adding custom parameters help
   our users a bunch with"
 ---
@@ -41,7 +42,7 @@ import { Configuration, OpenAIApi } from "openai";
 dfkljd;
 const configuration = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
-  basePath: "https://oai.hconeai.com/v1",
+  basePath: "https://oai.helicone.ai/v1",
   baseOptions: {
     headers: {
       "Helicone-Prompt-ID": "wedding-cards-001",

docs/external/togetherai.mdx (+3 -3)

@@ -18,7 +18,7 @@ Start by creating a Helicone account and obtaining your API key from the [Helico
 
 ### 2. Implement Helicone
 
-Wherever you use the TogetherAI API, simply substitute the base URL with `https://together.hconeai.com` and include the `Helicone-Auth` header with your Helicone API key.
+Wherever you use the TogetherAI API, simply substitute the base URL with `https://together.helicone.ai` and include the `Helicone-Auth` header with your Helicone API key.
 
 ```python
 TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY")
@@ -27,7 +27,7 @@ HELICONE_API_KEY = os.environ.get("HELICONE_API_KEY")
 client = OpenAI(
   api_key=TOGETHER_API_KEY,
   # base_url='https://api.together.xyz/v1', Old
-  base_url='https://together.hconeai.com/v1', # Change to Helicone
+  base_url='https://together.helicone.ai/v1', # Change to Helicone
   default_headers={
     "Helicone-Auth": f"Bearer {HELICONE_API_KEY}", # Add Helicone API Key
   }
@@ -36,4 +36,4 @@ client = OpenAI(
 
 ### 3. Start Utilizing Helicone
 
-Congratulations! You are now using Helicone to proxy your requests to TogetherAI. You can now leverage Helicone's features such as data ingestion, caching, and analytics.
+Congratulations! You are now using Helicone to proxy your requests to TogetherAI. You can now leverage Helicone's features such as data ingestion, caching, and analytics.

docs/faq/secret-vs-public-key.mdx (+2 -2)

@@ -20,7 +20,7 @@ import OpenAI from "openai";
 // Use secret or public key
 const openai = new OpenAI({
   apiKey: request.env.OPENAI_API_KEY,
-  baseURL: "https://oai.hconeai.com/v1",
+  baseURL: "https://oai.helicone.ai/v1",
   defaultHeaders: {
     "Helicone-Auth": `Bearer ${HELICONE_SECRET_API_KEY}`,
   },
@@ -33,7 +33,7 @@ import OpenAI from "openai";
 // Use public key
 const openai = new OpenAI({
   apiKey: request.env.OPENAI_API_KEY,
-  baseURL: `https://oai.hconeai.com/${HELICONE_PUBLIC_API_KEY}/v1`,
+  baseURL: `https://oai.helicone.ai/${HELICONE_PUBLIC_API_KEY}/v1`,
 });
 ```
 

docs/features/advanced-usage/caching.mdx (+49 -30)

@@ -3,33 +3,40 @@ title: "Cache"
 description: "Reduce latency and save costs by caching on the edge"
 ---
 
-import QuestionsSection from '/snippets/questions-section.mdx';
+import QuestionsSection from "/snippets/questions-section.mdx";
 
-<Info>**Who can use this feature**: Anyone on any [plan](https://www.helicone.ai/pricing). However, the maximum number of caches you can store within a bucket is `20`. If you need to store more, you will need to upgrade to an enterprise plan.</Info>
+<Info>
+  **Who can use this feature**: Anyone on any
+  [plan](https://www.helicone.ai/pricing). However, the maximum number of caches
+  you can store within a bucket is `20`. If you need to store more, you will
+  need to upgrade to an enterprise plan.
+</Info>
 
 ## Introduction
-Caching, by temporarily storing data closer to the user at the edge, can significantly speed up access time and enhance your application performance.
 
-Helicone uses [Cloudflare Workers](https://www.cloudflare.com/network/) to temporarily store data closer to the user to ensure low latency, resulting in faster responses and an efficient app development process.
+Caching, by temporarily storing data closer to the user at the edge, can significantly speed up access time and enhance your application performance.
+
+Helicone uses [Cloudflare Workers](https://www.cloudflare.com/network/) to temporarily store data closer to the user to ensure low latency, resulting in faster responses and an efficient app development process.
 
 <Frame caption="Dashboard view of cache hits, cost and time saved. ">
   <img src="/images/example-cache.png" />
 </Frame>
 
-
 ## Why Cache
+
 - Faster response for commonly asked questions, resulting in better experience for your users.
 - Lower latency and reduce the load on backend resources by pre-computing results or frequently accessed data, so you can develop your app more efficiently.
 - Save money while testing your app by making fewer calls to model providers such as OpenAI.
 - Determine the most common requests with your application and visualize on a dashboard.
 
 ## Quick Start
+
 To get started, set `Helicone-Cache-Enabled` to true in the headers, or use the Python or NPM packages to turn it on via parameters.
 
 <CodeGroup>
 
 ```bash Curl
-curl https://oai.hconeai.com/v1/completions \
+curl https://oai.helicone.ai/v1/completions \
   -H 'Content-Type: application/json' \
   -H 'Authorization: Bearer YOUR_API_KEY' \
   -H 'Helicone-Cache-Enabled: true' \ # add this header and set to true
@@ -48,7 +55,7 @@ response = openai.Completion.create(
 ```
 
 ```python Python w/o Package
-openai.api_base = "https://oai.hconeai.com/v1"
+openai.api_base = "https://oai.helicone.ai/v1"
 
 openai.Completion.create(
   model="text-davinci-003",
@@ -66,7 +73,7 @@ const configuration = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
   heliconeMeta: {
     // ... other meta data
-    cache: true, // set cache to true
+    cache: true, // set cache to true
   },
 });
 
@@ -77,37 +84,41 @@ const openai = new OpenAIApi(configuration);
 import { Configuration, OpenAIApi } from "openai";
 const configuration = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
-  basePath: "https://oai.hconeai.com/v1",
+  basePath: "https://oai.helicone.ai/v1",
   defaultHeaders: {
     "Helicone-Cache-Enabled": "true", // add this header and set to true
   },
 });
 const openai = new OpenAIApi(configuration);
 ```
+
 </CodeGroup>
 
 ### Cache Parameters
 
-| Parameter | Description |
-| ------------------------------------------- | ----------------------------------------------------- |
-| `Helicone-Cache-Enabled` (required) | Set to `true` to enable storing and loading from your cache. |
-| `Cache-Control` (optional) | Configure cache limit as a `string` based on the [Cloudflare Cache Directive](https://developers.cloudflare.com/cache/about/cache-control#cache-control-directives). Currently we only support `max-age`, but we will be adding more configuration options soon. I.e. 1 hour is `max-age=3600`.|
-| `Helicone-Cache-Bucket-Max-Size` (optional) | Configure your Cache Bucket size as a `number`. |
-| `Helicone-Cache-Seed` (optional) | Define a separate cache state as a `string` to generate predictable results, i.e. `user-123`.|
+| Parameter | Description |
+| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `Helicone-Cache-Enabled` (required) | Set to `true` to enable storing and loading from your cache. |
+| `Cache-Control` (optional) | Configure cache limit as a `string` based on the [Cloudflare Cache Directive](https://developers.cloudflare.com/cache/about/cache-control#cache-control-directives). Currently we only support `max-age`, but we will be adding more configuration options soon. I.e. 1 hour is `max-age=3600`. |
+| `Helicone-Cache-Bucket-Max-Size` (optional) | Configure your Cache Bucket size as a `number`. |
+| `Helicone-Cache-Seed` (optional) | Define a separate cache state as a `string` to generate predictable results, i.e. `user-123`. |
 
-<Info>Header values have to be strings. For example, `"Helicone-Cache-Bucket-Max-Size": "10"`. </Info>
+<Info>
+  Header values have to be strings. For example,
+  `"Helicone-Cache-Bucket-Max-Size": "10"`.{" "}
+</Info>
 
 ### Changing Cache Limit
-The default cache limit is 7 days. To change the limit, add the `Cache-Control` header to your request.
+
+The default cache limit is 7 days. To change the limit, add the `Cache-Control` header to your request.
 
 **Example**: Setting the cache limit to 30 days, aka `2592000 seconds`
 
 ```tsx
 "Cache-Control": "max-age=2592000"
 ```
-<Note>The max cache limit is 365 days, or `max-age=31536000`. </Note>
-
 
+<Note>The max cache limit is 365 days, or `max-age=31536000`. </Note>
 
 ### Configuring Bucket Size
 
@@ -116,14 +127,14 @@ Simply add `Helicone-Cache-Bucket-Max-Size` with some number to choose how large
 <CodeGroup>
 
 ```python Python
-openai.api_base = "https://oai.hconeai.com/v1"
+openai.api_base = "https://oai.helicone.ai/v1"
 
 openai.Completion.create(
   model="text-davinci-003",
   prompt="Say this is a test",
   headers={
     "Helicone-Auth": f"Bearer {HELICONE_API_KEY}",
-    "Helicone-Cache-Enabled": "true", # mandatory
+    "Helicone-Cache-Enabled": "true", # mandatory
     "Helicone-Cache-Bucket-Max-Size": "3", # set cache bucket size to 3
   }
 )
@@ -133,9 +144,9 @@ headers={
 import { Configuration, OpenAIApi } from "openai";
 const configuration = new Configuration({
   apiKey: process.env.OPENAI_API_KEY,
-  basePath: "https://oai.hconeai.com/v1",
+  basePath: "https://oai.helicone.ai/v1",
   defaultHeaders: {
-    "Helicone-Cache-Enabled": "true", // mandatory
+    "Helicone-Cache-Enabled": "true", // mandatory
     "Helicone-Cache-Bucket-Max-Size": "3", // set cache bucket size to 3
   },
 });
@@ -146,7 +157,6 @@ const openai = new OpenAIApi(configuration);
 
 **Example**: A bucket size of 3
 
-
 ```
 openai.completion("give me a random number") -> "42"
 # Cache Miss
@@ -158,11 +168,16 @@ openai.completion("give me a random number") -> "17"
 openai.completion("give me a random number") -> This will randomly choose 42 | 47 | 17
 # Cache Hit
 ```
-<Note>The max number of caches you can store is `20` within a bucket, if you want more you will need to upgrade to an enterprise [plan](https://www.helicone.ai/pricing).</Note>
+
+<Note>
+  The max number of caches you can store is `20` within a bucket, if you want
+  more you will need to upgrade to an enterprise
+  [plan](https://www.helicone.ai/pricing).
+</Note>
 
 ### Adding Cache Seed
-When you make a request to Helicone with the same seed, you will receive the same cached response for the same query. This feature allows for predictable results, which can be beneficial in scenarios where you want to have a consistent cache across multiple requests.
 
+When you make a request to Helicone with the same seed, you will receive the same cached response for the same query. This feature allows for predictable results, which can be beneficial in scenarios where you want to have a consistent cache across multiple requests.
 
 To set a cache seed, add a header called `Helicone-Cache-Seed` with a string value for the seed.
 
@@ -190,15 +205,19 @@ openai.completion("give me a random number") -> "42"
 # Cache Seed "user-456"
 openai.completion("give me a random number") -> "17"
 ```
-<Tip> If you don’t like one of generated response stored in cache, you can update your seed to a different value as a way to clear your cache. </Tip>
+
+<Tip>
+  {" "}
+  If you don’t like one of generated response stored in cache, you can update your
+  seed to a different value as a way to clear your cache.{" "}
+</Tip>{" "}
 
 ### Extracting Cache Response Headers
 
 When cache is enabled, you can capture the cache status from the headers of the response, such as a `cache hit / miss` and the `cache bucket index` of the response returned.
 
-
 ```ts
-helicone-cache: "HIT" | "MISS" // indicates whether the response was cached.
+helicone-cache: "HIT" | "MISS" // indicates whether the response was cached.
 helicone-cache-bucket-idx: number // indicates the cache bucket index used.
 ```
 
@@ -207,7 +226,7 @@ helicone-cache-bucket-idx: number // indicates the cache bucket index used.
 ```python
 client = OpenAI(
   api_key="<OPENAI_API_KEY>",
-  base_url="https://oai.hconeai.com/v1",
+  base_url="https://oai.helicone.ai/v1",
   default_headers={
     "Helicone-Auth": f"Bearer <API_KEY>",
   }
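
The caching doc above names the response headers, but the hunk cuts off before showing them read back. A minimal sketch of doing so (plain `fetch` against the proxy; the helper and env-var names are hypothetical, not part of this commit):

```typescript
// Sketch: inspect Helicone's cache headers on a proxied chat completion.
// Assumes OPENAI_API_KEY and HELICONE_API_KEY are set in the environment.
async function checkCacheStatus(): Promise<void> {
  const res = await fetch("https://oai.helicone.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`,
      "Helicone-Cache-Enabled": "true",
    },
    body: JSON.stringify({
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Say this is a test" }],
    }),
  });
  // Headers documented in caching.mdx above.
  console.log(res.headers.get("helicone-cache")); // "HIT" | "MISS"
  console.log(res.headers.get("helicone-cache-bucket-idx")); // bucket index
}
```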
