Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 8 additions & 10 deletions docs/my-website/docs/interactions.md
Original file line number Diff line number Diff line change
Expand Up @@ -130,13 +130,12 @@ Point the Google GenAI SDK to LiteLLM Proxy:

```python showLineNumbers title="Google GenAI SDK with LiteLLM Proxy"
from google import genai

# Point SDK to LiteLLM Proxy
client = genai.Client(
    api_key="sk-1234",  # Your LiteLLM API key
    http_options={"base_url": "http://localhost:4000"},
)

# Create an interaction
interaction = client.interactions.create(
Expand All @@ -151,12 +150,11 @@ print(interaction.outputs[-1].text)

```python showLineNumbers title="Google GenAI SDK Streaming"
from google import genai

client = genai.Client(
    api_key="sk-1234",  # Your LiteLLM API key
    http_options={"base_url": "http://localhost:4000"},
)

for chunk in client.interactions.create_stream(
model="gemini/gemini-2.5-flash",
Expand Down
75 changes: 37 additions & 38 deletions docs/my-website/docs/pass_through/google_ai_studio.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,26 +35,25 @@ curl 'http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:countTokens?key=
```

</TabItem>
<TabItem value="js" label="Google GenAI JS SDK">

```javascript
const { GoogleGenAI } = require("@google/genai");

const ai = new GoogleGenAI({
  apiKey: "sk-1234", // litellm proxy API key
  httpOptions: {
    baseUrl: "http://localhost:4000/gemini", // http://<proxy-base-url>/gemini
  },
});

async function main() {
try {
    const response = await ai.models.generateContent({
      model: "gemini-2.5-flash",
      contents: "Explain how AI works",
    });
    console.log(response.text);
} catch (error) {
console.error('Error:', error);
}
Expand All @@ -63,12 +62,13 @@ async function main() {
// For streaming responses
async function main_streaming() {
try {
    const response = await ai.models.generateContentStream({
      model: "gemini-2.5-flash",
      contents: "Explain how AI works",
    });
    for await (const chunk of response) {
      process.stdout.write(chunk.text);
    }
} catch (error) {
console.error('Error:', error);
}
Expand Down Expand Up @@ -321,29 +321,28 @@ curl 'http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:generateContent?
```

</TabItem>
<TabItem value="js" label="Google GenAI JS SDK">

```javascript
const { GoogleGenAI } = require("@google/genai");

const ai = new GoogleGenAI({
  apiKey: "sk-1234", // litellm proxy API key
  httpOptions: {
    baseUrl: "http://localhost:4000/gemini", // http://<proxy-base-url>/gemini
    headers: {
      "tags": "gemini-js-sdk,pass-through-endpoint",
    },
  },
});

async function main() {
try {
    const response = await ai.models.generateContent({
      model: "gemini-2.5-flash",
      contents: "Explain how AI works",
    });
    console.log(response.text);
} catch (error) {
console.error('Error:', error);
}
Expand Down
Loading
Loading