Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
445 changes: 261 additions & 184 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion core/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ require github.com/joho/godotenv v1.5.1
require (
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/config v1.29.14
github.com/goccy/go-json v0.10.5
github.com/maximhq/bifrost/plugins v1.0.0
github.com/valyala/fasthttp v1.60.0
)
Expand All @@ -24,7 +25,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
github.com/aws/smithy-go v1.22.3 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/maximhq/maxim-go v0.1.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
Expand Down
5 changes: 3 additions & 2 deletions core/tests/account.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"os"
"time"

bifrost "github.com/maximhq/bifrost/core"
schemas "github.com/maximhq/bifrost/core/schemas"
"github.com/maximhq/bifrost/core/schemas/meta"
)
Expand Down Expand Up @@ -156,7 +157,7 @@ func (baseAccount *BaseAccount) GetConfigForProvider(providerKey schemas.ModelPr
},
MetaConfig: &meta.BedrockMetaConfig{
SecretAccessKey: os.Getenv("BEDROCK_ACCESS_KEY"),
Region: StrPtr("us-east-1"),
Region: bifrost.Ptr("us-east-1"),
},
ConcurrencyAndBufferSize: schemas.ConcurrencyAndBufferSize{
Concurrency: 3,
Expand Down Expand Up @@ -189,7 +190,7 @@ func (baseAccount *BaseAccount) GetConfigForProvider(providerKey schemas.ModelPr
Deployments: map[string]string{
"gpt-4o": "gpt-4o-aug",
},
APIVersion: StrPtr("2024-08-01-preview"),
APIVersion: bifrost.Ptr("2024-08-01-preview"),
},
ConcurrencyAndBufferSize: schemas.ConcurrencyAndBufferSize{
Concurrency: 3,
Expand Down
4 changes: 0 additions & 4 deletions core/tests/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,3 @@ func getBifrost() (*bifrost.Bifrost, error) {

return b, nil
}

func StrPtr(s string) *string {
return &s
}
30 changes: 15 additions & 15 deletions core/tests/tests.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ var WeatherToolParams = schemas.ModelParameters{
// - bifrost: The Bifrost instance to use for the request
// - config: Test configuration containing model and parameters
// - ctx: Context for the request
func setupTextCompletionRequest(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Context) {
func setupTextCompletionRequest(bifrostClient *bifrost.Bifrost, config TestConfig, ctx context.Context) {
text := "Hello world!"
if config.CustomTextCompletion != nil {
text = *config.CustomTextCompletion
Expand All @@ -94,7 +94,7 @@ func setupTextCompletionRequest(bifrost *bifrost.Bifrost, config TestConfig, ctx
}

go func() {
result, err := bifrost.TextCompletionRequest(config.Provider, &schemas.BifrostRequest{
result, err := bifrostClient.TextCompletionRequest(config.Provider, &schemas.BifrostRequest{
Model: config.TextModel,
Input: schemas.RequestInput{
TextCompletionInput: &text,
Expand All @@ -117,7 +117,7 @@ func setupTextCompletionRequest(bifrost *bifrost.Bifrost, config TestConfig, ctx
// - bifrost: The Bifrost instance to use for the requests
// - config: Test configuration containing model and parameters
// - ctx: Context for the requests
func setupChatCompletionRequests(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Context) {
func setupChatCompletionRequests(bifrostClient *bifrost.Bifrost, config TestConfig, ctx context.Context) {
messages := config.Messages
if len(messages) == 0 {
messages = CommonTestMessages
Expand All @@ -138,7 +138,7 @@ func setupChatCompletionRequests(bifrost *bifrost.Bifrost, config TestConfig, ct
Content: &msg,
},
}
result, err := bifrost.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
result, err := bifrostClient.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
Model: config.ChatModel,
Input: schemas.RequestInput{
ChatCompletionInput: &messages,
Expand All @@ -162,7 +162,7 @@ func setupChatCompletionRequests(bifrost *bifrost.Bifrost, config TestConfig, ct
// - bifrost: The Bifrost instance to use for the requests
// - config: Test configuration containing model and parameters
// - ctx: Context for the requests
func setupImageTests(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Context) {
func setupImageTests(bifrostClient *bifrost.Bifrost, config TestConfig, ctx context.Context) {
params := schemas.ModelParameters{}
if config.CustomParams != nil {
params = *config.CustomParams
Expand All @@ -172,20 +172,20 @@ func setupImageTests(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Co
urlImageMessages := []schemas.Message{
{
Role: schemas.RoleUser,
Content: StrPtr("What is Happening in this picture?"),
Content: bifrost.Ptr("What is Happening in this picture?"),
ImageContent: &schemas.ImageContent{
Type: StrPtr("url"),
Type: bifrost.Ptr("url"),
URL: "https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg",
},
},
}

if config.Provider == schemas.Anthropic {
urlImageMessages[0].ImageContent.Type = StrPtr("url")
urlImageMessages[0].ImageContent.Type = bifrost.Ptr("url")
}

go func() {
result, err := bifrost.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
result, err := bifrostClient.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
Model: config.ChatModel,
Input: schemas.RequestInput{
ChatCompletionInput: &urlImageMessages,
Expand All @@ -205,17 +205,17 @@ func setupImageTests(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Co
base64ImageMessages := []schemas.Message{
{
Role: schemas.RoleUser,
Content: StrPtr("What is this image about?"),
Content: bifrost.Ptr("What is this image about?"),
ImageContent: &schemas.ImageContent{
Type: StrPtr("base64"),
Type: bifrost.Ptr("base64"),
URL: "/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAIAAoDASIAAhEBAxEB/8QAFQABAQAAAAAAAAAAAAAAAAAAAAb/xAAUEAEAAAAAAAAAAAAAAAAAAAAA/8QAFQEBAQAAAAAAAAAAAAAAAAAAAAX/xAAUEQEAAAAAAAAAAAAAAAAAAAAA/9oADAMBAAIRAxEAPwCdABmX/9k=",
MediaType: StrPtr("image/jpeg"),
MediaType: bifrost.Ptr("image/jpeg"),
},
},
}

go func() {
result, err := bifrost.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
result, err := bifrostClient.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
Model: config.ChatModel,
Input: schemas.RequestInput{
ChatCompletionInput: &base64ImageMessages,
Expand All @@ -239,7 +239,7 @@ func setupImageTests(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Co
// - bifrost: The Bifrost instance to use for the requests
// - config: Test configuration containing model and parameters
// - ctx: Context for the requests
func setupToolCalls(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Context) {
func setupToolCalls(bifrostClient *bifrost.Bifrost, config TestConfig, ctx context.Context) {
messages := []string{"What's the weather like in Mumbai?"}

params := WeatherToolParams
Expand All @@ -263,7 +263,7 @@ func setupToolCalls(bifrost *bifrost.Bifrost, config TestConfig, ctx context.Con
Content: &msg,
},
}
result, err := bifrost.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
result, err := bifrostClient.ChatCompletionRequest(config.Provider, &schemas.BifrostRequest{
Model: config.ChatModel,
Input: schemas.RequestInput{
ChatCompletionInput: &messages,
Expand Down
5 changes: 5 additions & 0 deletions core/utils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
package bifrost

// Ptr returns a pointer to a copy of v. It is a convenience helper for
// populating optional, pointer-typed fields from literals,
// e.g. Ptr("us-east-1") or Ptr(0.7).
func Ptr[T any](v T) *T {
	value := v
	return &value
}
2 changes: 1 addition & 1 deletion transports/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ ARG TRANSPORT_TYPE=http

# Initialize Go module and fetch the bifrost transport package
RUN go mod init bifrost-transports && \
go get github.com/maximhq/bifrost/transports/${TRANSPORT_TYPE}@latest
go get github.com/maximhq/bifrost/transports/bifrost-${TRANSPORT_TYPE}@latest

# Build the binary from the fetched package with static linking
RUN go build -ldflags="-w -s" -o /app/main github.com/maximhq/bifrost/transports/${TRANSPORT_TYPE} && \
Expand Down
44 changes: 30 additions & 14 deletions transports/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ This package contains clients for various transports that can be used to spin up
## 🚀 Setting Up Transports

### Prerequisites

- Go 1.23 or higher (if not using Docker)
- Access to at least one AI model provider (OpenAI, Anthropic, etc.)
- API keys for the providers you wish to use
Expand All @@ -31,16 +32,17 @@ This package contains clients for various transports that can be used to spin up
Bifrost uses a combination of a JSON configuration file and environment variables:

1. **JSON Configuration File**: Bifrost requires a configuration file to set up the gateway. This includes all your provider-level settings, keys, and meta configs for each of your providers.

2. **Environment Variables**: If you don't want to include your keys in your config file, you can provide a `.env` file and add a prefix of `env.` followed by its key in your `.env` file.
2. **Environment Variables**: If you don't want to include your keys directly in the config file, you can store them in a `.env` file and reference them in the config by prefixing the variable name with `env.` (e.g. `env.OPENAI_API_KEY`).

```json
{
"keys": [{
"value": "env.OPENAI_API_KEY",
"models": ["gpt-4o-mini", "gpt-4-turbo"],
"weight": 1.0
}]
"keys": [
{
"value": "env.OPENAI_API_KEY",
"models": ["gpt-4o-mini", "gpt-4-turbo"],
"weight": 1.0
}
]
}
```

Expand All @@ -63,7 +65,13 @@ Please refer to `config.example.json` and `.env.sample` for examples.

### Docker Setup

You can run Bifrost using our **independent Dockerfile**. Just copy our Dockerfile and run these commands to get your Bifrost instance up and running:
1. Download the Dockerfile:

```bash
curl -L -o Dockerfile https://raw.githubusercontent.com/maximhq/bifrost/main/transports/Dockerfile
```

2. Build the Docker image:

```bash
docker build \
Expand All @@ -72,7 +80,11 @@ docker build \
--build-arg PORT=8080 \
--build-arg POOL_SIZE=300 \
-t bifrost-transports .
```

3. Run the Docker container:

```bash
docker run -p 8080:8080 bifrost-transports
```

Expand All @@ -87,37 +99,41 @@ If you wish to run Bifrost in your Go environment, follow these steps:
1. Install your binary:

```bash
go install github.com/maximhq/bifrost/transports/http@latest
go install github.com/maximhq/bifrost/transports/bifrost-http@latest
```

2. Run your binary:

- If it's in your PATH:

```bash
http -config config.json -env .env -port 8080 -pool-size 300
bifrost-http -config config.json -env .env -port 8080 -pool-size 300
```

- Otherwise:

```bash
./http -config config.json -env .env -port 8080 -pool-size 300
./bifrost-http -config config.json -env .env -port 8080 -pool-size 300
```

You can also add a flag for `-drop-excess-requests=false` in your command to drop excess requests when the buffer is full. Read more about `DROP_EXCESS_REQUESTS` and `POOL_SIZE` [here](https://github.com/maximhq/bifrost/tree/main?tab=README-ov-file#additional-configurations).

## 🧰 Usage

Ensure that:

- Bifrost's HTTP server is running
- The providers/models you use are configured in your JSON config file

### Text Completions

```bash
# Make sure to set up anthropic and claude-2.1 in your config.json
curl -X POST http://localhost:8080/v1/text/completions \
-H "Content-Type: application/json" \
-d '{
"provider": "openai",
"model": "gpt-4o-mini",
"provider": "anthropic",
"model": "claude-2.1",
"text": "Once upon a time in the land of AI,",
"params": {
"temperature": 0.7,
Expand Down Expand Up @@ -175,4 +191,4 @@ Read more about fallbacks and other additional configurations [here](https://git

---

Built with ❤️ by [Maxim](https://github.com/maximhq)
Built with ❤️ by [Maxim](https://github.com/maximhq)
File renamed without changes.