diff --git a/transports/bifrost-http/integrations/anthropic/router.go b/transports/bifrost-http/integrations/anthropic/router.go new file mode 100644 index 0000000000..2fd374e02c --- /dev/null +++ b/transports/bifrost-http/integrations/anthropic/router.go @@ -0,0 +1,49 @@ +package anthropic + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// AnthropicRouter holds route registrations for anthropic endpoints. +type AnthropicRouter struct { + client *bifrost.Bifrost +} + +// NewAnthropicRouter creates a new AnthropicRouter with the given bifrost client. +func NewAnthropicRouter(client *bifrost.Bifrost) *AnthropicRouter { + return &AnthropicRouter{client: client} +} + +// RegisterRoutes registers all anthropic routes on the given router. +func (a *AnthropicRouter) RegisterRoutes(r *router.Router) { + r.POST("/anthropic/v1/messages", a.handleChatCompletion) +} + +// handleChatCompletion handles POST /anthropic/v1/messages +func (a *AnthropicRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := a.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) +} diff --git a/transports/bifrost-http/integrations/anthropic/types.go b/transports/bifrost-http/integrations/anthropic/types.go new file mode 100644 index 0000000000..82f15d9db1 --- /dev/null +++ b/transports/bifrost-http/integrations/anthropic/types.go @@ -0,0 +1,48 @@ +package anthropic + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest represents the Anthropic messages API request. +// Only a subset of parameters are supported. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest { + if model == "" { + model = r.Model + } + params := &schemas.ModelParameters{ + Temperature: r.Temperature, + TopP: r.TopP, + TopK: r.TopK, + MaxTokens: r.MaxTokens, + StopSequences: r.StopSequences, + PresencePenalty: r.PresencePenalty, + FrequencyPenalty: r.FrequencyPenalty, + ParallelToolCalls: r.ParallelToolCalls, + Tools: r.Tools, + ToolChoice: r.ToolChoice, + } + + return &schemas.BifrostRequest{ + Provider: schemas.Anthropic, + Model: model, + Input: schemas.RequestInput{ + ChatCompletionInput: &r.Messages, + }, + Params: params, + } +} diff --git a/transports/bifrost-http/integrations/anthropic/types_test.go b/transports/bifrost-http/integrations/anthropic/types_test.go new file mode 100644 index 0000000000..a296d079d3 --- /dev/null +++ b/transports/bifrost-http/integrations/anthropic/types_test.go @@ -0,0 +1,35 @@ +package anthropic + +import ( + "testing" + + bifrost "github.com/maximhq/bifrost/core" + schemas "github.com/maximhq/bifrost/core/schemas" +) + +func TestConvertToBifrostRequest(t *testing.T) { + temp := 0.5 + req := ChatCompletionRequest{ + Model: "claude-test", + Messages: []schemas.BifrostMessage{ + {Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")}, + }, + Temperature: &temp, + } + + bfReq := req.ConvertToBifrostRequest("") + + if bfReq.Provider != schemas.Anthropic { + t.Errorf("expected provider %s, got %s", schemas.Anthropic, bfReq.Provider) + } + if bfReq.Model != "claude-test" { + t.Errorf("expected model claude-test, got %s", bfReq.Model) + } + if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp { + t.Errorf("temperature not copied") + } + if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 { + t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput) + } +} + diff --git a/transports/bifrost-http/integrations/langchain/router.go b/transports/bifrost-http/integrations/langchain/router.go new file mode 100644 index 0000000000..82ab182bdc --- /dev/null +++ b/transports/bifrost-http/integrations/langchain/router.go @@ -0,0 +1,49 @@ +package langchain + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// LangChainRouter holds route registrations for langchain endpoints. +type LangChainRouter struct { + client *bifrost.Bifrost +} + +// NewLangChainRouter creates a new LangChainRouter with the given bifrost client. +func NewLangChainRouter(client *bifrost.Bifrost) *LangChainRouter { + return &LangChainRouter{client: client} +} + +// RegisterRoutes registers all langchain routes on the given router. 
+func (l *LangChainRouter) RegisterRoutes(r *router.Router) { + r.POST("/langchain/v1/chat/completions", l.handleChatCompletion) +} + +// handleChatCompletion handles POST /langchain/v1/chat/completions +func (l *LangChainRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) +} diff --git a/transports/bifrost-http/integrations/langchain/types.go b/transports/bifrost-http/integrations/langchain/types.go new file mode 100644 index 0000000000..8a88956f7a --- /dev/null +++ b/transports/bifrost-http/integrations/langchain/types.go @@ -0,0 +1,48 @@ +package langchain + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest represents a LangChain API request. LangChain +// often proxies OpenAI style payloads, so the field set is similar. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest { + if model == "" { + model = r.Model + } + params := &schemas.ModelParameters{ + Temperature: r.Temperature, + TopP: r.TopP, + TopK: r.TopK, + MaxTokens: r.MaxTokens, + StopSequences: r.StopSequences, + PresencePenalty: r.PresencePenalty, + FrequencyPenalty: r.FrequencyPenalty, + ParallelToolCalls: r.ParallelToolCalls, + Tools: r.Tools, + ToolChoice: r.ToolChoice, + } + + return &schemas.BifrostRequest{ + Provider: schemas.OpenAI, + Model: model, + Input: schemas.RequestInput{ + ChatCompletionInput: &r.Messages, + }, + Params: params, + } +} diff --git a/transports/bifrost-http/integrations/langchain/types_test.go b/transports/bifrost-http/integrations/langchain/types_test.go new file mode 100644 index 0000000000..ca076a7b1b --- /dev/null +++ b/transports/bifrost-http/integrations/langchain/types_test.go @@ -0,0 +1,35 @@ +package langchain + +import ( + "testing" + + bifrost "github.com/maximhq/bifrost/core" + schemas "github.com/maximhq/bifrost/core/schemas" +) + +func TestConvertToBifrostRequest(t *testing.T) { + temp := 0.5 + req := ChatCompletionRequest{ + Model: "gpt-test", + Messages: []schemas.BifrostMessage{ + {Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")}, + }, + Temperature: &temp, + } + + bfReq := req.ConvertToBifrostRequest("override") + + if bfReq.Provider != schemas.OpenAI { + t.Errorf("expected provider %s, got %s", schemas.OpenAI, bfReq.Provider) + } + if bfReq.Model != "override" { + t.Errorf("expected model override, got %s", bfReq.Model) + } + if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp { + t.Errorf("temperature not copied") + } + if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 { + t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput) + } +} + diff --git a/transports/bifrost-http/integrations/langgraph/router.go b/transports/bifrost-http/integrations/langgraph/router.go new file mode 100644 index 0000000000..b4b4c230a1 --- /dev/null +++ b/transports/bifrost-http/integrations/langgraph/router.go @@ -0,0 +1,49 @@ +package langgraph + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// LangGraphRouter holds route registrations for langgraph endpoints. +type LangGraphRouter struct { + client *bifrost.Bifrost +} + +// NewLangGraphRouter creates a new LangGraphRouter with the given bifrost client. +func NewLangGraphRouter(client *bifrost.Bifrost) *LangGraphRouter { + return &LangGraphRouter{client: client} +} + +// RegisterRoutes registers all langgraph routes on the given router. 
+func (l *LangGraphRouter) RegisterRoutes(r *router.Router) { + r.POST("/langgraph/v1/chat/completions", l.handleChatCompletion) +} + +// handleChatCompletion handles POST /langgraph/v1/chat/completions +func (l *LangGraphRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) +} diff --git a/transports/bifrost-http/integrations/langgraph/types.go b/transports/bifrost-http/integrations/langgraph/types.go new file mode 100644 index 0000000000..c23bcae66c --- /dev/null +++ b/transports/bifrost-http/integrations/langgraph/types.go @@ -0,0 +1,48 @@ +package langgraph + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest describes the payload expected by LangGraph's HTTP +// interface, which is largely compatible with OpenAI's API. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest { + if model == "" { + model = r.Model + } + params := &schemas.ModelParameters{ + Temperature: r.Temperature, + TopP: r.TopP, + TopK: r.TopK, + MaxTokens: r.MaxTokens, + StopSequences: r.StopSequences, + PresencePenalty: r.PresencePenalty, + FrequencyPenalty: r.FrequencyPenalty, + ParallelToolCalls: r.ParallelToolCalls, + Tools: r.Tools, + ToolChoice: r.ToolChoice, + } + + return &schemas.BifrostRequest{ + Provider: schemas.OpenAI, + Model: model, + Input: schemas.RequestInput{ + ChatCompletionInput: &r.Messages, + }, + Params: params, + } +} diff --git a/transports/bifrost-http/integrations/langgraph/types_test.go b/transports/bifrost-http/integrations/langgraph/types_test.go new file mode 100644 index 0000000000..4b6188bf45 --- /dev/null +++ b/transports/bifrost-http/integrations/langgraph/types_test.go @@ -0,0 +1,35 @@ +package langgraph + +import ( + "testing" + + bifrost "github.com/maximhq/bifrost/core" + schemas "github.com/maximhq/bifrost/core/schemas" +) + +func TestConvertToBifrostRequest(t *testing.T) { + temp := 0.5 + req := ChatCompletionRequest{ + Model: "gpt-test", + Messages: []schemas.BifrostMessage{ + {Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")}, + }, + Temperature: &temp, + } + + bfReq := req.ConvertToBifrostRequest("") + + if bfReq.Provider != schemas.OpenAI { + t.Errorf("expected provider %s, got %s", schemas.OpenAI, bfReq.Provider) + } + if bfReq.Model != "gpt-test" { + t.Errorf("expected model gpt-test, got %s", bfReq.Model) + } + if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp { + t.Errorf("temperature not copied") + } + if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 { + t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput) + } +} + diff --git a/transports/bifrost-http/integrations/litellm/router.go b/transports/bifrost-http/integrations/litellm/router.go new file mode 100644 index 0000000000..929fb137e2 --- /dev/null +++ b/transports/bifrost-http/integrations/litellm/router.go @@ -0,0 +1,49 @@ +package litellm + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// LiteLLMRouter holds route registrations for litellm endpoints. +type LiteLLMRouter struct { + client *bifrost.Bifrost +} + +// NewLiteLLMRouter creates a new LiteLLMRouter with the given bifrost client. +func NewLiteLLMRouter(client *bifrost.Bifrost) *LiteLLMRouter { + return &LiteLLMRouter{client: client} +} + +// RegisterRoutes registers all litellm routes on the given router. 
+func (l *LiteLLMRouter) RegisterRoutes(r *router.Router) { + r.POST("/litellm/v1/chat/completions", l.handleChatCompletion) +} + +// handleChatCompletion handles POST /litellm/v1/chat/completions +func (l *LiteLLMRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) +} diff --git a/transports/bifrost-http/integrations/litellm/types.go b/transports/bifrost-http/integrations/litellm/types.go new file mode 100644 index 0000000000..3f1e57abf8 --- /dev/null +++ b/transports/bifrost-http/integrations/litellm/types.go @@ -0,0 +1,48 @@ +package litellm + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest represents a LiteLLM style request. The LiteLLM +// server closely mirrors the OpenAI API, so we reuse the same fields. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest { + if model == "" { + model = r.Model + } + params := &schemas.ModelParameters{ + Temperature: r.Temperature, + TopP: r.TopP, + TopK: r.TopK, + MaxTokens: r.MaxTokens, + StopSequences: r.StopSequences, + PresencePenalty: r.PresencePenalty, + FrequencyPenalty: r.FrequencyPenalty, + ParallelToolCalls: r.ParallelToolCalls, + Tools: r.Tools, + ToolChoice: r.ToolChoice, + } + + return &schemas.BifrostRequest{ + Provider: schemas.OpenAI, + Model: model, + Input: schemas.RequestInput{ + ChatCompletionInput: &r.Messages, + }, + Params: params, + } +} diff --git a/transports/bifrost-http/integrations/litellm/types_test.go b/transports/bifrost-http/integrations/litellm/types_test.go new file mode 100644 index 0000000000..f27aa2d4f7 --- /dev/null +++ b/transports/bifrost-http/integrations/litellm/types_test.go @@ -0,0 +1,35 @@ +package litellm + +import ( + "testing" + + bifrost "github.com/maximhq/bifrost/core" + schemas "github.com/maximhq/bifrost/core/schemas" +) + +func TestConvertToBifrostRequest(t *testing.T) { + temp := 0.5 + req := ChatCompletionRequest{ + Model: "gpt-test", + Messages: []schemas.BifrostMessage{ + {Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")}, + }, + Temperature: &temp, + } + + bfReq := req.ConvertToBifrostRequest("") + + if bfReq.Provider != schemas.OpenAI { + t.Errorf("expected provider %s, got %s", schemas.OpenAI, bfReq.Provider) + } + if bfReq.Model != "gpt-test" { + t.Errorf("expected model gpt-test, got %s", bfReq.Model) + } + if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp { + t.Errorf("temperature not copied") + } + if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 { + t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput) + } +} + diff --git a/transports/bifrost-http/integrations/mistral/router.go b/transports/bifrost-http/integrations/mistral/router.go new file mode 100644 index 0000000000..cb3fd88a1b --- /dev/null +++ b/transports/bifrost-http/integrations/mistral/router.go @@ -0,0 +1,49 @@ +package mistral + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// MistralRouter holds route registrations for mistral endpoints. +type MistralRouter struct { + client *bifrost.Bifrost +} + +// NewMistralRouter creates a new MistralRouter with the given bifrost client. +func NewMistralRouter(client *bifrost.Bifrost) *MistralRouter { + return &MistralRouter{client: client} +} + +// RegisterRoutes registers all mistral routes on the given router. 
+func (m *MistralRouter) RegisterRoutes(r *router.Router) { + r.POST("/mistral/v1/chat/completions", m.handleChatCompletion) +} + +// handleChatCompletion handles POST /mistral/v1/chat/completions +func (m *MistralRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := m.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) +} diff --git a/transports/bifrost-http/integrations/mistral/types.go b/transports/bifrost-http/integrations/mistral/types.go new file mode 100644 index 0000000000..9c11c8bfa3 --- /dev/null +++ b/transports/bifrost-http/integrations/mistral/types.go @@ -0,0 +1,48 @@ +package mistral + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest represents a request to the Mistral API. +// The structure follows the OpenAI style payload. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest { + if model == "" { + model = r.Model + } + params := &schemas.ModelParameters{ + Temperature: r.Temperature, + TopP: r.TopP, + TopK: r.TopK, + MaxTokens: r.MaxTokens, + StopSequences: r.StopSequences, + PresencePenalty: r.PresencePenalty, + FrequencyPenalty: r.FrequencyPenalty, + ParallelToolCalls: r.ParallelToolCalls, + Tools: r.Tools, + ToolChoice: r.ToolChoice, + } + + return &schemas.BifrostRequest{ + Provider: schemas.Mistral, + Model: model, + Input: schemas.RequestInput{ + ChatCompletionInput: &r.Messages, + }, + Params: params, + } +} diff --git a/transports/bifrost-http/integrations/mistral/types_test.go b/transports/bifrost-http/integrations/mistral/types_test.go new file mode 100644 index 0000000000..010d3ed69c --- /dev/null +++ b/transports/bifrost-http/integrations/mistral/types_test.go @@ -0,0 +1,35 @@ +package mistral + +import ( + "testing" + + bifrost "github.com/maximhq/bifrost/core" + schemas "github.com/maximhq/bifrost/core/schemas" +) + +func TestConvertToBifrostRequest(t *testing.T) { + temp := 0.5 + req := ChatCompletionRequest{ + Model: "mistral-test", + Messages: []schemas.BifrostMessage{ + {Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")}, + }, + Temperature: &temp, + } + + bfReq := req.ConvertToBifrostRequest("override") + + if bfReq.Provider != schemas.Mistral { + t.Errorf("expected provider %s, got %s", schemas.Mistral, bfReq.Provider) + } + if bfReq.Model != "override" { + t.Errorf("expected model override, got %s", bfReq.Model) + } + if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp { + t.Errorf("temperature not copied") + } + if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 { + t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput) + } +} + diff --git a/transports/bifrost-http/integrations/openai/router.go b/transports/bifrost-http/integrations/openai/router.go new file mode 100644 index 0000000000..24c2ddff26 --- /dev/null +++ b/transports/bifrost-http/integrations/openai/router.go @@ -0,0 +1,50 @@ +package openai + +import ( + "encoding/json" + + "github.com/fasthttp/router" + bifrost "github.com/maximhq/bifrost/core" + "github.com/maximhq/bifrost/transports/bifrost-http/lib" + "github.com/valyala/fasthttp" +) + +// OpenAIRouter holds route registrations for openai endpoints. +type OpenAIRouter struct { + client *bifrost.Bifrost +} + +// NewOpenAIRouter creates a new OpenAIRouter with the given bifrost client. +func NewOpenAIRouter(client *bifrost.Bifrost) *OpenAIRouter { + return &OpenAIRouter{client: client} +} + +// RegisterRoutes registers all openai routes on the given router. 
+func (o *OpenAIRouter) RegisterRoutes(r *router.Router) { + r.POST("/openai/v1/chat/completions", o.handleChatCompletion) +} + +// handleChatCompletion handles POST /openai/v1/chat/completions +func (o *OpenAIRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) { + var req ChatCompletionRequest + if err := json.Unmarshal(ctx.PostBody(), &req); err != nil { + ctx.SetStatusCode(fasthttp.StatusBadRequest) + json.NewEncoder(ctx).Encode(err) + return + } + + bifrostReq := req.ConvertToBifrostRequest("") + bifrostCtx := lib.ConvertToBifrostContext(ctx) + + result, err := o.client.ChatCompletionRequest(*bifrostCtx, bifrostReq) + if err != nil { + ctx.SetStatusCode(fasthttp.StatusInternalServerError) + json.NewEncoder(ctx).Encode(err) + return + } + + ctx.SetStatusCode(fasthttp.StatusOK) + ctx.SetContentType("application/json") + json.NewEncoder(ctx).Encode(result) + +} diff --git a/transports/bifrost-http/integrations/openai/types.go b/transports/bifrost-http/integrations/openai/types.go new file mode 100644 index 0000000000..dd41c3da33 --- /dev/null +++ b/transports/bifrost-http/integrations/openai/types.go @@ -0,0 +1,49 @@ +package openai + +import schemas "github.com/maximhq/bifrost/core/schemas" + +// ChatCompletionRequest mirrors the basic OpenAI Chat API request. +// Only a subset of fields are implemented as the goal of this +// middleware is simply to forward the request to Bifrost. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []schemas.BifrostMessage `json:"messages"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"top_p,omitempty"` + TopK *int `json:"top_k,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty"` + StopSequences *[]string `json:"stop_sequences,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` + ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` + Tools *[]schemas.Tool `json:"tools,omitempty"` + ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"` +} + +// ConvertToBifrostRequest converts the request to a BifrostRequest. 
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
+	if model == "" {
+		model = r.Model
+	}
+	params := &schemas.ModelParameters{
+		Temperature:       r.Temperature,
+		TopP:              r.TopP,
+		TopK:              r.TopK,
+		MaxTokens:         r.MaxTokens,
+		StopSequences:     r.StopSequences,
+		PresencePenalty:   r.PresencePenalty,
+		FrequencyPenalty:  r.FrequencyPenalty,
+		ParallelToolCalls: r.ParallelToolCalls,
+		Tools:             r.Tools,
+		ToolChoice:        r.ToolChoice,
+	}
+
+	return &schemas.BifrostRequest{
+		Provider: schemas.OpenAI,
+		Model:    model,
+		Input: schemas.RequestInput{
+			ChatCompletionInput: &r.Messages,
+		},
+		Params: params,
+	}
+}
diff --git a/transports/bifrost-http/integrations/openai/types_test.go b/transports/bifrost-http/integrations/openai/types_test.go
new file mode 100644
index 0000000000..ed451c4e34
--- /dev/null
+++ b/transports/bifrost-http/integrations/openai/types_test.go
@@ -0,0 +1,35 @@
+package openai
+
+import (
+	"testing"
+
+	bifrost "github.com/maximhq/bifrost/core"
+	schemas "github.com/maximhq/bifrost/core/schemas"
+)
+
+func TestConvertToBifrostRequest(t *testing.T) {
+	temp := 0.5
+	req := ChatCompletionRequest{
+		Model: "gpt-test",
+		Messages: []schemas.BifrostMessage{
+			{Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")},
+		},
+		Temperature: &temp,
+	}
+
+	bfReq := req.ConvertToBifrostRequest("override")
+
+	if bfReq.Provider != schemas.OpenAI {
+		t.Errorf("expected provider %s, got %s", schemas.OpenAI, bfReq.Provider)
+	}
+	if bfReq.Model != "override" {
+		t.Errorf("expected model override, got %s", bfReq.Model)
+	}
+	if bfReq.Params == nil || bfReq.Params.Temperature == nil || *bfReq.Params.Temperature != temp {
+		t.Errorf("temperature not copied")
+	}
+	if bfReq.Input.ChatCompletionInput == nil || len(*bfReq.Input.ChatCompletionInput) != 1 {
+		t.Fatalf("expected 1 message, got %v", bfReq.Input.ChatCompletionInput)
+	}
+}
+
diff --git a/transports/bifrost-http/main.go b/transports/bifrost-http/main.go
index 8e953ace17..f2b85bc073 100644
--- a/transports/bifrost-http/main.go
+++ b/transports/bifrost-http/main.go
@@ -29,7 +29,13 @@ import (
 	schemas "github.com/maximhq/bifrost/core/schemas"
 	"github.com/maximhq/bifrost/plugins/maxim"
 	"github.com/maximhq/bifrost/transports/bifrost-http/integrations"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/anthropic"
 	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/genai"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/langchain"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/langgraph"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/litellm"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/mistral"
+	"github.com/maximhq/bifrost/transports/bifrost-http/integrations/openai"
 	"github.com/maximhq/bifrost/transports/bifrost-http/lib"
 	"github.com/maximhq/bifrost/transports/bifrost-http/tracking"
 	"github.com/prometheus/client_golang/prometheus"
@@ -176,7 +182,15 @@ func main() {
 
 	r := router.New()
 
-	extensions := []integrations.ExtensionRouter{genai.NewGenAIRouter(client)}
+	extensions := []integrations.ExtensionRouter{
+		genai.NewGenAIRouter(client),
+		openai.NewOpenAIRouter(client),
+		anthropic.NewAnthropicRouter(client),
+		litellm.NewLiteLLMRouter(client),
+		langchain.NewLangChainRouter(client),
+		langgraph.NewLangGraphRouter(client),
+		mistral.NewMistralRouter(client),
+	}
 
 	r.POST("/v1/text/completions", func(ctx *fasthttp.RequestCtx) {
 		handleCompletion(ctx, client, false)