Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 49 additions & 0 deletions transports/bifrost-http/integrations/anthropic/router.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
package anthropic

import (
"encoding/json"

"github.com/fasthttp/router"
bifrost "github.com/maximhq/bifrost/core"
"github.com/maximhq/bifrost/transports/bifrost-http/lib"
"github.com/valyala/fasthttp"
)

// AnthropicRouter holds route registrations for anthropic endpoints.
// It is constructed via NewAnthropicRouter and wired into a fasthttp
// router through RegisterRoutes.
type AnthropicRouter struct {
	client *bifrost.Bifrost // bifrost client used to execute chat completion requests
}

// NewAnthropicRouter returns an AnthropicRouter backed by the supplied
// bifrost client.
func NewAnthropicRouter(client *bifrost.Bifrost) *AnthropicRouter {
	ar := new(AnthropicRouter)
	ar.client = client
	return ar
}

// RegisterRoutes attaches every anthropic endpoint to the supplied router.
func (a *AnthropicRouter) RegisterRoutes(r *router.Router) {
	const messagesPath = "/anthropic/v1/messages"
	r.POST(messagesPath, a.handleChatCompletion)
}

// handleChatCompletion handles POST /anthropic/v1/messages
func (a *AnthropicRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) {
var req ChatCompletionRequest
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +30 to +34
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick (assertive)

Improve error response formatting.

The current error encoding directly serializes the unmarshaling error, which may expose internal implementation details. Consider using a structured error response format.

	if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
-		json.NewEncoder(ctx).Encode(err)
+		ctx.SetContentType("application/json")
+		json.NewEncoder(ctx).Encode(map[string]string{"error": "Invalid request format"})
		return
	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
json.NewEncoder(ctx).Encode(err)
return
}
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{"error": "Invalid request format"})
return
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/anthropic/router.go around lines 30 to
34, the error response directly encodes the unmarshaling error, potentially
exposing internal details. Modify the error handling to return a structured JSON
response with a clear error message field instead of encoding the raw error.
This improves security and client-side error handling by providing a consistent
error format.


bifrostReq := req.ConvertToBifrostRequest("")
bifrostCtx := lib.ConvertToBifrostContext(ctx)

result, err := a.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +40 to +44
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick (assertive)

Improve error response consistency.

The error handling for internal server errors should also use structured error responses for consistency.

	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
-		json.NewEncoder(ctx).Encode(err)
+		ctx.SetContentType("application/json")
+		json.NewEncoder(ctx).Encode(map[string]string{"error": "Internal server error"})
		return
	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{"error": "Internal server error"})
return
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/anthropic/router.go around lines 40 to
44, the error response for internal server errors is currently unstructured.
Modify the error handling to return a structured JSON error response, such as an
object with an "error" field containing the error message, to maintain
consistency with other error responses in the codebase.


ctx.SetStatusCode(fasthttp.StatusOK)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(result)
}
Comment on lines +1 to +49
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Address code duplication across router implementations.

This router implementation is nearly identical to the Mistral router, differing only in package name, struct name, and endpoint path. Consider creating a generic router pattern to reduce duplication.

Consider creating a common interface or base router:

type IntegrationRouter interface {
    RegisterRoutes(r *router.Router)
}

type BaseRouter struct {
    client *bifrost.Bifrost
    endpoint string
    name string
}

func (b *BaseRouter) handleChatCompletion(ctx *fasthttp.RequestCtx, converter func([]byte) (*schemas.BifrostRequest, error)) {
    // Common implementation
}

This would eliminate the duplicate code across all integration routers.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/anthropic/router.go lines 1 to 49, the
router implementation duplicates code found in other integration routers like
Mistral, differing only in package, struct, and endpoint path. Refactor by
creating a generic base router struct that holds the bifrost client, endpoint
path, and any identifying name. Implement a common handler method on this base
router that accepts a converter function to transform the request body into a
Bifrost request. Then, have specific routers embed or use this base router,
passing their unique endpoint and converter, and register routes using the
shared handler to eliminate duplication.

48 changes: 48 additions & 0 deletions transports/bifrost-http/integrations/anthropic/types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package anthropic

import schemas "github.com/maximhq/bifrost/core/schemas"

// ChatCompletionRequest represents the Anthropic messages API request.
// Only a subset of parameters are supported.
// All pointer fields are optional; nil means "not provided" and is
// omitted from the serialized JSON.
type ChatCompletionRequest struct {
	Model             string                   `json:"model"`    // model identifier; may be overridden by the caller in ConvertToBifrostRequest
	Messages          []schemas.BifrostMessage `json:"messages"` // conversation history to send
	Temperature       *float64                 `json:"temperature,omitempty"`
	TopP              *float64                 `json:"top_p,omitempty"`
	TopK              *int                     `json:"top_k,omitempty"`
	MaxTokens         *int                     `json:"max_tokens,omitempty"`
	StopSequences     *[]string                `json:"stop_sequences,omitempty"`
	PresencePenalty   *float64                 `json:"presence_penalty,omitempty"`
	FrequencyPenalty  *float64                 `json:"frequency_penalty,omitempty"`
	ParallelToolCalls *bool                    `json:"parallel_tool_calls,omitempty"`
	Tools             *[]schemas.Tool          `json:"tools,omitempty"`
	ToolChoice        *schemas.ToolChoice      `json:"tool_choice,omitempty"`
}
Comment on lines +7 to +20
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Consider refactoring to reduce code duplication.

This struct definition is identical across all AI provider integrations (Anthropic, Mistral, LiteLLM, LangGraph). Consider extracting a common BaseChatCompletionRequest struct to eliminate duplication and improve maintainability.

+// BaseChatCompletionRequest contains common fields across all AI providers
+type BaseChatCompletionRequest struct {
+	Model             string                   `json:"model"`
+	Messages          []schemas.BifrostMessage `json:"messages"`
+	Temperature       *float64                 `json:"temperature,omitempty"`
+	TopP              *float64                 `json:"top_p,omitempty"`
+	TopK              *int                     `json:"top_k,omitempty"`
+	MaxTokens         *int                     `json:"max_tokens,omitempty"`
+	StopSequences     *[]string                `json:"stop_sequences,omitempty"`
+	PresencePenalty   *float64                 `json:"presence_penalty,omitempty"`
+	FrequencyPenalty  *float64                 `json:"frequency_penalty,omitempty"`
+	ParallelToolCalls *bool                    `json:"parallel_tool_calls,omitempty"`
+	Tools             *[]schemas.Tool          `json:"tools,omitempty"`
+	ToolChoice        *schemas.ToolChoice      `json:"tool_choice,omitempty"`
+}

// ChatCompletionRequest represents the Anthropic messages API request.
type ChatCompletionRequest struct {
-	Model             string                   `json:"model"`
-	Messages          []schemas.BifrostMessage `json:"messages"`
-	Temperature       *float64                 `json:"temperature,omitempty"`
-	TopP              *float64                 `json:"top_p,omitempty"`
-	TopK              *int                     `json:"top_k,omitempty"`
-	MaxTokens         *int                     `json:"max_tokens,omitempty"`
-	StopSequences     *[]string                `json:"stop_sequences,omitempty"`
-	PresencePenalty   *float64                 `json:"presence_penalty,omitempty"`
-	FrequencyPenalty  *float64                 `json:"frequency_penalty,omitempty"`
-	ParallelToolCalls *bool                    `json:"parallel_tool_calls,omitempty"`
-	Tools             *[]schemas.Tool          `json:"tools,omitempty"`
-	ToolChoice        *schemas.ToolChoice      `json:"tool_choice,omitempty"`
+	BaseChatCompletionRequest
}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
type ChatCompletionRequest struct {
Model string `json:"model"`
Messages []schemas.BifrostMessage `json:"messages"`
Temperature *float64 `json:"temperature,omitempty"`
TopP *float64 `json:"top_p,omitempty"`
TopK *int `json:"top_k,omitempty"`
MaxTokens *int `json:"max_tokens,omitempty"`
StopSequences *[]string `json:"stop_sequences,omitempty"`
PresencePenalty *float64 `json:"presence_penalty,omitempty"`
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"`
Tools *[]schemas.Tool `json:"tools,omitempty"`
ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"`
}
// BaseChatCompletionRequest contains common fields across all AI providers
type BaseChatCompletionRequest struct {
Model string `json:"model"`
Messages []schemas.BifrostMessage `json:"messages"`
Temperature *float64 `json:"temperature,omitempty"`
TopP *float64 `json:"top_p,omitempty"`
TopK *int `json:"top_k,omitempty"`
MaxTokens *int `json:"max_tokens,omitempty"`
StopSequences *[]string `json:"stop_sequences,omitempty"`
PresencePenalty *float64 `json:"presence_penalty,omitempty"`
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"`
Tools *[]schemas.Tool `json:"tools,omitempty"`
ToolChoice *schemas.ToolChoice `json:"tool_choice,omitempty"`
}
// ChatCompletionRequest represents the Anthropic messages API request.
type ChatCompletionRequest struct {
BaseChatCompletionRequest
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/anthropic/types.go around lines 7 to 20,
the ChatCompletionRequest struct is duplicated across multiple AI provider
integrations. To fix this, extract the common fields into a new
BaseChatCompletionRequest struct and have each provider-specific
ChatCompletionRequest embed or extend this base struct. This will reduce code
duplication and improve maintainability.


// ConvertToBifrostRequest converts the request to a BifrostRequest
// targeting the Anthropic provider. When model is empty, the model named
// in the request body is used instead.
func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
	resolved := model
	if resolved == "" {
		resolved = r.Model
	}

	return &schemas.BifrostRequest{
		Provider: schemas.Anthropic,
		Model:    resolved,
		Input: schemas.RequestInput{
			ChatCompletionInput: &r.Messages,
		},
		// Every optional tuning knob is forwarded as-is; nil pointers
		// simply mean the parameter was not supplied.
		Params: &schemas.ModelParameters{
			Temperature:       r.Temperature,
			TopP:              r.TopP,
			TopK:              r.TopK,
			MaxTokens:         r.MaxTokens,
			StopSequences:     r.StopSequences,
			PresencePenalty:   r.PresencePenalty,
			FrequencyPenalty:  r.FrequencyPenalty,
			ParallelToolCalls: r.ParallelToolCalls,
			Tools:             r.Tools,
			ToolChoice:        r.ToolChoice,
		},
	}
}
Comment on lines +23 to +48
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Add validation for required fields.

The conversion method doesn't validate required fields like Model or Messages. Consider adding validation to prevent runtime errors downstream.

// ConvertToBifrostRequest converts the request to a BifrostRequest.
-func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
+func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) (*schemas.BifrostRequest, error) {
	if model == "" {
		model = r.Model
	}
+	if model == "" {
+		return nil, fmt.Errorf("model is required")
+	}
+	if len(r.Messages) == 0 {
+		return nil, fmt.Errorf("messages are required")
+	}
	
	params := &schemas.ModelParameters{
		Temperature:       r.Temperature,
		TopP:              r.TopP,
		TopK:              r.TopK,
		MaxTokens:         r.MaxTokens,
		StopSequences:     r.StopSequences,
		PresencePenalty:   r.PresencePenalty,
		FrequencyPenalty:  r.FrequencyPenalty,
		ParallelToolCalls: r.ParallelToolCalls,
		Tools:             r.Tools,
		ToolChoice:        r.ToolChoice,
	}

	return &schemas.BifrostRequest{
		Provider: schemas.Anthropic,
		Model:    model,
		Input: schemas.RequestInput{
			ChatCompletionInput: &r.Messages,
		},
		Params: params,
-	}
+	}, nil
}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
if model == "" {
model = r.Model
}
params := &schemas.ModelParameters{
Temperature: r.Temperature,
TopP: r.TopP,
TopK: r.TopK,
MaxTokens: r.MaxTokens,
StopSequences: r.StopSequences,
PresencePenalty: r.PresencePenalty,
FrequencyPenalty: r.FrequencyPenalty,
ParallelToolCalls: r.ParallelToolCalls,
Tools: r.Tools,
ToolChoice: r.ToolChoice,
}
return &schemas.BifrostRequest{
Provider: schemas.Anthropic,
Model: model,
Input: schemas.RequestInput{
ChatCompletionInput: &r.Messages,
},
Params: params,
}
}
// ConvertToBifrostRequest converts the request to a BifrostRequest.
func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) (*schemas.BifrostRequest, error) {
if model == "" {
model = r.Model
}
if model == "" {
return nil, fmt.Errorf("model is required")
}
if len(r.Messages) == 0 {
return nil, fmt.Errorf("messages are required")
}
params := &schemas.ModelParameters{
Temperature: r.Temperature,
TopP: r.TopP,
TopK: r.TopK,
MaxTokens: r.MaxTokens,
StopSequences: r.StopSequences,
PresencePenalty: r.PresencePenalty,
FrequencyPenalty: r.FrequencyPenalty,
ParallelToolCalls: r.ParallelToolCalls,
Tools: r.Tools,
ToolChoice: r.ToolChoice,
}
return &schemas.BifrostRequest{
Provider: schemas.Anthropic,
Model: model,
Input: schemas.RequestInput{
ChatCompletionInput: &r.Messages,
},
Params: params,
}, nil
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/anthropic/types.go between lines 23 and
48, the ConvertToBifrostRequest method lacks validation for required fields such
as Model and Messages. Add checks at the start of the method to ensure Model is
not empty (either from the argument or the struct) and that Messages is not nil
or empty. If validation fails, handle it appropriately by returning an error or
a nil pointer to prevent runtime errors downstream.

35 changes: 35 additions & 0 deletions transports/bifrost-http/integrations/anthropic/types_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
package anthropic

import (
"testing"

bifrost "github.com/maximhq/bifrost/core"
schemas "github.com/maximhq/bifrost/core/schemas"
)

// TestConvertToBifrostRequest verifies that an Anthropic chat request is
// mapped onto a BifrostRequest with provider, model, parameters, and
// messages carried over intact when no model override is given.
func TestConvertToBifrostRequest(t *testing.T) {
	temperature := 0.5
	in := ChatCompletionRequest{
		Model:       "claude-test",
		Temperature: &temperature,
		Messages: []schemas.BifrostMessage{
			{Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")},
		},
	}

	got := in.ConvertToBifrostRequest("")

	if got.Provider != schemas.Anthropic {
		t.Errorf("expected provider %s, got %s", schemas.Anthropic, got.Provider)
	}
	if got.Model != "claude-test" {
		t.Errorf("expected model claude-test, got %s", got.Model)
	}
	if got.Params == nil || got.Params.Temperature == nil || *got.Params.Temperature != temperature {
		t.Errorf("temperature not copied")
	}
	if got.Input.ChatCompletionInput == nil || len(*got.Input.ChatCompletionInput) != 1 {
		t.Fatalf("expected 1 message, got %v", got.Input.ChatCompletionInput)
	}
}

49 changes: 49 additions & 0 deletions transports/bifrost-http/integrations/langchain/router.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
package langchain

import (
"encoding/json"

"github.com/fasthttp/router"
bifrost "github.com/maximhq/bifrost/core"
"github.com/maximhq/bifrost/transports/bifrost-http/lib"
"github.com/valyala/fasthttp"
)

// LangChainRouter holds route registrations for langchain endpoints.
// It is constructed via NewLangChainRouter and wired into a fasthttp
// router through RegisterRoutes.
type LangChainRouter struct {
	client *bifrost.Bifrost // bifrost client used to execute chat completion requests
}

// NewLangChainRouter returns a LangChainRouter backed by the supplied
// bifrost client.
func NewLangChainRouter(client *bifrost.Bifrost) *LangChainRouter {
	lr := new(LangChainRouter)
	lr.client = client
	return lr
}

// RegisterRoutes attaches every langchain endpoint to the supplied router.
func (l *LangChainRouter) RegisterRoutes(r *router.Router) {
	const chatCompletionsPath = "/langchain/v1/chat/completions"
	r.POST(chatCompletionsPath, l.handleChatCompletion)
}
Comment on lines +12 to +25
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick (assertive)

Reduce duplication across SDK routers
The route registration and handler scaffolding for LangChain mirror the other integration routers verbatim. Extract common logic (parsing, error handling, response encoding) into a shared helper or base router to enforce consistency and DRY.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/router.go around lines 12 to
25, the LangChainRouter duplicates route registration and handler scaffolding
logic found in other SDK routers. Refactor by extracting common functionality
such as route registration, request parsing, error handling, and response
encoding into a shared helper or base router struct. Then have LangChainRouter
embed or use this shared component to reduce duplication and ensure consistent
behavior across all integration routers.


// handleChatCompletion handles POST /langchain/v1/chat/completions
func (l *LangChainRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) {
var req ChatCompletionRequest
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +30 to +34
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Avoid exposing internal errors on malformed input
Directly encoding err can leak internal details and omits setting the response Content-Type. Instead, call ctx.SetContentType("application/json") and return a structured error payload (e.g. {"error":"invalid request format"}).

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/router.go around lines 30 to
34, the code directly encodes the internal error object on JSON unmarshal
failure, which can expose sensitive details and does not set the response
Content-Type. Fix this by setting the response Content-Type to
"application/json" using ctx.SetContentType, and return a structured JSON error
message like {"error":"invalid request format"} instead of encoding the raw
error.


bifrostReq := req.ConvertToBifrostRequest("")
bifrostCtx := lib.ConvertToBifrostContext(ctx)

Comment on lines +36 to +38
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick (assertive)

Simplify context conversion signature
ConvertToBifrostContext returns *context.Context which you immediately dereference on use. Change it to return context.Context directly to remove unnecessary pointer indirection.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/router.go around lines 36 to
38, the function ConvertToBifrostContext currently returns a pointer to
context.Context which is immediately dereferenced. Modify
ConvertToBifrostContext to return context.Context directly instead of
*context.Context, and update all call sites accordingly to remove pointer usage
and simplify the code.

result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +39 to +44
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Apply consistent error handling for Bifrost client errors.

Same issue as above - directly encoding internal errors can expose implementation details.

-	result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
-	if err != nil {
-		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
-		json.NewEncoder(ctx).Encode(err)
-		return
-	}
+	result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
+	if err != nil {
+		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
+		ctx.SetContentType("application/json")
+		json.NewEncoder(ctx).Encode(map[string]string{
+			"error": "Internal server error",
+		})
+		return
+	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Internal server error",
})
return
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/router.go around lines 39 to
44, the error handling directly encodes the internal error which may expose
sensitive implementation details. Modify the error handling to log the detailed
error internally but respond to the client with a generic error message and
appropriate HTTP status code to avoid leaking internal information.


ctx.SetStatusCode(fasthttp.StatusOK)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(result)
}
Comment on lines +12 to +49
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick (assertive)

🛠️ Refactor suggestion

Significant code duplication across integration routers.

The router implementations for LangChain, LangGraph, and LiteLLM are nearly identical. This violates the DRY principle and makes maintenance more difficult.

Consider implementing a generic router pattern or base router that can be composed for different integrations. This would:

  1. Reduce code duplication
  2. Ensure consistent error handling across all integrations
  3. Make it easier to add new integrations
  4. Centralize common logic like logging and validation

Would you like me to generate a refactored implementation that addresses these concerns?

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/router.go lines 12 to 49, the
LangChainRouter implementation duplicates code found in other integration
routers, violating DRY principles. Refactor by creating a generic base router or
handler that encapsulates common logic such as request parsing, error handling,
and response encoding. Then compose or extend this base for each specific
integration to reduce duplication, ensure consistent error handling, and
simplify adding new integrations.

48 changes: 48 additions & 0 deletions transports/bifrost-http/integrations/langchain/types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package langchain

import schemas "github.com/maximhq/bifrost/core/schemas"

// ChatCompletionRequest represents a LangChain API request. LangChain
// often proxies OpenAI style payloads, so the field set is similar.
// All pointer fields are optional; nil means "not provided" and is
// omitted from the serialized JSON.
// NOTE(review): the struct carries no provider field, so the conversion
// cannot distinguish non-OpenAI backends — confirm whether callers ever
// need to route LangChain traffic to other providers.
type ChatCompletionRequest struct {
	Model             string                   `json:"model"`    // model identifier; may be overridden by the caller in ConvertToBifrostRequest
	Messages          []schemas.BifrostMessage `json:"messages"` // conversation history to send
	Temperature       *float64                 `json:"temperature,omitempty"`
	TopP              *float64                 `json:"top_p,omitempty"`
	TopK              *int                     `json:"top_k,omitempty"`
	MaxTokens         *int                     `json:"max_tokens,omitempty"`
	StopSequences     *[]string                `json:"stop_sequences,omitempty"`
	PresencePenalty   *float64                 `json:"presence_penalty,omitempty"`
	FrequencyPenalty  *float64                 `json:"frequency_penalty,omitempty"`
	ParallelToolCalls *bool                    `json:"parallel_tool_calls,omitempty"`
	Tools             *[]schemas.Tool          `json:"tools,omitempty"`
	ToolChoice        *schemas.ToolChoice      `json:"tool_choice,omitempty"`
}
Comment on lines +7 to +20
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Include explicit provider metadata in the request struct
ChatCompletionRequest lacks a field to carry the actual provider (e.g. OpenAI, Anthropic, Mistral, etc.) into ConvertToBifrostRequest. Without it, all LangChain calls are incorrectly tagged as OpenAI. Add a Provider field (or similar) to map it into the resulting schemas.BifrostRequest.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/types.go between lines 7 and
20, the ChatCompletionRequest struct is missing a field to specify the provider
(such as OpenAI, Anthropic, Mistral). Add a new string field named Provider (or
similar) to this struct to explicitly carry the provider metadata. This will
allow ConvertToBifrostRequest to correctly map and tag the request with the
actual provider instead of defaulting to OpenAI.


// ConvertToBifrostRequest converts the request to a BifrostRequest.
func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
if model == "" {
model = r.Model
}
params := &schemas.ModelParameters{
Temperature: r.Temperature,
TopP: r.TopP,
TopK: r.TopK,
MaxTokens: r.MaxTokens,
StopSequences: r.StopSequences,
PresencePenalty: r.PresencePenalty,
FrequencyPenalty: r.FrequencyPenalty,
ParallelToolCalls: r.ParallelToolCalls,
Tools: r.Tools,
ToolChoice: r.ToolChoice,
}

return &schemas.BifrostRequest{
Provider: schemas.OpenAI,
Model: model,
Comment on lines +40 to +42
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💡 Verification agent

🧩 Analysis chain

Verify hardcoding provider to OpenAI is appropriate.

While the comment mentions LangChain often proxies OpenAI style payloads, hardcoding the provider to schemas.OpenAI may not be correct for all LangChain requests, especially if LangChain supports other model providers.


🌐 Web query:

Does LangChain support model providers other than OpenAI, and how should they be identified in requests?

💡 Result:

LangChain supports a wide range of model providers beyond just OpenAI. It offers integrations with numerous LLM and chat model providers through a standardized interface.

Supported Model Providers

LangChain integrates with many popular model providers including:

  • Chat Model Providers: Anthropic, Ollama, Microsoft Azure, Google Vertex, Amazon Bedrock, Hugging Face, Cohere, Groq, NVIDIA, and many others[5].

  • LLM Providers: Aleph Alpha, Alibaba Cloud PAI EAS, Anthropic, and numerous others[4].

  • Other AI Service Providers: IBM's watsonx.ai, SambaNovaCloud, SambaStudio, ChatSeekrFlow, Snowflake Cortex[1][7].

LangChain organizes these integrations into two categories:

  1. Official models: Supported by LangChain and/or the model provider, found in langchain-<provider> packages[5].
  2. Community models: Contributed and supported primarily by the community, available in the langchain-community package[5].

Identifying Models in Requests

When working with different model providers in LangChain, you should identify them using a standardized approach:

  1. Using the init_chat_model function:
    from langchain.chat_models import init_chat_model
    
    # Format: "provider:model_id"
    model = init_chat_model("anthropic:claude-3-5-haiku-latest")
    ```[8]
  2. Using provider-specific classes directly:
    from langchain_anthropic import ChatAnthropic
    
    model = ChatAnthropic(
        model="claude-3-7-sonnet-latest",
        temperature=0,
        max_tokens=2048
    )
    ```[8]

Each provider typically has their own class naming convention, often prefixed with "Chat" for chat models (e.g., ChatOllama, ChatAnthropic, ChatOpenAI)[5].

Custom Model Integration

For models not directly supported, you can create custom LLM classes that implement the required interface. This approach allows you to connect to models via wrappers and APIs with custom authentication methods like HMAC signatures[9].

LangChain's standardized interface helps address historical inconsistencies between providers, where some used model_name and others used model parameters, making it easier to integrate with different LLM providers regardless of their specific API requirements[6].

Citations:


Avoid Hardcoding the Provider Field

Hardcoding Provider: schemas.OpenAI will misidentify requests when LangChain is configured to use non-OpenAI models. Since LangChain supports many providers (Anthropic, Azure, Google Vertex, Amazon Bedrock, Hugging Face, Cohere, etc.), we need to propagate the actual provider into the BifrostRequest.

• File: transports/bifrost-http/integrations/langchain/types.go
Lines: 40–42

return &schemas.BifrostRequest{
    // ❌ Always OpenAI
    Provider: schemas.OpenAI,
    Model:    model,
    …
}

Suggestions:

  • Add a Provider field (or metadata) to the LangChain request struct so it can carry the chosen provider.
  • In ConvertToBifrostRequest(), map that provider value to the corresponding schemas.Provider enum instead of hardcoding OpenAI.
  • Optionally, parse the model string prefix (e.g. "anthropic:claude-3") to infer the provider when an explicit field isn’t available.

Fixing this will ensure BifrostRequest accurately reflects the intended model provider.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/types.go around lines 40 to
42, the Provider field in BifrostRequest is hardcoded to schemas.OpenAI, which
is incorrect for LangChain requests using other model providers. To fix this,
add a Provider field or metadata to the LangChain request struct to carry the
actual provider information. Then, update ConvertToBifrostRequest() to map this
provider value to the corresponding schemas.Provider enum instead of hardcoding
OpenAI. If no explicit provider field exists, parse the model string prefix
(e.g., "anthropic:claude-3") to infer and set the correct provider dynamically.

Input: schemas.RequestInput{
ChatCompletionInput: &r.Messages,
},
Params: params,
}
}
35 changes: 35 additions & 0 deletions transports/bifrost-http/integrations/langchain/types_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
package langchain

import (
"testing"

bifrost "github.com/maximhq/bifrost/core"
schemas "github.com/maximhq/bifrost/core/schemas"
)

// TestConvertToBifrostRequest verifies that a LangChain chat completion
// request converts into a BifrostRequest: the provider maps to OpenAI, the
// model override wins over the request's own model, and the temperature and
// messages are carried across.
func TestConvertToBifrostRequest(t *testing.T) {
	temperature := 0.5
	source := ChatCompletionRequest{
		Model:       "gpt-test",
		Temperature: &temperature,
		Messages: []schemas.BifrostMessage{
			{Role: schemas.ModelChatMessageRoleUser, Content: bifrost.Ptr("hi")},
		},
	}

	got := source.ConvertToBifrostRequest("override")

	if got.Provider != schemas.OpenAI {
		t.Errorf("expected provider %s, got %s", schemas.OpenAI, got.Provider)
	}
	if got.Model != "override" {
		t.Errorf("expected model override, got %s", got.Model)
	}
	params := got.Params
	if params == nil || params.Temperature == nil || *params.Temperature != temperature {
		t.Errorf("temperature not copied")
	}
	messages := got.Input.ChatCompletionInput
	if messages == nil || len(*messages) != 1 {
		t.Fatalf("expected 1 message, got %v", messages)
	}
}
Comment on lines +10 to +34
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

LangChain conversion logic correctly tested.

The test validates that LangChain requests properly map to schemas.OpenAI provider while supporting model override functionality. This is consistent with LangChain acting as an interface to OpenAI models.

The identical test structure across all integration files creates significant maintenance overhead. Consider implementing a table-driven test approach:

+// In a shared test file or package
+func TestAllProviderConversions(t *testing.T) {
+    testCases := []struct {
+        name             string
+        provider         string
+        modelOverride    string
+        expectedProvider schemas.Provider
+        expectedModel    string
+    }{
+        {"OpenAI", "openai", "override", schemas.OpenAI, "override"},
+        {"Mistral", "mistral", "override", schemas.Mistral, "override"},
+        {"LiteLLM", "litellm", "", schemas.OpenAI, "gpt-test"},
+        {"LangChain", "langchain", "override", schemas.OpenAI, "override"},
+    }
+    
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            // Test implementation using provider factories
+        })
+    }
+}

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langchain/types_test.go around lines 10
to 34, the test for ConvertToBifrostRequest is repetitive and similar to other
integration tests, causing maintenance overhead. Refactor this test into a
table-driven test format by defining a slice of test cases with input parameters
and expected outputs, then iterate over them using t.Run to execute subtests.
This will consolidate similar tests, reduce duplication, and improve
maintainability.


49 changes: 49 additions & 0 deletions transports/bifrost-http/integrations/langgraph/router.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
package langgraph

import (
"encoding/json"

"github.com/fasthttp/router"
bifrost "github.com/maximhq/bifrost/core"
"github.com/maximhq/bifrost/transports/bifrost-http/lib"
"github.com/valyala/fasthttp"
)

// LangGraphRouter holds route registrations for langgraph endpoints.
// It forwards incoming HTTP chat completion requests to the wrapped
// Bifrost client.
type LangGraphRouter struct {
	client *bifrost.Bifrost // Bifrost client used to execute chat completion requests.
}

// NewLangGraphRouter creates a new LangGraphRouter with the given bifrost client.
func NewLangGraphRouter(client *bifrost.Bifrost) *LangGraphRouter {
	lr := LangGraphRouter{client: client}
	return &lr
}

// RegisterRoutes registers all langgraph routes on the given router.
// Currently a single OpenAI-compatible chat completion endpoint is exposed.
func (l *LangGraphRouter) RegisterRoutes(r *router.Router) {
	r.POST("/langgraph/v1/chat/completions", l.handleChatCompletion)
}

// handleChatCompletion handles POST /langgraph/v1/chat/completions
func (l *LangGraphRouter) handleChatCompletion(ctx *fasthttp.RequestCtx) {
var req ChatCompletionRequest
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +30 to +34
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Improve error handling to avoid exposing internal details.

Directly encoding Go errors to JSON responses can expose internal implementation details and potentially sensitive information to clients. Consider creating structured error responses instead.

-	if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
-		ctx.SetStatusCode(fasthttp.StatusBadRequest)
-		json.NewEncoder(ctx).Encode(err)
-		return
-	}
+	if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
+		ctx.SetStatusCode(fasthttp.StatusBadRequest)
+		ctx.SetContentType("application/json")
+		json.NewEncoder(ctx).Encode(map[string]string{
+			"error": "Invalid request format",
+		})
+		return
+	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
json.NewEncoder(ctx).Encode(err)
return
}
if err := json.Unmarshal(ctx.PostBody(), &req); err != nil {
ctx.SetStatusCode(fasthttp.StatusBadRequest)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Invalid request format",
})
return
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langgraph/router.go around lines 30 to
34, the current error handling directly encodes the Go error to the JSON
response, which may expose internal details. To fix this, replace the direct
error encoding with a structured error response that includes a generic error
message and an appropriate error code, avoiding exposure of internal error
details to clients.


bifrostReq := req.ConvertToBifrostRequest("")
bifrostCtx := lib.ConvertToBifrostContext(ctx)

result, err := l.client.ChatCompletionRequest(*bifrostCtx, bifrostReq)
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
Comment on lines +40 to +44
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Apply consistent error handling for Bifrost client errors.

Same issue as above - directly encoding internal errors can expose implementation details.

-	if err != nil {
-		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
-		json.NewEncoder(ctx).Encode(err)
-		return
-	}
+	if err != nil {
+		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
+		ctx.SetContentType("application/json")
+		json.NewEncoder(ctx).Encode(map[string]string{
+			"error": "Internal server error",
+		})
+		return
+	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
json.NewEncoder(ctx).Encode(err)
return
}
if err != nil {
ctx.SetStatusCode(fasthttp.StatusInternalServerError)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Internal server error",
})
return
}
🤖 Prompt for AI Agents
In transports/bifrost-http/integrations/langgraph/router.go around lines 40 to
44, the error handling directly encodes the internal error which may expose
sensitive implementation details. Modify the error response to return a generic
error message or a sanitized error object instead of the raw error. Ensure the
status code remains InternalServerError and the response format is consistent
with other Bifrost client error handlers.


ctx.SetStatusCode(fasthttp.StatusOK)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(result)
}
48 changes: 48 additions & 0 deletions transports/bifrost-http/integrations/langgraph/types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package langgraph

import schemas "github.com/maximhq/bifrost/core/schemas"

// ChatCompletionRequest describes the payload expected by LangGraph's HTTP
// interface, which is largely compatible with OpenAI's API.
//
// All pointer fields are optional: a nil value means the field was absent
// from the JSON payload and the provider default applies.
type ChatCompletionRequest struct {
	Model             string                   `json:"model"`    // Requested model identifier; may be overridden at conversion time.
	Messages          []schemas.BifrostMessage `json:"messages"` // Conversation history in Bifrost's message format.
	Temperature       *float64                 `json:"temperature,omitempty"`
	TopP              *float64                 `json:"top_p,omitempty"`
	TopK              *int                     `json:"top_k,omitempty"`
	MaxTokens         *int                     `json:"max_tokens,omitempty"`
	StopSequences     *[]string                `json:"stop_sequences,omitempty"`
	PresencePenalty   *float64                 `json:"presence_penalty,omitempty"`
	FrequencyPenalty  *float64                 `json:"frequency_penalty,omitempty"`
	ParallelToolCalls *bool                    `json:"parallel_tool_calls,omitempty"`
	Tools             *[]schemas.Tool          `json:"tools,omitempty"`       // Tool definitions made available to the model.
	ToolChoice        *schemas.ToolChoice      `json:"tool_choice,omitempty"` // Constraint on which tool (if any) the model should call.
}

// ConvertToBifrostRequest converts the request to a BifrostRequest.
// An empty model argument falls back to the model named in the request body;
// all optional sampling/tool parameters are copied through unchanged.
func (r *ChatCompletionRequest) ConvertToBifrostRequest(model string) *schemas.BifrostRequest {
	selected := model
	if selected == "" {
		selected = r.Model
	}

	// NOTE(review): Provider is hardcoded to OpenAI; if LangGraph is configured
	// against a different upstream provider this will misattribute the request —
	// confirm whether provider should be derived from the payload.
	return &schemas.BifrostRequest{
		Provider: schemas.OpenAI,
		Model:    selected,
		Input: schemas.RequestInput{
			ChatCompletionInput: &r.Messages,
		},
		Params: &schemas.ModelParameters{
			Temperature:       r.Temperature,
			TopP:              r.TopP,
			TopK:              r.TopK,
			MaxTokens:         r.MaxTokens,
			StopSequences:     r.StopSequences,
			PresencePenalty:   r.PresencePenalty,
			FrequencyPenalty:  r.FrequencyPenalty,
			ParallelToolCalls: r.ParallelToolCalls,
			Tools:             r.Tools,
			ToolChoice:        r.ToolChoice,
		},
	}
}
Loading