Skip to content

Commit 59ee1c9

Browse files
daniel-lxs and mrubens authored
feat: add native tools support for OpenAI-compatible providers (#9676)
Co-authored-by: Matt Rubens <[email protected]>
1 parent 5d02099 commit 59ee1c9

File tree

6 files changed

+36
-2
lines changed

6 files changed

+36
-2
lines changed

packages/types/src/providers/featherless.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ export const featherlessModels = {
3131
contextWindow: 32678,
3232
supportsImages: false,
3333
supportsPromptCache: false,
34+
supportsNativeTools: true,
3435
inputPrice: 0,
3536
outputPrice: 0,
3637
description: "Kimi K2 Instruct model.",
@@ -49,6 +50,7 @@ export const featherlessModels = {
4950
contextWindow: 32678,
5051
supportsImages: false,
5152
supportsPromptCache: false,
53+
supportsNativeTools: true,
5254
inputPrice: 0,
5355
outputPrice: 0,
5456
description: "Qwen3 Coder 480B A35B Instruct model.",

packages/types/src/providers/fireworks.ts

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ export const fireworksModels = {
2323
contextWindow: 262144,
2424
supportsImages: false,
2525
supportsPromptCache: true,
26+
supportsNativeTools: true,
2627
inputPrice: 0.6,
2728
outputPrice: 2.5,
2829
cacheReadsPrice: 0.15,
@@ -34,6 +35,7 @@ export const fireworksModels = {
3435
contextWindow: 128000,
3536
supportsImages: false,
3637
supportsPromptCache: false,
38+
supportsNativeTools: true,
3739
inputPrice: 0.6,
3840
outputPrice: 2.5,
3941
description:
@@ -44,6 +46,7 @@ export const fireworksModels = {
4446
contextWindow: 204800,
4547
supportsImages: false,
4648
supportsPromptCache: false,
49+
supportsNativeTools: true,
4750
inputPrice: 0.3,
4851
outputPrice: 1.2,
4952
description:
@@ -54,6 +57,7 @@ export const fireworksModels = {
5457
contextWindow: 256000,
5558
supportsImages: false,
5659
supportsPromptCache: false,
60+
supportsNativeTools: true,
5761
inputPrice: 0.22,
5862
outputPrice: 0.88,
5963
description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025.",
@@ -63,6 +67,7 @@ export const fireworksModels = {
6367
contextWindow: 256000,
6468
supportsImages: false,
6569
supportsPromptCache: false,
70+
supportsNativeTools: true,
6671
inputPrice: 0.45,
6772
outputPrice: 1.8,
6873
description: "Qwen3's most agentic code model to date.",
@@ -72,6 +77,7 @@ export const fireworksModels = {
7277
contextWindow: 160000,
7378
supportsImages: false,
7479
supportsPromptCache: false,
80+
supportsNativeTools: true,
7581
inputPrice: 3,
7682
outputPrice: 8,
7783
description:
@@ -82,6 +88,7 @@ export const fireworksModels = {
8288
contextWindow: 128000,
8389
supportsImages: false,
8490
supportsPromptCache: false,
91+
supportsNativeTools: true,
8592
inputPrice: 0.9,
8693
outputPrice: 0.9,
8794
description:
@@ -92,6 +99,7 @@ export const fireworksModels = {
9299
contextWindow: 163840,
93100
supportsImages: false,
94101
supportsPromptCache: false,
102+
supportsNativeTools: true,
95103
inputPrice: 0.56,
96104
outputPrice: 1.68,
97105
description:
@@ -102,6 +110,7 @@ export const fireworksModels = {
102110
contextWindow: 128000,
103111
supportsImages: false,
104112
supportsPromptCache: false,
113+
supportsNativeTools: true,
105114
inputPrice: 0.55,
106115
outputPrice: 2.19,
107116
description:
@@ -112,6 +121,7 @@ export const fireworksModels = {
112121
contextWindow: 128000,
113122
supportsImages: false,
114123
supportsPromptCache: false,
124+
supportsNativeTools: true,
115125
inputPrice: 0.55,
116126
outputPrice: 2.19,
117127
description:
@@ -122,6 +132,7 @@ export const fireworksModels = {
122132
contextWindow: 198000,
123133
supportsImages: false,
124134
supportsPromptCache: false,
135+
supportsNativeTools: true,
125136
inputPrice: 0.55,
126137
outputPrice: 2.19,
127138
description:
@@ -132,6 +143,7 @@ export const fireworksModels = {
132143
contextWindow: 128000,
133144
supportsImages: false,
134145
supportsPromptCache: false,
146+
supportsNativeTools: true,
135147
inputPrice: 0.07,
136148
outputPrice: 0.3,
137149
description:
@@ -142,6 +154,7 @@ export const fireworksModels = {
142154
contextWindow: 128000,
143155
supportsImages: false,
144156
supportsPromptCache: false,
157+
supportsNativeTools: true,
145158
inputPrice: 0.15,
146159
outputPrice: 0.6,
147160
description:

packages/types/src/providers/io-intelligence.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,27 +18,31 @@ export const ioIntelligenceModels = {
1818
contextWindow: 128000,
1919
supportsImages: false,
2020
supportsPromptCache: false,
21+
supportsNativeTools: true,
2122
description: "DeepSeek R1 reasoning model",
2223
},
2324
"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
2425
maxTokens: 8192,
2526
contextWindow: 430000,
2627
supportsImages: true,
2728
supportsPromptCache: false,
29+
supportsNativeTools: true,
2830
description: "Llama 4 Maverick 17B model",
2931
},
3032
"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
3133
maxTokens: 8192,
3234
contextWindow: 106000,
3335
supportsImages: false,
3436
supportsPromptCache: false,
37+
supportsNativeTools: true,
3538
description: "Qwen3 Coder 480B specialized for coding",
3639
},
3740
"openai/gpt-oss-120b": {
3841
maxTokens: 8192,
3942
contextWindow: 131072,
4043
supportsImages: false,
4144
supportsPromptCache: false,
45+
supportsNativeTools: true,
4246
description: "OpenAI GPT-OSS 120B model",
4347
},
4448
} as const satisfies Record<string, ModelInfo>

packages/types/src/providers/sambanova.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ export const sambaNovaModels = {
2121
contextWindow: 16384,
2222
supportsImages: false,
2323
supportsPromptCache: false,
24+
supportsNativeTools: true,
2425
inputPrice: 0.1,
2526
outputPrice: 0.2,
2627
description: "Meta Llama 3.1 8B Instruct model with 16K context window.",
@@ -30,6 +31,7 @@ export const sambaNovaModels = {
3031
contextWindow: 131072,
3132
supportsImages: false,
3233
supportsPromptCache: false,
34+
supportsNativeTools: true,
3335
inputPrice: 0.6,
3436
outputPrice: 1.2,
3537
description: "Meta Llama 3.3 70B Instruct model with 128K context window.",
@@ -40,6 +42,7 @@ export const sambaNovaModels = {
4042
supportsImages: false,
4143
supportsPromptCache: false,
4244
supportsReasoningBudget: true,
45+
supportsNativeTools: true,
4346
inputPrice: 5.0,
4447
outputPrice: 7.0,
4548
description: "DeepSeek R1 reasoning model with 32K context window.",
@@ -49,6 +52,7 @@ export const sambaNovaModels = {
4952
contextWindow: 32768,
5053
supportsImages: false,
5154
supportsPromptCache: false,
55+
supportsNativeTools: true,
5256
inputPrice: 3.0,
5357
outputPrice: 4.5,
5458
description: "DeepSeek V3 model with 32K context window.",
@@ -58,6 +62,7 @@ export const sambaNovaModels = {
5862
contextWindow: 32768,
5963
supportsImages: false,
6064
supportsPromptCache: false,
65+
supportsNativeTools: true,
6166
inputPrice: 3.0,
6267
outputPrice: 4.5,
6368
description: "DeepSeek V3.1 model with 32K context window.",
@@ -76,6 +81,7 @@ export const sambaNovaModels = {
7681
contextWindow: 131072,
7782
supportsImages: true,
7883
supportsPromptCache: false,
84+
supportsNativeTools: true,
7985
inputPrice: 0.63,
8086
outputPrice: 1.8,
8187
description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.",
@@ -94,6 +100,7 @@ export const sambaNovaModels = {
94100
contextWindow: 8192,
95101
supportsImages: false,
96102
supportsPromptCache: false,
103+
supportsNativeTools: true,
97104
inputPrice: 0.4,
98105
outputPrice: 0.8,
99106
description: "Alibaba Qwen 3 32B model with 8K context window.",
@@ -103,6 +110,7 @@ export const sambaNovaModels = {
103110
contextWindow: 131072,
104111
supportsImages: false,
105112
supportsPromptCache: false,
113+
supportsNativeTools: true,
106114
inputPrice: 0.22,
107115
outputPrice: 0.59,
108116
description: "OpenAI gpt oss 120b model with 128k context window.",

src/api/providers/__tests__/io-intelligence.spec.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -255,6 +255,7 @@ describe("IOIntelligenceHandler", () => {
255255
description: "Llama 4 Maverick 17B model",
256256
supportsImages: true,
257257
supportsPromptCache: false,
258+
supportsNativeTools: true,
258259
})
259260
})
260261

@@ -271,6 +272,7 @@ describe("IOIntelligenceHandler", () => {
271272
description: "Llama 4 Maverick 17B model",
272273
supportsImages: true,
273274
supportsPromptCache: false,
275+
supportsNativeTools: true,
274276
})
275277
})
276278

src/api/providers/featherless.ts

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import { convertToR1Format } from "../transform/r1-format"
1313
import { convertToOpenAiMessages } from "../transform/openai-format"
1414
import { ApiStream } from "../transform/stream"
1515

16+
import type { ApiHandlerCreateMessageMetadata } from "../index"
1617
import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
1718

1819
export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<FeatherlessModelId> {
@@ -49,7 +50,11 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
4950
}
5051
}
5152

52-
override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
53+
override async *createMessage(
54+
systemPrompt: string,
55+
messages: Anthropic.Messages.MessageParam[],
56+
metadata?: ApiHandlerCreateMessageMetadata,
57+
): ApiStream {
5358
const model = this.getModel()
5459

5560
if (model.id.includes("DeepSeek-R1")) {
@@ -90,7 +95,7 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
9095
yield processedChunk
9196
}
9297
} else {
93-
yield* super.createMessage(systemPrompt, messages)
98+
yield* super.createMessage(systemPrompt, messages, metadata)
9499
}
95100
}
96101

0 commit comments

Comments (0)