From 2e59d2661b031b5ea8eac340010cb947245c9c45 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Fri, 12 Jul 2024 17:56:14 +0200
Subject: [PATCH] feat (provider/google): add cachedContent optional setting (#2261)

---
 .changeset/cool-donkeys-build.md                            | 5 +++++
 .../01-ai-sdk-providers/10-google-generative-ai.mdx         | 5 +++++
 packages/google/src/google-generative-ai-language-model.ts  | 3 +++
 packages/google/src/google-generative-ai-settings.ts        | 7 +++++++
 4 files changed, 20 insertions(+)
 create mode 100644 .changeset/cool-donkeys-build.md

diff --git a/.changeset/cool-donkeys-build.md b/.changeset/cool-donkeys-build.md
new file mode 100644
index 00000000000..19adc4ad9e4
--- /dev/null
+++ b/.changeset/cool-donkeys-build.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/google': patch
+---
+
+feat (provider/google): add cachedContent optional setting
diff --git a/content/providers/01-ai-sdk-providers/10-google-generative-ai.mdx b/content/providers/01-ai-sdk-providers/10-google-generative-ai.mdx
index 012d64efcf3..c294c6ee333 100644
--- a/content/providers/01-ai-sdk-providers/10-google-generative-ai.mdx
+++ b/content/providers/01-ai-sdk-providers/10-google-generative-ai.mdx
@@ -93,6 +93,11 @@ The following optional settings are available for Google Generative AI models:
   Top-k sampling considers the set of topK most probable tokens.
   Models running with nucleus sampling don't allow topK setting.
 
+- **cachedContent** _string_
+
+  Optional. The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+
 - **safetySettings** _Array\<\{ category: string; threshold: string \}\>_
 
   Optional. Safety settings for the model.
diff --git a/packages/google/src/google-generative-ai-language-model.ts b/packages/google/src/google-generative-ai-language-model.ts
index da4417aed78..37f405edd62 100644
--- a/packages/google/src/google-generative-ai-language-model.ts
+++ b/packages/google/src/google-generative-ai-language-model.ts
@@ -110,6 +110,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
         systemInstruction,
         safetySettings: this.settings.safetySettings,
         ...prepareToolsAndToolConfig(mode),
+        cachedContent: this.settings.cachedContent,
       },
       warnings,
     };
@@ -125,6 +126,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
         contents,
         systemInstruction,
         safetySettings: this.settings.safetySettings,
+        cachedContent: this.settings.cachedContent,
       },
       warnings,
     };
@@ -146,6 +148,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
         },
         toolConfig: { functionCallingConfig: { mode: 'ANY' } },
         safetySettings: this.settings.safetySettings,
+        cachedContent: this.settings.cachedContent,
       },
       warnings,
     };
diff --git a/packages/google/src/google-generative-ai-settings.ts b/packages/google/src/google-generative-ai-settings.ts
index 36aa32c18e0..d43193bfd0a 100644
--- a/packages/google/src/google-generative-ai-settings.ts
+++ b/packages/google/src/google-generative-ai-settings.ts
@@ -16,6 +16,13 @@ Models running with nucleus sampling don't allow topK setting.
  */
   topK?: number;
 
+  /**
+Optional.
+The name of the cached content used as context to serve the prediction.
+Format: cachedContents/{cachedContent}
+ */
+  cachedContent?: string;
+
   /**
 Optional.
 A list of unique safety settings for blocking unsafe content.
    */
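
For context, a minimal usage sketch of the new setting from application code (not part of the patch). The model ID and cache name below are illustrative placeholders; the cached content must already exist on the Google side, for example created through the Google Generative AI caching API:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// Pass the new optional setting when instantiating the model.
// 'cachedContents/example-cache-id' is a hypothetical cache name and
// must reference a cache that already exists for your API key/project.
const model = google('models/gemini-1.5-pro-latest', {
  cachedContent: 'cachedContents/example-cache-id',
});

// The cached content is then used as context for the prediction.
const { text } = await generateText({
  model,
  prompt: 'Summarize the cached document.',
});

console.log(text);

The setting is threaded into all three request shapes built in getArgs (regular, object-json, and object-tool modes), so it applies regardless of how the model is invoked.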