diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 2b1e590179..5b025e6e4c 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -17844,6 +17844,92 @@ "x-state": "Added in 8.16.0" } }, + "/_inference/{task_type}/{amazonbedrock_inference_id}": { + "put": { + "tags": [ + "inference" + ], + "summary": "Create an Amazon Bedrock inference endpoint", + "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "operationId": "inference-put-amazonbedrock", + "parameters": [ + { + "in": "path", + "name": "task_type", + "description": "The type of the inference task that the model will perform.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockTaskType" + }, + "style": "simple" + }, + { + "in": "path", + "name": "amazonbedrock_inference_id", + "description": "The unique identifier of the inference endpoint.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types:Id" + }, + "style": "simple" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "chunking_settings": { + "$ref": "#/components/schemas/inference._types:InferenceChunkingSettings" + }, + "service": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:ServiceType" + }, + "service_settings": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockServiceSettings" + }, + "task_settings": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockTaskSettings" + } + }, + "required": [ + "service", + "service_settings" + ] + }, + "examples": { + "PutAmazonBedrockRequestExample1": { + "summary": "A text embedding task", + "description": "Run `PUT _inference/text_embedding/amazon_bedrock_embeddings` to create an inference endpoint that performs a text embedding task.", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-embed-text-v2:0\"\n }\n}" + }, + "PutAmazonBedrockRequestExample2": { + "summary": "A completion task", + "description": "Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task.", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n 
\"api_key\": \"OpenAI-API-Key\",\n \"model_id\": \"gpt-3.5-turbo\"\n }\n}" + } + } + } + } + }, + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/inference._types:InferenceEndpointInfo" + } + } + } + } + }, + "x-state": "Added in 8.12.0" + } + }, "/_inference/{task_type}/{azureaistudio_inference_id}": { "put": { "tags": [ @@ -18321,8 +18407,8 @@ }, "PutOpenAiRequestExample2": { "summary": "A completion task", - "description": "Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task type.", - "value": "{\n \"service\": \"openai\",\n \"service_settings\": {\n \"api_key\": \"OpenAI-API-Key\",\n \"model_id\": \"gpt-3.5-turbo\"\n }\n}" + "description": "Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task.", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-text-premier-v1:0\"\n }\n}" } } } @@ -77397,6 +77483,83 @@ } } }, + "inference.put_amazonbedrock:AmazonBedrockTaskType": { + "type": "string", + "enum": [ + "completion", + "text_embedding" + ] + }, + "inference.put_amazonbedrock:ServiceType": { + "type": "string", + "enum": [ + "amazonbedrock" + ] + }, + "inference.put_amazonbedrock:AmazonBedrockServiceSettings": { + "type": "object", + "properties": { + "access_key": { + "description": "A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests.", + "type": "string" + }, + "model": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html" + }, + "description": "The base model ID or an ARN to a custom model based on a foundational model.\nThe base model IDs can be found in the Amazon Bedrock documentation.\nNote that the model ID must be available for the provider chosen and your IAM user must have access to the model.", + "type": "string" + }, + "provider": { + "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `amazontitan` - available for `text_embedding` and `completion` task types\n* `anthropic` - available for `completion` task type only\n* `ai21labs` - available for `completion` task type only\n* `cohere` - available for `text_embedding` and `completion` task types\n* `meta` - available for `completion` task type only\n* `mistral` - available for `completion` task type only", + "type": "string" + }, + "region": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html" + }, + "description": "The region that your model or ARN is deployed in.\nThe list of available regions per model can be found in the Amazon Bedrock documentation.", + "type": "string" + }, + "rate_limit": { + "$ref": "#/components/schemas/inference._types:RateLimitSetting" + }, + "secret_key": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html" + }, + "description": "A valid AWS secret key that is paired with the `access_key`.\nFor informationg about creating and managing access and secret keys, refer to the AWS documentation.", + "type": "string" + } + }, + "required": [ + "access_key", + "model", + "region", + 
"secret_key" + ] + }, + "inference.put_amazonbedrock:AmazonBedrockTaskSettings": { + "type": "object", + "properties": { + "max_new_tokens": { + "description": "For a `completion` task, it sets the maximum number for the output tokens to be generated.", + "type": "number" + }, + "temperature": { + "description": "For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.\nAt temperature 0.0 the model is most deterministic, at temperature 1.0 most random.\nIt should not be used if `top_p` or `top_k` is specified.", + "type": "number" + }, + "top_k": { + "description": "For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability.\nIt is only available for anthropic, cohere, and mistral providers.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "type": "number" + }, + "top_p": { + "description": "For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens.\nTop-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "type": "number" + } + } + }, "inference.put_azureaistudio:AzureAiStudioTaskType": { "type": "string", "enum": [ diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 6db845f83b..dd058f24b7 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -9670,6 +9670,92 @@ "x-state": "Added in 8.16.0" } }, + "/_inference/{task_type}/{amazonbedrock_inference_id}": { + "put": { + "tags": [ + "inference" + ], + "summary": "Create an Amazon Bedrock inference endpoint", + "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "operationId": "inference-put-amazonbedrock", + "parameters": [ + { + "in": "path", + "name": "task_type", + "description": "The type of the inference task that the model will perform.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockTaskType" + }, + "style": "simple" + }, + { + "in": "path", + "name": "amazonbedrock_inference_id", + "description": "The unique identifier of the inference endpoint.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types:Id" + }, + "style": "simple" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "chunking_settings": { + "$ref": "#/components/schemas/inference._types:InferenceChunkingSettings" + }, + "service": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:ServiceType" + }, + "service_settings": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockServiceSettings" + }, + "task_settings": { + "$ref": "#/components/schemas/inference.put_amazonbedrock:AmazonBedrockTaskSettings" + } + }, + "required": [ + "service", + "service_settings" + ] + }, + "examples": { + "PutAmazonBedrockRequestExample1": { + "summary": "A text embedding task", + "description": "Run `PUT _inference/text_embedding/amazon_bedrock_embeddings` to create an inference endpoint that performs a text embedding task.", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-embed-text-v2:0\"\n }\n}" + }, + "PutAmazonBedrockRequestExample2": { + "summary": "A completion task", + "description": "Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task.", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-text-premier-v1:0\"\n }\n}" + } + } + } + } + }, + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/inference._types:InferenceEndpointInfo" + } + } + } + } + }, + "x-state": "Added in 8.12.0" + } + }, "/_inference/{task_type}/{azureaistudio_inference_id}": { "put": { "tags": [ 
@@ -48593,6 +48679,83 @@ } } }, + "inference.put_amazonbedrock:AmazonBedrockTaskType": { + "type": "string", + "enum": [ + "completion", + "text_embedding" + ] + }, + "inference.put_amazonbedrock:ServiceType": { + "type": "string", + "enum": [ + "amazonbedrock" + ] + }, + "inference.put_amazonbedrock:AmazonBedrockServiceSettings": { + "type": "object", + "properties": { + "access_key": { + "description": "A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests.", + "type": "string" + }, + "model": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html" + }, + "description": "The base model ID or an ARN to a custom model based on a foundational model.\nThe base model IDs can be found in the Amazon Bedrock documentation.\nNote that the model ID must be available for the provider chosen and your IAM user must have access to the model.", + "type": "string" + }, + "provider": { + "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `amazontitan` - available for `text_embedding` and `completion` task types\n* `anthropic` - available for `completion` task type only\n* `ai21labs` - available for `completion` task type only\n* `cohere` - available for `text_embedding` and `completion` task types\n* `meta` - available for `completion` task type only\n* `mistral` - available for `completion` task type only", + "type": "string" + }, + "region": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html" + }, + "description": "The region that your model or ARN is deployed in.\nThe list of available regions per model can be found in the Amazon Bedrock documentation.", + "type": "string" + }, + "rate_limit": { + "$ref": "#/components/schemas/inference._types:RateLimitSetting" + }, + "secret_key": { + "externalDocs": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html" + }, + "description": "A valid AWS secret key that is paired with the `access_key`.\nFor information about creating and managing access and secret keys, refer to the AWS documentation.", + "type": "string" + } + }, + "required": [ + "access_key", + "model", + "region", + "secret_key" + ] + }, + "inference.put_amazonbedrock:AmazonBedrockTaskSettings": { + "type": "object", + "properties": { + "max_new_tokens": { + "description": "For a `completion` task, it sets the maximum number of output tokens to be generated.", + "type": "number" + }, + "temperature": { + "description": "For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.\nAt temperature 0.0 the model is most deterministic, at temperature 1.0 most random.\nIt should not be used if `top_p` or `top_k` is specified.", + "type": "number" + }, + "top_k": { + "description": "For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability.\nIt is only available for anthropic, cohere, and mistral providers.\nIt 
is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "type": "number" + }, + "top_p": { + "description": "For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens.\nTop-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "type": "number" + } + } + }, "inference.put_azureaistudio:AzureAiStudioTaskType": { "type": "string", "enum": [ diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index 8820a53585..24af2546f3 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -4598,15 +4598,72 @@ "visibility": "public" }, "stack": { +<<<<<<< HEAD +======= +<<<<<<< HEAD +======= + "since": "8.12.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-amazonbedrock", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-bedrock.html", + "name": "inference.put_amazonbedrock", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_amazonbedrock" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_amazonbedrock" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{amazonbedrock_inference_id}" + } + ] + }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "since": "8.16.0", "stability": "stable", "visibility": "public" } }, +<<<<<<< HEAD "description": "Create an AlibabaCloud AI Search inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook 
for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-alibabacloud", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-alibabacloud-ai-search.html", "name": "inference.put_alibabacloud", +======= + "description": "Create an Anthropic inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `anthropic` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-anthropic", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-anthropic.html", + "name": "inference.put_anthropic", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "privileges": { "cluster": [ "manage_inference" @@ -4614,7 +4671,11 @@ }, "request": { "name": "Request", +<<<<<<< HEAD "namespace": "inference.put_alibabacloud" +======= + "namespace": "inference.put_anthropic" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "requestBodyRequired": false, "requestMediaType": [ @@ -4622,7 +4683,11 @@ ], "response": { "name": "Response", +<<<<<<< HEAD "namespace": "inference.put_alibabacloud" +======= + "namespace": "inference.put_anthropic" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "responseMediaType": [ "application/json" @@ -4632,7 +4697,11 @@ "methods": [ "PUT" ], +<<<<<<< HEAD "path": "/_inference/{task_type}/{alibabacloud_inference_id}" +======= + "path": "/_inference/{task_type}/{anthropic_inference_id}" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } ] }, @@ -4643,15 +4712,26 @@ "visibility": "public" }, "stack": { +<<<<<<< HEAD "since": "8.14.0", +======= + "since": "8.13.0", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "stability": "stable", "visibility": "public" } }, +<<<<<<< HEAD "description": "Create an Azure AI studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-azureaistudio", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-ai-studio.html", "name": "inference.put_azureaistudio", +======= + "description": "Create a Cohere inference endpoint.\n\nCreate an inference 
endpoint to perform an inference task with the `cohere` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-put-cohere", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-cohere.html", + "name": "inference.put_cohere", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "privileges": { "cluster": [ "manage_inference" @@ -4659,7 +4739,11 @@ }, "request": { "name": "Request", +<<<<<<< HEAD "namespace": "inference.put_azureaistudio" +======= + "namespace": "inference.put_cohere" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "requestBodyRequired": false, "requestMediaType": [ @@ -4667,7 +4751,11 @@ ], "response": { "name": "Response", +<<<<<<< HEAD "namespace": "inference.put_azureaistudio" +======= + "namespace": "inference.put_cohere" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "responseMediaType": [ "application/json" @@ -4677,7 +4765,11 @@ "methods": [ "PUT" ], +<<<<<<< HEAD "path": "/_inference/{task_type}/{azureaistudio_inference_id}" +======= + "path": "/_inference/{task_type}/{cohere_inference_id}" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } ] }, @@ -4688,15 +4780,26 @@ "visibility": "public" }, "stack": { +<<<<<<< HEAD "since": "8.14.0", +======= + "since": "8.12.0", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "stability": "stable", "visibility": "public" } }, +<<<<<<< HEAD "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-azureopenai", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-openai.html", 
"name": "inference.put_azureopenai", +======= + "description": "Create an Elastic Inference Service (EIS) inference endpoint.\n\nCreate an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).", + "docId": "inference-api-put-eis", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-eis.html", + "name": "inference.put_eis", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "privileges": { "cluster": [ "manage_inference" @@ -4704,7 +4807,11 @@ }, "request": { "name": "Request", +<<<<<<< HEAD "namespace": "inference.put_azureopenai" +======= + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "requestBodyRequired": false, "requestMediaType": [ @@ -4712,7 +4819,11 @@ ], "response": { "name": "Response", +<<<<<<< HEAD "namespace": "inference.put_azureopenai" +======= + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "responseMediaType": [ "application/json" @@ -4722,7 +4833,197 @@ "methods": [ "PUT" ], +<<<<<<< HEAD "path": "/_inference/{task_type}/{azureopenai_inference_id}" +======= + "path": "/_inference/{task_type}/{eis_inference_id}" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + } + ] + }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { +<<<<<<< HEAD +======= + "since": "8.13.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create an OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. 
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-put-elasticsearch", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-elasticsearch.html", + "name": "inference.put_elasticsearch", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_elasticsearch" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_elasticsearch" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{elasticsearch_inference_id}" + } + ] + }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { + "since": "8.11.0", + "stability": "stable", + "visibility": "public" + } + }, + "deprecation": { + "description": "The elser service is deprecated and will be removed in a future release. Use the Elasticsearch inference integration instead, with model_id included in the service_settings.", + "version": "8.16.0" + }, + "description": "Create an ELSER inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the endpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. 
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-put-elser", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-elser.html", + "name": "inference.put_elser", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_elser" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_elser" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{elser_inference_id}" + } + ] + }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { + "since": "8.15.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create a Google AI Studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googleaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-put-googleaistudio", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-google-ai-studio.html", + "name": "inference.put_googleaistudio", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_googleaistudio" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_googleaistudio" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{googleaistudio_inference_id}" + } + ] + }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { + "since": "8.15.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create a Google Vertex AI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googlevertexai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the 
`\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-put-googlevertexai", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-google-vertex-ai.html", + "name": "inference.put_googlevertexai", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_googlevertexai" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_googlevertexai" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{googlevertexai_inference_id}" } ] }, @@ -4733,6 +5034,8 @@ "visibility": "public" }, "stack": { +>>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "since": "8.12.0", "stability": "stable", "visibility": "public" @@ -27203,26 +27506,45 @@ } }, { +<<<<<<< HEAD "description": "The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`.", +======= +<<<<<<< HEAD +======= + "description": "The type of service supported for the specified task type. In this case, `amazonbedrock`.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", +<<<<<<< HEAD "namespace": "inference.put_alibabacloud" +======= + "namespace": "inference.put_amazonbedrock" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } }, { +<<<<<<< HEAD "description": "Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service.", +======= + "description": "Settings used to install the inference model. 
These settings are specific to the `amazonbedrock` service.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { +<<<<<<< HEAD "name": "AlibabaCloudServiceSettings", "namespace": "inference.put_alibabacloud" +======= + "name": "AmazonBedrockServiceSettings", + "namespace": "inference.put_amazonbedrock" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } }, @@ -27233,13 +27555,19 @@ "type": { "kind": "instance_of", "type": { +<<<<<<< HEAD "name": "AlibabaCloudTaskSettings", "namespace": "inference.put_alibabacloud" +======= + "name": "AmazonBedrockTaskSettings", + "namespace": "inference.put_amazonbedrock" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } } ] }, +<<<<<<< HEAD "description": "Create an AlibabaCloud AI Search inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { "PutAlibabaCloudRequestExample1": { @@ -27261,6 +27589,18 @@ "description": "Run `PUT _inference/text_embedding/alibabacloud_ai_search_embeddings` to create an inference endpoint that performs a text embedding task.", "summary": "A text embedding task", "value": "{\n \"service\": \"alibabacloud-ai-search\",\n \"service_settings\": {\n \"api_key\": \"AlibabaCloud-API-Key\",\n \"service_id\": \"ops-text-embedding-001\",\n \"host\": \"default-j01.platform-cn-shanghai.opensearch.aliyuncs.com\",\n \"workspace\": \"default\"\n }\n}" +======= + "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "examples": { + "PutAmazonBedrockRequestExample1": { + "description": "Run `PUT _inference/text_embedding/amazon_bedrock_embeddings` to create an inference endpoint that performs a text embedding task.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-embed-text-v2:0\"\n }\n}" + }, + "PutAmazonBedrockRequestExample2": { + "description": "Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task.", + "summary": "A completion task", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-text-premier-v1:0\"\n }\n}" } }, "inherits": { @@ -27272,7 +27612,7 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_alibabacloud" + "namespace": "inference.put_amazonbedrock" }, "path": [ { @@ -27282,14 +27622,14 @@ "type": { "kind": "instance_of", "type": { - "name": "AlibabaCloudTaskType", - "namespace": "inference.put_alibabacloud" + "name": "AmazonBedrockTaskType", + "namespace": "inference.put_amazonbedrock" } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "alibabacloud_inference_id", + "name": "amazonbedrock_inference_id", "required": true, "type": { "kind": "instance_of", @@ -27301,7 +27641,7 @@ } ], "query": [], - "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L27-L80" + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L28-L84" }, { "body": { @@ -27317,9 +27657,9 @@ "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_alibabacloud" + "namespace": "inference.put_amazonbedrock" }, - "specLocation": "inference/put_alibabacloud/PutAlibabaCloudResponse.ts#L22-L24" + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -27343,26 +27683,26 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `azureaistudio`.", + "description": "The type of service supported for the specified task type. In this case, `anthropic`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_azureaistudio" + "namespace": "inference.put_anthropic" } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `openai` service.", + "description": "Settings used to install the inference model. 
These settings are specific to the `anthropic` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "AzureAiStudioServiceSettings", - "namespace": "inference.put_azureaistudio" + "name": "AnthropicServiceSettings", + "namespace": "inference.put_anthropic" } } }, @@ -27373,24 +27713,18 @@ "type": { "kind": "instance_of", "type": { - "name": "AzureAiStudioTaskSettings", - "namespace": "inference.put_azureaistudio" + "name": "AnthropicTaskSettings", + "namespace": "inference.put_anthropic" } } } ] }, - "description": "Create an Azure AI studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an Anthropic inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `anthropic` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutAzureAiStudioRequestExample1": { - "description": "Run `PUT _inference/text_embedding/azure_ai_studio_embeddings` to create an inference endpoint that performs a text_embedding task. 
Note that you do not specify a model here, as it is defined already in the Azure AI Studio deployment.", - "summary": "A text embedding task", - "value": "{\n \"service\": \"azureaistudio\",\n \"service_settings\": {\n \"api_key\": \"Azure-AI-Studio-API-key\",\n \"target\": \"Target-Uri\",\n \"provider\": \"openai\",\n \"endpoint_type\": \"token\"\n }\n}" - }, - "PutAzureAiStudioRequestExample2": { - "description": "Run `PUT _inference/completion/azure_ai_studio_completion` to create an inference endpoint that performs a completion task.", - "summary": "A completion task", - "value": "{\n \"service\": \"azureaistudio\",\n \"service_settings\": {\n \"api_key\": \"Azure-AI-Studio-API-key\",\n \"target\": \"Target-URI\",\n \"provider\": \"databricks\",\n \"endpoint_type\": \"realtime\"\n }\n}" + "PutAnthropicRequestExample1": { + "description": "Run `PUT _inference/completion/anthropic_completion` to create an inference endpoint that performs a completion task.", + "value": "{\n \"service\": \"anthropic\",\n \"service_settings\": {\n \"api_key\": \"Anthropic-Api-Key\",\n \"model_id\": \"Model-ID\"\n },\n \"task_settings\": {\n \"max_tokens\": 1024\n }\n}" } }, "inherits": { @@ -27402,24 +27736,24 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_azureaistudio" + "namespace": "inference.put_anthropic" }, "path": [ { - "description": "The type of the inference task that the model will perform.", + "description": "The task type.\nThe only valid task type for the model to perform is `completion`.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "AzureAiStudioTaskType", - "namespace": "inference.put_azureaistudio" + "name": "AnthropicTaskType", + "namespace": "inference.put_anthropic" } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "azureaistudio_inference_id", + "name": "anthropic_inference_id", "required": true, "type": { "kind": "instance_of", @@ -27431,7 +27765,7 @@ } ], "query": [], - "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L28-L81" + "specLocation": "inference/put_anthropic/PutAnthropicRequest.ts#L28-L82" }, { "body": { @@ -27447,9 +27781,9 @@ "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_azureaistudio" + "namespace": "inference.put_anthropic" }, - "specLocation": "inference/put_azureaistudio/PutAzureAiStudioResponse.ts#L22-L24" + "specLocation": "inference/put_anthropic/PutAnthropicResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -27473,26 +27807,26 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `azureopenai`.", + "description": "The type of service supported for the specified task type. In this case, `cohere`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_azureopenai" + "namespace": "inference.put_cohere" } } }, { - "description": "Settings used to install the inference model. 
These settings are specific to the `azureopenai` service.", + "description": "Settings used to install the inference model.\nThese settings are specific to the `cohere` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "AzureOpenAIServiceSettings", - "namespace": "inference.put_azureopenai" + "name": "CohereServiceSettings", + "namespace": "inference.put_cohere" } } }, @@ -27503,24 +27837,25 @@ "type": { "kind": "instance_of", "type": { - "name": "AzureOpenAITaskSettings", - "namespace": "inference.put_azureopenai" + "name": "CohereTaskSettings", + "namespace": "inference.put_cohere" } } } ] }, - "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create a Cohere inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `cohere` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutAzureOpenAiRequestExample1": { - "description": "Run `PUT _inference/text_embedding/azure_openai_embeddings` to create an inference endpoint that performs a `text_embedding` task. 
You do not specify a model, as it is defined already in the Azure OpenAI deployment.", + "PutCohereRequestExample1": { + "description": "Run `PUT _inference/text_embedding/cohere-embeddings` to create an inference endpoint that performs a text embedding task.", "summary": "A text embedding task", - "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" + "value": "{\n \"service\": \"cohere\",\n \"service_settings\": {\n \"api_key\": \"Cohere-Api-key\",\n \"model_id\": \"embed-english-light-v3.0\",\n \"embedding_type\": \"byte\"\n }\n}" }, - "PutAzureOpenAiRequestExample2": { - "description": "Run `PUT _inference/completion/azure_openai_completion` to create an inference endpoint that performs a `completion` task.", - "summary": "A completion task", - "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" + "PutCohereRequestExample2": { + "description": "Run `PUT _inference/rerank/cohere-rerank` to create an inference endpoint that performs a rerank task.", + "summary": "A rerank task", + "value": "{\n \"service\": \"cohere\",\n \"service_settings\": {\n \"api_key\": \"Cohere-API-key\",\n \"model_id\": \"rerank-english-v3.0\"\n },\n \"task_settings\": {\n \"top_n\": 10,\n \"return_documents\": true\n }\n}" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } }, "inherits": { @@ -27532,24 +27867,37 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_azureopenai" +<<<<<<< HEAD + "namespace": "inference.put_alibabacloud" +======= + "namespace": "inference.put_cohere" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "path": [ { - "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", + "description": "The type of the inference task that the model will perform.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "AzureOpenAITaskType", - "namespace": "inference.put_azureopenai" +<<<<<<< HEAD + "name": "AlibabaCloudTaskType", + "namespace": "inference.put_alibabacloud" +======= + "name": "CohereTaskType", + "namespace": "inference.put_cohere" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "azureopenai_inference_id", +<<<<<<< HEAD + "name": "alibabacloud_inference_id", +======= + "name": "cohere_inference_id", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": true, "type": { "kind": "instance_of", @@ -27561,7 +27909,11 @@ } ], "query": [], - "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L27-L88" +<<<<<<< HEAD + "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L27-L80" +======= + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L28-L82" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "body": { @@ -27577,9 +27929,15 @@ "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_azureopenai" +<<<<<<< HEAD + "namespace": "inference.put_alibabacloud" }, - "specLocation": "inference/put_azureopenai/PutAzureOpenAiResponse.ts#L22-L24" + "specLocation": 
"inference/put_alibabacloud/PutAlibabaCloudResponse.ts#L22-L24" +======= + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereResponse.ts#L22-L24" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "attachedBehaviors": [ @@ -27589,6 +27947,7 @@ "kind": "properties", "properties": [ { +<<<<<<< HEAD "description": "The chunking configuration object.", "extDocId": "inference-chunking", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", @@ -27603,39 +27962,76 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `hugging_face`.", + "description": "The type of service supported for the specified task type. In this case, `azureaistudio`.", +======= + "description": "The type of service supported for the specified task type. In this case, `elastic`.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_hugging_face" +<<<<<<< HEAD + "namespace": "inference.put_azureaistudio" +======= + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `hugging_face` service.", +<<<<<<< HEAD + "description": "Settings used to install the inference model. These settings are specific to the `openai` service.", +======= + "description": "Settings used to install the inference model. These settings are specific to the `elastic` service.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "HuggingFaceServiceSettings", - "namespace": "inference.put_hugging_face" +<<<<<<< HEAD + "name": "AzureAiStudioServiceSettings", + "namespace": "inference.put_azureaistudio" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AzureAiStudioTaskSettings", + "namespace": "inference.put_azureaistudio" +======= + "name": "EisServiceSettings", + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } } ] }, - "description": "Create a Hugging Face inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* `multilingual-e5-base`\n* `multilingual-e5-small`\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model 
statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", +<<<<<<< HEAD + "description": "Create an Azure AI Studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutHuggingFaceRequestExample1": { - "description": "Run `PUT _inference/text_embedding/hugging-face-embeddings` to create an inference endpoint that performs a `text_embedding` task type.", + "PutAzureAiStudioRequestExample1": { + "description": "Run `PUT _inference/text_embedding/azure_ai_studio_embeddings` to create an inference endpoint that performs a `text_embedding` task. Note that you do not specify a model here, as it is defined already in the Azure AI Studio deployment.", "summary": "A text embedding task", - "value": "{\n \"service\": \"hugging_face\",\n \"service_settings\": {\n \"api_key\": \"hugging-face-access-token\", \n \"url\": \"url-endpoint\" \n }\n}" + "value": "{\n \"service\": \"azureaistudio\",\n \"service_settings\": {\n \"api_key\": \"Azure-AI-Studio-API-key\",\n \"target\": \"Target-URI\",\n \"provider\": \"openai\",\n \"endpoint_type\": \"token\"\n }\n}" + }, + "PutAzureAiStudioRequestExample2": { + "description": "Run `PUT _inference/completion/azure_ai_studio_completion` to create an inference endpoint that performs a completion task.", + "summary": "A completion task", + "value": "{\n \"service\": \"azureaistudio\",\n \"service_settings\": {\n \"api_key\": \"Azure-AI-Studio-API-key\",\n \"target\": \"Target-URI\",\n \"provider\": \"databricks\",\n \"endpoint_type\": \"realtime\"\n }\n}" + } }, +======= + "description": "Create an Elastic Inference Service (EIS) inference endpoint.\n\nCreate an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "inherits": { "type": { "name": "RequestBase", @@ -27645,7 +28041,8 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_hugging_face" +<<<<<<< HEAD + "namespace": "inference.put_azureaistudio" }, "path": [ { @@ -27655,14 +28052,14 @@ "type": { "kind": "instance_of", "type": { - "name": "HuggingFaceTaskType", - "namespace": "inference.put_hugging_face" + "name": "AzureAiStudioTaskType", + "namespace": "inference.put_azureaistudio" } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "huggingface_inference_id", + "name": "azureaistudio_inference_id", "required": true, "type": { "kind": "instance_of", @@ -27674,7 +28071,7 @@ } ], "query": [], - "specLocation": "inference/put_hugging_face/PutHuggingFaceRequest.ts#L27-L89" + "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L28-L81" }, { "body": { @@ -27690,9 +28087,9 @@
"kind": "response", "name": { "name": "Response", - "namespace": "inference.put_hugging_face" + "namespace": "inference.put_azureaistudio" }, - "specLocation": "inference/put_hugging_face/PutHuggingFaceResponse.ts#L22-L24" + "specLocation": "inference/put_azureaistudio/PutAzureAiStudioResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -27716,26 +28113,26 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `jinaai`.", + "description": "The type of service supported for the specified task type. In this case, `azureopenai`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_jinaai" + "namespace": "inference.put_azureopenai" } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `jinaai` service.", + "description": "Settings used to install the inference model. These settings are specific to the `azureopenai` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "JinaAIServiceSettings", - "namespace": "inference.put_jinaai" + "name": "AzureOpenAIServiceSettings", + "namespace": "inference.put_azureopenai" } } }, @@ -27746,24 +28143,24 @@ "type": { "kind": "instance_of", "type": { - "name": "JinaAITaskSettings", - "namespace": "inference.put_jinaai" + "name": "AzureOpenAITaskSettings", + "namespace": "inference.put_azureopenai" } } } ] }, - "description": "Create an JinaAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure 
that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutJinaAiRequestExample1": { - "description": "Run `PUT _inference/text_embedding/jinaai-embeddings` to create an inference endpoint for text embedding tasks using the JinaAI service.", + "PutAzureOpenAiRequestExample1": { + "description": "Run `PUT _inference/text_embedding/azure_openai_embeddings` to create an inference endpoint that performs a `text_embedding` task. You do not specify a model, as it is defined already in the Azure OpenAI deployment.", "summary": "A text embedding task", - "value": "{\n \"service\": \"jinaai\",\n \"service_settings\": {\n \"model_id\": \"jina-embeddings-v3\",\n \"api_key\": \"JinaAi-Api-key\"\n }\n}" + "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" }, - "PutJinaAiRequestExample2": { - "description": "Run `PUT _inference/rerank/jinaai-rerank` to create an inference endpoint for rerank tasks using the JinaAI service.", - "summary": "A rerank task", - "value": "{\n \"service\": \"jinaai\",\n \"service_settings\": {\n \"api_key\": \"JinaAI-Api-key\",\n \"model_id\": \"jina-reranker-v2-base-multilingual\"\n },\n \"task_settings\": {\n \"top_n\": 10,\n \"return_documents\": true\n }\n}" + "PutAzureOpenAiRequestExample2": { + "description": "Run `PUT _inference/completion/azure_openai_completion` to create an inference endpoint that performs a `completion` task.", + "summary": "A completion task", + "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" } }, "inherits": { @@ -27775,24 +28172,36 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_jinaai" + "namespace": "inference.put_azureopenai" +======= + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, "path": [ { - "description": "The type of the inference task that the model will perform.", + "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "JinaAITaskType", - "namespace": "inference.put_jinaai" +<<<<<<< HEAD + "name": "AzureOpenAITaskType", + "namespace": "inference.put_azureopenai" +======= + "name": "EisTaskType", + "namespace": "inference.put_eis" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "jinaai_inference_id", +<<<<<<< HEAD + "name": "azureopenai_inference_id", +======= + "name": "eis_inference_id", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": true, "type": { "kind": "instance_of", @@ -27804,7 +28213,11 @@ } ], "query": [], - "specLocation": "inference/put_jinaai/PutJinaAiRequest.ts#L28-L84" +<<<<<<< HEAD + "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L27-L88" +======= + "specLocation": "inference/put_eis/PutEisRequest.ts#L24-L62" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "body": { @@ 
-27820,9 +28233,15 @@ "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_jinaai" +<<<<<<< HEAD + "namespace": "inference.put_azureopenai" }, - "specLocation": "inference/put_jinaai/PutJinaAiResponse.ts#L22-L24" + "specLocation": "inference/put_azureopenai/PutAzureOpenAiResponse.ts#L22-L24" +======= + "namespace": "inference.put_eis" + }, + "specLocation": "inference/put_eis/PutEisResponse.ts#L22-L24" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "attachedBehaviors": [ @@ -27846,36 +28265,76 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `mistral`.", +<<<<<<< HEAD +======= + "description": "The type of service supported for the specified task type. In this case, `elasticsearch`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_mistral" + "namespace": "inference.put_elasticsearch" } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `mistral` service.", + "description": "Settings used to install the inference model. These settings are specific to the `elasticsearch` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "MistralServiceSettings", - "namespace": "inference.put_mistral" + "name": "ElasticsearchServiceSettings", + "namespace": "inference.put_elasticsearch" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "ElasticsearchTaskSettings", + "namespace": "inference.put_elasticsearch" } } } ] }, - "description": "Create a Mistral inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `mistral` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an Elasticsearch inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the endpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI.
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutMistralRequestExample1": { - "description": "Run `PUT _inference/text_embedding/mistral-embeddings-test` to create a Mistral inference endpoint that performs a text embedding task.", - "value": "{\n \"service\": \"mistral\",\n \"service_settings\": {\n \"api_key\": \"Mistral-API-Key\",\n \"model\": \"mistral-embed\" \n }\n}" + "PutElasticsearchRequestExample1": { + "description": "Run `PUT _inference/sparse_embedding/my-elser-model` to create an inference endpoint that performs a `sparse_embedding` task. The `model_id` must be the ID of one of the built-in ELSER models. The API will automatically download the ELSER model if it isn't already downloaded and then deploy the model.", + "summary": "ELSER sparse embedding task", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"adaptive_allocations\": { \n \"enabled\": true,\n \"min_number_of_allocations\": 1,\n \"max_number_of_allocations\": 4\n },\n \"num_threads\": 1,\n \"model_id\": \".elser_model_2\" \n }\n}" + }, + "PutElasticsearchRequestExample2": { + "description": "Run `PUT _inference/rerank/my-elastic-rerank` to create an inference endpoint that performs a rerank task using the built-in Elastic Rerank cross-encoder model. The `model_id` must be `.rerank-v1`, which is the ID of the built-in Elastic Rerank model. The API will automatically download the Elastic Rerank model if it isn't already downloaded and then deploy the model. Once deployed, the model can be used for semantic re-ranking with a `text_similarity_reranker` retriever.", + "summary": "Elastic rerank task", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"model_id\": \".rerank-v1\", \n \"num_threads\": 1,\n \"adaptive_allocations\": { \n \"enabled\": true,\n \"min_number_of_allocations\": 1,\n \"max_number_of_allocations\": 4\n }\n }\n}" + }, + "PutElasticsearchRequestExample3": { + "description": "Run `PUT _inference/text_embedding/my-e5-model` to create an inference endpoint that performs a `text_embedding` task. The `model_id` must be the ID of one of the built-in E5 models. 
The API will automatically download the E5 model if it isn't already downloaded and then deploy the model.", + "summary": "E5 text embedding task", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"num_allocations\": 1,\n \"num_threads\": 1,\n \"model_id\": \".multilingual-e5-small\" \n }\n}" + }, + "PutElasticsearchRequestExample4": { + "description": "Run `PUT _inference/text_embedding/my-msmarco-minilm-model` to create an inference endpoint that performs a `text_embedding` task with a model that was uploaded by Eland.", + "summary": "Eland text embedding task", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"num_allocations\": 1,\n \"num_threads\": 1,\n \"model_id\": \"msmarco-MiniLM-L12-cos-v5\" \n }\n}" + }, + "PutElasticsearchRequestExample5": { + "description": "Run `PUT _inference/text_embedding/my-e5-model` to create an inference endpoint that performs a `text_embedding` task and to configure adaptive allocations. The API request will automatically download the E5 model if it isn't already downloaded and then deploy the model.", + "summary": "Adaptive allocation", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"adaptive_allocations\": {\n \"enabled\": true,\n \"min_number_of_allocations\": 3,\n \"max_number_of_allocations\": 10\n },\n \"num_threads\": 1,\n \"model_id\": \".multilingual-e5-small\"\n }\n}" + }, + "PutElasticsearchRequestExample6": { + "description": "Run `PUT _inference/sparse_embedding/use_existing_deployment` to use an already existing model deployment when creating an inference endpoint.", + "summary": "Existing model deployment", + "value": "{\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"deployment_id\": \".elser_model_2\"\n }\n}" } }, "inherits": { @@ -27887,24 +28346,24 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_mistral" + "namespace": "inference.put_elasticsearch" }, "path": [ { - "description": "The task type.\nThe only valid task type for the model to perform is `text_embedding`.", + "description": "The type of the inference task that the model will perform.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "MistralTaskType", - "namespace": "inference.put_mistral" + "name": "ElasticsearchTaskType", + "namespace": "inference.put_elasticsearch" } } }, { - "description": "The unique identifier of the inference endpoint.", - "name": "mistral_inference_id", + "description": "The unique identifier of the inference endpoint.\nIt must not match the `model_id`.", + "name": "elasticsearch_inference_id", "required": true, "type": { "kind": "instance_of", @@ -27916,7 +28375,7 @@ } ], "query": [], - "specLocation": "inference/put_mistral/PutMistralRequest.ts#L28-L77" + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L25-L86" }, { "body": { @@ -27929,12 +28388,18 @@ } } }, + "examples": { + "PutElasticsearchResponseExample1": { + "description": "A successful response from `PUT _inference/sparse_embedding/use_existing_deployment`.
It contains the model ID and the threads and allocations settings from the model deployment.\n", + "value": "{\n \"inference_id\": \"use_existing_deployment\",\n \"task_type\": \"sparse_embedding\",\n \"service\": \"elasticsearch\",\n \"service_settings\": {\n \"num_allocations\": 2,\n \"num_threads\": 1,\n \"model_id\": \".elser_model_2\",\n \"deployment_id\": \".elser_model_2\"\n },\n \"chunking_settings\": {\n \"strategy\": \"sentence\",\n \"max_chunk_size\": 250,\n \"sentence_overlap\": 1\n }\n}" + } + }, "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_mistral" + "namespace": "inference.put_elasticsearch" }, - "specLocation": "inference/put_mistral/PutMistralResponse.ts#L22-L24" + "specLocation": "inference/put_elasticsearch/PutElasticsearchResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -27958,54 +28423,46 @@ } }, { - "description": "The type of service supported for the specified task type. In this case, `openai`.", + "description": "The type of service supported for the specified task type. In this case, `elser`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_openai" + "namespace": "inference.put_elser" } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `openai` service.", + "description": "Settings used to install the inference model. These settings are specific to the `elser` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "OpenAIServiceSettings", - "namespace": "inference.put_openai" - } - } - }, - { - "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", - "name": "task_settings", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "OpenAITaskSettings", - "namespace": "inference.put_openai" + "name": "ElserServiceSettings", + "namespace": "inference.put_elser" } } } ] }, - "description": "Create an OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `openai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "deprecation": { + "description": "The elser service is deprecated and will be removed in a future release. 
Use the Elasticsearch inference integration instead, with model_id included in the service_settings.", + "version": "8.16.0" + }, + "description": "Create an ELSER inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the endpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "PutOpenAiRequestExample1": { - "description": "Run `PUT _inference/text_embedding/openai-embeddings` to create an inference endpoint that performs a `text_embedding` task. The embeddings created by requests to this endpoint will have 128 dimensions.", - "summary": "A text embedding task", - "value": "{\n \"service\": \"openai\",\n \"service_settings\": {\n \"api_key\": \"OpenAI-API-Key\",\n \"model_id\": \"text-embedding-3-small\",\n \"dimensions\": 128\n }\n}" + "PutElserRequestExample1": { + "description": "Run `PUT _inference/sparse_embedding/my-elser-model` to create an inference endpoint that performs a `sparse_embedding` task. The request will automatically download the ELSER model if it isn't already downloaded and then deploy the model.", + "summary": "A sparse embedding task", + "value": "{\n \"service\": \"elser\",\n \"service_settings\": {\n \"num_allocations\": 1,\n \"num_threads\": 1\n }\n}" }, - "PutOpenAiRequestExample2": { - "description": "Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task type.", - "summary": "A completion task", - "value": "{\n \"service\": \"openai\",\n \"service_settings\": {\n \"api_key\": \"OpenAI-API-Key\",\n \"model_id\": \"gpt-3.5-turbo\"\n }\n}" + "PutElserRequestExample2": { + "description": "Run `PUT _inference/sparse_embedding/my-elser-model` to create an inference endpoint that performs a `sparse_embedding` task with adaptive allocations.
When adaptive allocations are enabled, the number of allocations of the model is set automatically based on the current load.", + "summary": "Adaptive allocations", + "value": "{\n \"service\": \"elser\",\n \"service_settings\": {\n \"adaptive_allocations\": {\n \"enabled\": true,\n \"min_number_of_allocations\": 3,\n \"max_number_of_allocations\": 10\n },\n \"num_threads\": 1\n }\n}" } }, "inherits": { @@ -28017,24 +28474,24 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_openai" + "namespace": "inference.put_elser" }, "path": [ { - "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", + "description": "The type of the inference task that the model will perform.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "OpenAITaskType", - "namespace": "inference.put_openai" + "name": "ElserTaskType", + "namespace": "inference.put_elser" } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "openai_inference_id", + "name": "elser_inference_id", "required": true, "type": { "kind": "instance_of", @@ -28046,7 +28503,7 @@ } ], "query": [], - "specLocation": "inference/put_openai/PutOpenAiRequest.ts#L28-L82" + "specLocation": "inference/put_elser/PutElserRequest.ts#L25-L82" }, { "body": { @@ -28059,12 +28516,18 @@ } } }, + "examples": { + "PutElserResponseExample1": { + "description": "A successful response when creating an ELSER inference endpoint.", + "value": "{\n \"inference_id\": \"my-elser-model\",\n \"task_type\": \"sparse_embedding\",\n \"service\": \"elser\",\n \"service_settings\": {\n \"num_allocations\": 1,\n \"num_threads\": 1\n },\n \"task_settings\": {}\n}" + } + }, "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_openai" + "namespace": "inference.put_elser" }, - "specLocation": "inference/put_openai/PutOpenAiResponse.ts#L22-L24" + "specLocation": "inference/put_elser/PutElserResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -28074,36 +28537,51 @@ "kind": "properties", "properties": [ { - "description": "The type of service supported for the specified task type. In this case, `watsonxai`.", + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `googleaistudio`.", "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", - "namespace": "inference.put_watsonx" + "namespace": "inference.put_googleaistudio" } } }, { - "description": "Settings used to install the inference model. These settings are specific to the `watsonxai` service.", + "description": "Settings used to install the inference model. 
These settings are specific to the `googleaistudio` service.", "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "WatsonxServiceSettings", - "namespace": "inference.put_watsonx" + "name": "GoogleAiStudioServiceSettings", + "namespace": "inference.put_googleaistudio" } } } ] }, - "description": "Create a Watsonx inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create a Google AI Studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googleaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "InferenceRequestExample1": { - "description": "Run `PUT _inference/text_embedding/watsonx-embeddings` to create an Watonsx inference endpoint that performs a text embedding task.", - "value": "{\n \"service\": \"watsonxai\",\n \"service_settings\": {\n \"api_key\": \"Watsonx-API-Key\", \n \"url\": \"Wastonx-URL\", \n \"model_id\": \"ibm/slate-30m-english-rtrvr\",\n \"project_id\": \"IBM-Cloud-ID\", \n \"api_version\": \"2024-03-14\"\n }\n}" + "PutGoogleAiStudioRequestExample1": { + "description": "Run `PUT _inference/completion/google_ai_studio_completion` to create an inference endpoint to perform a `completion` task type.", + "summary": "A completion task", + "value": "{\n \"service\": \"googleaistudio\",\n \"service_settings\": {\n \"api_key\": \"api-key\",\n \"model_id\": \"model-id\"\n }\n}" } }, "inherits": { @@ -28115,24 +28593,24 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.put_watsonx" + "namespace": "inference.put_googleaistudio" }, "path": [ { - "description": "The task type.\nThe only valid task type for the model to perform is `text_embedding`.", + "description": "The type of the inference task that the model will perform.", "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "WatsonxTaskType", - "namespace": "inference.put_watsonx" + "name": "GoogleAiStudioTaskType", + "namespace": "inference.put_googleaistudio" } } }, { "description": "The unique identifier of the inference endpoint.", - "name": "watsonx_inference_id", + "name":
"googleaistudio_inference_id", "required": true, "type": { "kind": "instance_of", @@ -28144,7 +28622,7 @@ } ], "query": [], - "specLocation": "inference/put_watsonx/PutWatsonxRequest.ts#L24-L70" + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L27-L75" }, { "body": { @@ -28160,9 +28638,9 @@ "kind": "response", "name": { "name": "Response", - "namespace": "inference.put_watsonx" + "namespace": "inference.put_googleaistudio" }, - "specLocation": "inference/put_watsonx/PutWatsonxResponse.ts#L22-L24" + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -28172,64 +28650,68 @@ "kind": "properties", "properties": [ { - "description": "Query input.", - "name": "query", + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `googlevertexai`.", + "name": "service", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "ServiceType", + "namespace": "inference.put_googlevertexai" } } }, { - "description": "The text on which you want to perform the inference task.\nIt can be a single string or an array.\n\n> info\n> Inference endpoints for the `completion` task type currently only support a single string as input.", - "name": "input", + "description": "Settings used to install the inference model. 
These settings are specific to the `googlevertexai` service.", + "name": "service_settings", "required": true, "type": { - "items": [ - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - ], - "kind": "union_of" + "kind": "instance_of", + "type": { + "name": "GoogleVertexAIServiceSettings", + "namespace": "inference.put_googlevertexai" + } } }, { - "description": "Task settings for the individual inference request.\nThese settings are specific to the task type you specified and override the task settings specified when initializing the service.", + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", "name": "task_settings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TaskSettings", - "namespace": "inference._types" + "name": "GoogleVertexAITaskSettings", + "namespace": "inference.put_googlevertexai" } } } ] }, - "description": "Perform rereanking inference on the service", + "description": "Create a Google Vertex AI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googlevertexai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "RerankRequestExample1": { - "description": "Run `POST _inference/rerank/cohere_rerank` to perform reranking on the example input.", - "summary": "Rerank task", - "value": "{\n \"input\": [\"luke\", \"like\", \"leia\", \"chewy\",\"r2d2\", \"star\", \"wars\"],\n \"query\": \"star wars main character\"\n}" + "PutGoogleVertexAiRequestExample1": { + "description": "Run `PUT _inference/text_embedding/google_vertex_ai_embeddings` to create an inference endpoint to perform a `text_embedding` task type.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"googlevertexai\",\n \"service_settings\": {\n \"service_account_json\": \"service-account-json\",\n \"model_id\": \"model-id\",\n \"location\": \"location\",\n \"project_id\": \"project-id\"\n }\n}" + }, + "PutGoogleVertexAiRequestExample2": { + "description": "Run `PUT _inference/rerank/google_vertex_ai_rerank` to create an inference endpoint to perform a `rerank` task type.", + "summary": "A rerank task", + "value": "{\n \"service\": \"googlevertexai\",\n \"service_settings\": {\n \"service_account_json\": \"service-account-json\",\n \"project_id\": \"project-id\"\n }\n}" } }, "inherits": { @@ -28241,38 +28723,36 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.rerank" + "namespace": "inference.put_googlevertexai" }, "path": [ { - "description": "The unique identifier for the inference endpoint.", - "name": "inference_id", + "description": "The type of the inference task that the model will perform.", + "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + 
"name": "GoogleVertexAITaskType", + "namespace": "inference.put_googlevertexai" } } - } - ], - "query": [ + }, { - "description": "The amount of time to wait for the inference request to complete.", - "name": "timeout", - "required": false, - "serverDefault": "30s", + "description": "The unique identifier of the inference endpoint.", + "name": "googlevertexai_inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "Id", "namespace": "_types" } } } ], - "specLocation": "inference/rerank/RerankRequest.ts#L25-L72" + "query": [], + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L28-L81" }, { "body": { @@ -28280,24 +28760,17 @@ "value": { "kind": "instance_of", "type": { - "name": "RerankedInferenceResult", + "name": "InferenceEndpointInfo", "namespace": "inference._types" } } }, - "examples": { - "RerankResponseExample1": { - "description": "A successful response from `POST _inference/rerank/cohere_rerank`.\n", - "summary": "Rerank task", - "value": "{\n \"rerank\": [\n {\n \"index\": \"2\",\n \"relevance_score\": \"0.011597361\",\n \"text\": \"leia\"\n },\n {\n \"index\": \"0\",\n \"relevance_score\": \"0.006338922\",\n \"text\": \"luke\"\n },\n {\n \"index\": \"5\",\n \"relevance_score\": \"0.0016166499\",\n \"text\": \"star\"\n },\n {\n \"index\": \"4\",\n \"relevance_score\": \"0.0011695103\",\n \"text\": \"r2d2\"\n },\n {\n \"index\": \"1\",\n \"relevance_score\": \"5.614787E-4\",\n \"text\": \"like\"\n },\n {\n \"index\": \"6\",\n \"relevance_score\": \"3.7850367E-4\",\n \"text\": \"wars\"\n },\n {\n \"index\": \"3\",\n \"relevance_score\": \"1.2508839E-5\",\n \"text\": \"chewy\"\n }\n ]\n}" - } - }, "kind": "response", "name": { "name": "Response", - "namespace": "inference.rerank" + "namespace": "inference.put_googlevertexai" }, - "specLocation": "inference/rerank/RerankResponse.ts#L22-L24" + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -28307,52 +28780,53 @@ "kind": "properties", "properties": [ { - "description": "Inference input.\nEither a string or an array of strings.", - "name": "input", + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { +>>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "description": "The type of service supported for the specified task type. In this case, `hugging_face`.", + "name": "service", "required": true, "type": { - "items": [ - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - ], - "kind": "union_of" + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_hugging_face" + } } }, { - "description": "Optional task settings", - "name": "task_settings", - "required": false, + "description": "Settings used to install the inference model. 
These settings are specific to the `hugging_face` service.", + "name": "service_settings", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "TaskSettings", - "namespace": "inference._types" + "name": "HuggingFaceServiceSettings", + "namespace": "inference.put_hugging_face" } } } ] }, - "description": "Perform sparse embedding inference on the service", + "description": "Create a Hugging Face inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* `multilingual-e5-base`\n* `multilingual-e5-small`\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "SparseEmbeddingRequestExample1": { - "description": "Run `POST _inference/sparse_embedding/my-elser-model` to perform sparse embedding on the example sentence.", - "summary": "Sparse embedding task", - "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}" + "PutHuggingFaceRequestExample1": { + "description": "Run `PUT _inference/text_embedding/hugging-face-embeddings` to create an inference endpoint that performs a `text_embedding` task type.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"hugging_face\",\n \"service_settings\": {\n \"api_key\": \"hugging-face-access-token\", \n \"url\": \"url-endpoint\" \n }\n}" } }, "inherits": { @@ -28364,38 +28838,36 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.sparse_embedding" + "namespace": "inference.put_hugging_face" }, "path": [ { - "description": "The inference Id", - "name": "inference_id", + "description": "The type of the inference task that the model will perform.", + "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "HuggingFaceTaskType", + "namespace": "inference.put_hugging_face" } } - } - ], - "query": [ + }, { - "description": "Specifies the amount of time to wait for the inference request to complete.", - "name": "timeout", - "required": false, - "serverDefault": "30s", + "description": "The unique identifier of the inference endpoint.", + "name": "huggingface_inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "Id", "namespace": "_types" } } } ], - "specLocation": "inference/sparse_embedding/SparseEmbeddingRequest.ts#L25-L63" + "query": [], + "specLocation": 
"inference/put_hugging_face/PutHuggingFaceRequest.ts#L27-L89" }, { "body": { @@ -28403,24 +28875,17 @@ "value": { "kind": "instance_of", "type": { - "name": "SparseEmbeddingInferenceResult", + "name": "InferenceEndpointInfo", "namespace": "inference._types" } } }, - "examples": { - "SparseEmbeddingResponseExample1": { - "description": "An abbreviated response from `POST _inference/sparse_embedding/my-elser-model`.\n", - "summary": "Sparse embedding task", - "value": "{\n \"sparse_embedding\": [\n {\n \"port\": 2.1259406,\n \"sky\": 1.7073475,\n \"color\": 1.6922266,\n \"dead\": 1.6247464,\n \"television\": 1.3525393,\n \"above\": 1.2425821,\n \"tuned\": 1.1440028,\n \"colors\": 1.1218185,\n \"tv\": 1.0111054,\n \"ports\": 1.0067928,\n \"poem\": 1.0042328,\n \"channel\": 0.99471164,\n \"tune\": 0.96235967,\n \"scene\": 0.9020516\n }\n ]\n}" - } - }, "kind": "response", "name": { "name": "Response", - "namespace": "inference.sparse_embedding" + "namespace": "inference.put_hugging_face" }, - "specLocation": "inference/sparse_embedding/SparseEmbeddingResponse.ts#L22-L24" + "specLocation": "inference/put_hugging_face/PutHuggingFaceResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -28430,52 +28895,68 @@ "kind": "properties", "properties": [ { - "description": "Inference input.\nEither a string or an array of strings.", - "name": "input", + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `jinaai`.", + "name": "service", "required": true, "type": { - "items": [ - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - ], - "kind": "union_of" + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_jinaai" + } } }, { - "description": "Optional task settings", + "description": "Settings used to install the inference model. 
These settings are specific to the `jinaai` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "JinaAIServiceSettings", + "namespace": "inference.put_jinaai" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", "name": "task_settings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TaskSettings", - "namespace": "inference._types" + "name": "JinaAITaskSettings", + "namespace": "inference.put_jinaai" } } } ] }, - "description": "Perform text embedding inference on the service", + "description": "Create a JinaAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` and `text_embedding` models, refer to the JinaAI documentation.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "TextEmbeddingRequestExample1": { - "description": "Run `POST _inference/text_embedding/my-cohere-endpoint` to perform text embedding on the example sentence using the Cohere integration,", - "summary": "Text embedding task", - "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n \"task_settings\": {\n \"input_type\": \"ingest\"\n }\n}" + "PutJinaAiRequestExample1": { + "description": "Run `PUT _inference/text_embedding/jinaai-embeddings` to create an inference endpoint for text embedding tasks using the JinaAI service.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"jinaai\",\n \"service_settings\": {\n \"model_id\": \"jina-embeddings-v3\",\n \"api_key\": \"JinaAi-Api-key\"\n }\n}" + }, + "PutJinaAiRequestExample2": { + "description": "Run `PUT _inference/rerank/jinaai-rerank` to create an inference endpoint for rerank tasks using the JinaAI service.", + "summary": "A rerank task", + "value": "{\n \"service\": \"jinaai\",\n \"service_settings\": {\n \"api_key\": \"JinaAI-Api-key\",\n \"model_id\": \"jina-reranker-v2-base-multilingual\"\n },\n \"task_settings\": {\n \"top_n\": 10,\n \"return_documents\": true\n }\n}" } }, "inherits": { @@ -28487,38 +28968,36 @@ "kind": "request", "name": { "name": "Request", - "namespace": "inference.text_embedding" + "namespace": "inference.put_jinaai" }, "path": [ { - "description": "The inference Id", - "name": "inference_id", + "description": "The type of the inference task that the model will perform.", + "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "JinaAITaskType", + "namespace": "inference.put_jinaai" } } - } - ], - "query": [ + }, { - "description": "Specifies the amount of time to wait for the inference request to complete.", - "name": "timeout", - "required": false, - "serverDefault": "30s", + "description": "The unique identifier of the inference endpoint.", + "name": "jinaai_inference_id", +
"required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "Id", "namespace": "_types" } } } ], - "specLocation": "inference/text_embedding/TextEmbeddingRequest.ts#L25-L63" + "query": [], + "specLocation": "inference/put_jinaai/PutJinaAiRequest.ts#L28-L84" }, { "body": { @@ -28526,133 +29005,72 @@ "value": { "kind": "instance_of", "type": { - "name": "TextEmbeddingInferenceResult", + "name": "InferenceEndpointInfo", "namespace": "inference._types" } } }, - "examples": { - "TextEmbeddingResponseExample1": { - "description": "An abbreviated response from `POST _inference/text_embedding/my-cohere-endpoint`.\n", - "summary": "Text embedding task", - "value": "{\n \"text_embedding\": [\n {\n \"embedding\": [\n {\n 0.018569946,\n -0.036895752,\n 0.01486969,\n -0.0045204163,\n -0.04385376,\n 0.0075950623,\n 0.04260254,\n -0.004005432,\n 0.007865906,\n 0.030792236,\n -0.050476074,\n 0.011795044,\n -0.011642456,\n -0.010070801\n }\n ]\n }\n ]\n}" - } - }, "kind": "response", "name": { "name": "Response", - "namespace": "inference.text_embedding" + "namespace": "inference.put_jinaai" }, - "specLocation": "inference/text_embedding/TextEmbeddingResponse.ts#L22-L24" + "specLocation": "inference/put_jinaai/PutJinaAiResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], - "body": { - "kind": "no_body" - }, - "description": "Get cluster info.\nGet basic build, version, and cluster information.", - "inherits": { - "type": { - "name": "RequestBase", - "namespace": "_types" - } - }, - "kind": "request", - "name": { - "name": "Request", - "namespace": "_global.info" - }, - "path": [], - "query": [], - "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L39" - }, - { "body": { "kind": "properties", "properties": [ { - "description": "The responding cluster's name.", - "name": "cluster_name", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Name", - "namespace": "_types" - } - } - }, - { - "name": "cluster_uuid", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Uuid", - "namespace": "_types" - } - } - }, - { - "description": "The responding node's name.", - "name": "name", - "required": true, + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Name", - "namespace": "_types" + "name": "InferenceChunkingSettings", + "namespace": "inference._types" } } }, { - "name": "tagline", + "description": "The type of service supported for the specified task type. In this case, `mistral`.", + "name": "service", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "ServiceType", + "namespace": "inference.put_mistral" } } }, { - "description": "The running version of Elasticsearch.", - "name": "version", + "description": "Settings used to install the inference model. 
These settings are specific to the `mistral` service.", + "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "ElasticsearchVersionInfo", - "namespace": "_types" + "name": "MistralServiceSettings", + "namespace": "inference.put_mistral" } } } ] }, + "description": "Create a Mistral inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `mistral` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "RootNodeInfoResponseExample1": { - "description": "A successful response from `GET /`s.", - "value": "{\n \"name\": \"instance-0000000000\",\n \"cluster_name\": \"my_test_cluster\",\n \"cluster_uuid\": \"5QaxoN0pRZuOmWSxstBBwQ\",\n \"version\": {\n \"build_date\": \"2024-02-01T13:07:13.727175297Z\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"build_hash\": \"6185ba65d27469afabc9bc951cded6c17c21e3f3\",\n \"number\": \"8.12.1\",\n \"lucene_version\": \"9.9.2\",\n \"minimum_index_compatibility_version\": \"7.0.0\",\n \"build_flavor\": \"default\",\n \"build_snapshot\": false,\n \"build_type\": \"docker\"\n },\n \"tagline\": \"You Know, for Search\"\n}" + "PutMistralRequestExample1": { + "description": "Run `PUT _inference/text_embedding/mistral-embeddings-test` to create a Mistral inference endpoint that performs a text embedding task.", + "value": "{\n \"service\": \"mistral\",\n \"service_settings\": {\n \"api_key\": \"Mistral-API-Key\",\n \"model\": \"mistral-embed\" \n }\n}" } }, - "kind": "response", - "name": { - "name": "Response", - "namespace": "_global.info" - }, - "specLocation": "_global/info/RootNodeInfoResponse.ts#L23-L40" - }, - { - "attachedBehaviors": [ - "CommonQueryParameters" - ], - "body": { - "kind": "no_body" - }, - "description": "Delete pipelines.\nDelete one or more ingest pipelines.", "inherits": { "type": { "name": "RequestBase", @@ -28662,51 +29080,36 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ingest.delete_pipeline" + "namespace": "inference.put_mistral" }, "path": [ { - "description": "Pipeline ID or wildcard expression of pipeline IDs used to limit the request.\nTo delete all ingest pipelines in a cluster, use a value of `*`.", - "name": "id", + "description": "The task type.\nThe only valid task type for the model to perform is `text_embedding`.", + "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" - } - } - } - ], - "query": [ - { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", - "name": "master_timeout", - "required": false, - "serverDefault": "30s", - "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" + "name": "MistralTaskType", + "namespace": "inference.put_mistral" } } }, { - "description": "Period to wait for a response.\nIf no response is received before the timeout expires, the
request fails and returns an error.", - "name": "timeout", - "required": false, - "serverDefault": "30s", + "description": "The unique identifier of the inference endpoint.", + "name": "mistral_inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "Id", "namespace": "_types" } } } ], - "specLocation": "ingest/delete_pipeline/DeletePipelineRequest.ts#L24-L61" + "query": [], + "specLocation": "inference/put_mistral/PutMistralRequest.ts#L28-L77" }, { "body": { @@ -28714,26 +29117,90 @@ "value": { "kind": "instance_of", "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "name": "InferenceEndpointInfo", + "namespace": "inference._types" } } }, "kind": "response", "name": { "name": "Response", - "namespace": "ingest.delete_pipeline" + "namespace": "inference.put_mistral" }, - "specLocation": "ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24" + "specLocation": "inference/put_mistral/PutMistralResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "no_body" + "kind": "properties", + "properties": [ + { + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `openai`.", + "name": "service", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_openai" + } + } + }, + { + "description": "Settings used to install the inference model. These settings are specific to the `openai` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "OpenAIServiceSettings", + "namespace": "inference.put_openai" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "OpenAITaskSettings", + "namespace": "inference.put_openai" + } + } + } + ] + }, + "description": "Create an OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `openai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "examples": { + "PutOpenAiRequestExample1": { + "description": "Run `PUT _inference/text_embedding/openai-embeddings` to create an inference endpoint that performs a `text_embedding` task. 
The embeddings created by requests to this endpoint will have 128 dimensions.", "summary": "A text embedding task", "value": "{\n    \"service\": \"openai\",\n    \"service_settings\": {\n        \"api_key\": \"OpenAI-API-Key\",\n        \"model_id\": \"text-embedding-3-small\",\n        \"dimensions\": 128\n    }\n}" }, "PutOpenAiRequestExample2": { "description": "Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task.", "summary": "A completion task", "value": "{\n    \"service\": \"openai\",\n    \"service_settings\": {\n        \"api_key\": \"OpenAI-API-Key\",\n        \"model_id\": \"gpt-3.5-turbo\"\n    }\n}" } }, - "description": "Get pipelines.\n\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", "inherits": { "type": { "name": "RequestBase", @@ -28743,96 +29210,95 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ingest.get_pipeline" + "namespace": "inference.put_openai" }, "path": [ { - "description": "Comma-separated list of pipeline IDs to retrieve.\nWildcard (`*`) expressions are supported.\nTo get all ingest pipelines, omit this parameter or use `*`.", - "name": "id", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "Id", - "namespace": "_types" - } - } - } - ], - "query": [ - { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", - "name": "master_timeout", - "required": false, - "serverDefault": "30s", + "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", + "name": "task_type", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", - "namespace": "_types" + "name": "OpenAITaskType", + "namespace": "inference.put_openai" } } }, { - "description": "Return pipelines without their definitions (default: false)", - "name": "summary", - "required": false, - "serverDefault": false, + "description": "The unique identifier of the inference endpoint.", + "name": "openai_inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Id", + "namespace": "_types" } } } ], - "specLocation": "ingest/get_pipeline/GetPipelineRequest.ts#L24-L64" + "query": [], + "specLocation": "inference/put_openai/PutOpenAiRequest.ts#L28-L82" }, { "body": { - "codegenName": "pipelines", "kind": "value", "value": { - "key": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "instance_of", - "type": { - "name": "Pipeline", - "namespace": "ingest._types" - } + "kind": "instance_of", + "type": { + "name": "InferenceEndpointInfo", + "namespace": "inference._types" } } }, - "examples": { - "GetPipelineResponseExample1": { - "description": "A successful response for retrieving information about an ingest pipeline.", - "value": "{\n  \"my-pipeline-id\" : {\n    \"description\" : \"describe pipeline\",\n    \"version\" : 123,\n    \"processors\" : [\n      {\n        \"set\" : {\n          \"field\" : \"foo\",\n          \"value\" : \"bar\"\n        }\n      }\n    ]\n  }\n}" - } - }, "kind": "response", "name": { "name": "Response", - "namespace": "ingest.get_pipeline" 
+ "namespace": "inference.put_openai" }, - "specLocation": "ingest/get_pipeline/GetPipelineResponse.ts#L23-L26" + "specLocation": "inference/put_openai/PutOpenAiResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "no_body" + "kind": "properties", + "properties": [ + { + "description": "The type of service supported for the specified task type. In this case, `watsonxai`.", + "name": "service", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_watsonx" + } + } + }, + { + "description": "Settings used to install the inference model. These settings are specific to the `watsonxai` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "WatsonxServiceSettings", + "namespace": "inference.put_watsonx" + } + } + } + ] + }, + "description": "Create a Watsonx inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "examples": { + "InferenceRequestExample1": { + "description": "Run `PUT _inference/text_embedding/watsonx-embeddings` to create an Watonsx inference endpoint that performs a text embedding task.", + "value": "{\n \"service\": \"watsonxai\",\n \"service_settings\": {\n \"api_key\": \"Watsonx-API-Key\", \n \"url\": \"Wastonx-URL\", \n \"model_id\": \"ibm/slate-30m-english-rtrvr\",\n \"project_id\": \"IBM-Cloud-ID\", \n \"api_version\": \"2024-03-14\"\n }\n}" + } }, - "description": "Run a grok processor.\nExtract structured fields out of a single text field within a document.\nYou must choose which field to extract matched fields from, as well as the grok pattern you expect will match.\nA grok pattern is like a regular expression that supports aliased expressions that can be reused.", "inherits": { "type": { "name": "RequestBase", @@ -28842,46 +29308,54 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ingest.processor_grok" + "namespace": "inference.put_watsonx" }, - "path": [], + "path": [ + { + "description": "The task type.\nThe only valid task type for the model to perform is `text_embedding`.", + "name": "task_type", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "WatsonxTaskType", + "namespace": "inference.put_watsonx" + } + } + }, + { + "description": "The unique identifier of the inference endpoint.", + "name": "watsonx_inference_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], "query": [], - "specLocation": "ingest/processor_grok/GrokProcessorPatternsRequest.ts#L22-L40" + "specLocation": "inference/put_watsonx/PutWatsonxRequest.ts#L24-L70" 
}, { "body": { - "kind": "properties", - "properties": [ - { - "name": "patterns", - "required": true, - "type": { - "key": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "InferenceEndpointInfo", + "namespace": "inference._types" } - ] + } }, "kind": "response", "name": { "name": "Response", - "namespace": "ingest.processor_grok" + "namespace": "inference.put_watsonx" }, - "specLocation": "ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24" + "specLocation": "inference/put_watsonx/PutWatsonxResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -28891,21 +29365,9 @@ "kind": "properties", "properties": [ { - "description": "Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.", - "name": "_meta", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "Metadata", - "namespace": "_types" - } - } - }, - { - "description": "Description of the ingest pipeline.", - "name": "description", - "required": false, + "description": "Query input.", + "name": "query", + "required": true, "type": { "kind": "instance_of", "type": { @@ -28915,72 +29377,52 @@ } }, { - "description": "Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.", - "name": "on_failure", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "ProcessorContainer", - "namespace": "ingest._types" - } - } - } - }, - { - "description": "Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.", - "name": "processors", - "required": false, + "description": "The text on which you want to perform the inference task.\nIt can be a single string or an array.\n\n> info\n> Inference endpoints for the `completion` task type currently only support a single string as input.", + "name": "input", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "ProcessorContainer", - "namespace": "ingest._types" + "items": [ + { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } } - } - } - }, - { - "description": "Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. 
Elasticsearch does not use or validate pipeline version numbers.", - "name": "version", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "VersionNumber", - "namespace": "_types" - } + ], + "kind": "union_of" } }, { - "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", - "name": "deprecated", + "description": "Task settings for the individual inference request.\nThese settings are specific to the task type you specified and override the task settings specified when initializing the service.", + "name": "task_settings", "required": false, - "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "TaskSettings", + "namespace": "inference._types" } } } ] }, - "description": "Create or update a pipeline.\nChanges made using this API take effect immediately.", + "description": "Perform reranking inference on the service", "examples": { - "PutPipelineRequestExample1": { - "summary": "Create an ingest pipeline.", - "value": "{\n  \"description\" : \"My optional pipeline description\",\n  \"processors\" : [\n    {\n      \"set\" : {\n        \"description\" : \"My optional processor description\",\n        \"field\": \"my-keyword-field\",\n        \"value\": \"foo\"\n      }\n    }\n  ]\n}" - }, - "PutPipelineRequestExample2": { - "description": "You can use the `_meta` parameter to add arbitrary metadata to a pipeline.", - "summary": "Create an ingest pipeline with metadata.", - "value": "{\n  \"description\" : \"My optional pipeline description\",\n  \"processors\" : [\n    {\n      \"set\" : {\n        \"description\" : \"My optional processor description\",\n        \"field\": \"my-keyword-field\",\n        \"value\": \"foo\"\n      }\n    }\n  ],\n  \"_meta\": {\n    \"reason\": \"set my-keyword-field to foo\",\n    \"serialization\": {\n      \"class\": \"MyPipeline\",\n      \"id\": 10\n    }\n  }\n}" + "RerankRequestExample1": { + "description": "Run `POST _inference/rerank/cohere_rerank` to perform reranking on the example input.", + "summary": "Rerank task", + "value": "{\n  \"input\": [\"luke\", \"like\", \"leia\", \"chewy\",\"r2d2\", \"star\", \"wars\"],\n  \"query\": \"star wars main character\"\n}" } }, "inherits": { @@ -28992,12 +29434,12 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ingest.put_pipeline" + "namespace": "inference.rerank" }, "path": [ { - "description": "ID of the ingest pipeline to create or update.", - "name": "id", + "description": "The unique identifier for the inference endpoint.", + "name": "inference_id", "required": true, "type": { "kind": "instance_of", @@ -29010,20 +29452,7 @@ ], "query": [ { - "description": "Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.", - "name": "master_timeout", - "required": false, - "serverDefault": "30s", - "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" - } - } - }, - { - "description": "Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.", + "description": "The amount of time to wait for the inference request to complete.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -29034,21 +29463,9 @@ "namespace": "_types" } } - }, - { - "description": "Required version for optimistic concurrency control for pipeline updates", - "name": "if_version", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "VersionNumber", - "namespace": "_types" - } - } } ], - "specLocation": "ingest/put_pipeline/PutPipelineRequest.ts#L25-L90" + "specLocation": "inference/rerank/RerankRequest.ts#L25-L72" }, { "body": { @@ -29056,17 +29473,24 @@ "value": { "kind": "instance_of", "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "name": "RerankedInferenceResult", + "namespace": "inference._types" } } }, + "examples": { + "RerankResponseExample1": { + "description": "A successful response from `POST _inference/rerank/cohere_rerank`.\n", + "summary": "Rerank task", + "value": "{\n \"rerank\": [\n {\n \"index\": \"2\",\n \"relevance_score\": \"0.011597361\",\n \"text\": \"leia\"\n },\n {\n \"index\": \"0\",\n \"relevance_score\": \"0.006338922\",\n \"text\": \"luke\"\n },\n {\n \"index\": \"5\",\n \"relevance_score\": \"0.0016166499\",\n \"text\": \"star\"\n },\n {\n \"index\": \"4\",\n \"relevance_score\": \"0.0011695103\",\n \"text\": \"r2d2\"\n },\n {\n \"index\": \"1\",\n \"relevance_score\": \"5.614787E-4\",\n \"text\": \"like\"\n },\n {\n \"index\": \"6\",\n \"relevance_score\": \"3.7850367E-4\",\n \"text\": \"wars\"\n },\n {\n \"index\": \"3\",\n \"relevance_score\": \"1.2508839E-5\",\n \"text\": \"chewy\"\n }\n ]\n}" + } + }, "kind": "response", "name": { "name": "Response", - "namespace": "ingest.put_pipeline" + "namespace": "inference.rerank" }, - "specLocation": "ingest/put_pipeline/PutPipelineResponse.ts#L22-L24" + "specLocation": "inference/rerank/RerankResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -29076,40 +29500,52 @@ "kind": "properties", "properties": [ { - "description": "Sample documents to test in the pipeline.", - "name": "docs", + "description": "Inference input.\nEither a string or an array of strings.", + "name": "input", "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "Document", - "namespace": "ingest._types" + "items": [ + { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } } - } + ], + "kind": "union_of" } }, { - "description": "The pipeline to test.\nIf you don't specify the `pipeline` request path parameter, this parameter is required.\nIf you specify both this and the request path parameter, the API only uses the request path parameter.", - "name": "pipeline", + "description": "Optional task settings", + "name": "task_settings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Pipeline", - "namespace": "ingest._types" + "name": "TaskSettings", + "namespace": "inference._types" } } } ] }, - "description": "Simulate a pipeline.\n\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Perform sparse embedding inference on the service", 
"examples": { - "SimulatePipelineRequestExample1": { - "description": "You can specify the used pipeline either in the request body or as a path parameter.", - "summary": "Run an ingest pipeline against a set of provided documents.", - "value": "{\n \"pipeline\" :\n {\n \"description\": \"_description\",\n \"processors\": [\n {\n \"set\" : {\n \"field\" : \"field2\",\n \"value\" : \"_value\"\n }\n }\n ]\n },\n \"docs\": [\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + "SparseEmbeddingRequestExample1": { + "description": "Run `POST _inference/sparse_embedding/my-elser-model` to perform sparse embedding on the example sentence.", + "summary": "Sparse embedding task", + "value": "{\n \"input\": \"The sky above the port was the color of television tuned to a dead channel.\"\n}" } }, "inherits": { @@ -29121,13 +29557,13 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ingest.simulate" + "namespace": "inference.sparse_embedding" }, "path": [ { - "description": "The pipeline to test.\nIf you don't specify a `pipeline` in the request body, this parameter is required.", - "name": "id", - "required": false, + "description": "The inference Id", + "name": "inference_id", + "required": true, "type": { "kind": "instance_of", "type": { @@ -29139,61 +29575,102 @@ ], "query": [ { - "description": "If `true`, the response includes output data for each processor in the executed pipeline.", - "name": "verbose", + "description": "Specifies the amount of time to wait for the inference request to complete.", + "name": "timeout", "required": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Duration", + "namespace": "_types" } } } ], - "specLocation": "ingest/simulate/SimulatePipelineRequest.ts#L25-L72" + "specLocation": "inference/sparse_embedding/SparseEmbeddingRequest.ts#L25-L63" }, { "body": { - "kind": "properties", - "properties": [ - { - "name": "docs", - "required": true, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "SimulateDocumentResult", - "namespace": "ingest._types" - } - } - } + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "SparseEmbeddingInferenceResult", + "namespace": "inference._types" } - ] + } }, "examples": { - "SimulatePipelineResponseExample1": { - "description": "A successful response for running an ingest pipeline against a set of provided documents.", - "value": "{\n \"docs\": [\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"bar\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.187Z\"\n }\n }\n },\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"rab\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.188Z\"\n }\n }\n }\n ]\n}" + "SparseEmbeddingResponseExample1": { + "description": "An abbreviated response from `POST _inference/sparse_embedding/my-elser-model`.\n", + "summary": "Sparse embedding task", + "value": "{\n \"sparse_embedding\": [\n {\n \"port\": 2.1259406,\n \"sky\": 1.7073475,\n \"color\": 1.6922266,\n \"dead\": 1.6247464,\n \"television\": 1.3525393,\n \"above\": 1.2425821,\n \"tuned\": 1.1440028,\n \"colors\": 1.1218185,\n \"tv\": 
1.0111054,\n    \"ports\": 1.0067928,\n    \"poem\": 1.0042328,\n    \"channel\": 0.99471164,\n    \"tune\": 0.96235967,\n    \"scene\": 0.9020516\n    }\n  ]\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ingest.simulate" + "namespace": "inference.sparse_embedding" }, - "specLocation": "ingest/simulate/SimulatePipelineResponse.ts#L22-L24" + "specLocation": "inference/sparse_embedding/SparseEmbeddingResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "no_body" + "kind": "properties", + "properties": [ + { + "description": "Inference input.\nEither a string or an array of strings.", + "name": "input", + "required": true, + "type": { + "items": [ + { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "kind": "union_of" + } + }, + { + "description": "Optional task settings", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "TaskSettings", + "namespace": "inference._types" + } + } + } + ] + }, + "description": "Perform text embedding inference on the service", + "examples": { + "TextEmbeddingRequestExample1": { + "description": "Run `POST _inference/text_embedding/my-cohere-endpoint` to perform text embedding on the example sentence using the Cohere integration.", + "summary": "Text embedding task", + "value": "{\n  \"input\": \"The sky above the port was the color of television tuned to a dead channel.\",\n  \"task_settings\": {\n    \"input_type\": \"ingest\"\n  }\n}" + } }, - "description": "Get license information.\n\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\n>info\n> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\n> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", "inherits": { "type": { "name": "RequestBase", @@ -29203,72 +29680,163 @@ "kind": "request", "name": { "name": "Request", - "namespace": "license.get" + "namespace": "inference.text_embedding" }, - "path": [], - "query": [ + "path": [ { - "deprecation": { - "description": "", - "version": "7.6.0" - }, - "description": "If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.\nThis parameter is deprecated and will always be set to true in 8.x.", - "name": "accept_enterprise", - "required": false, - "serverDefault": true, + "description": "The inference Id", + "name": "inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Id", + "namespace": "_types" } } - }, + } + ], + "query": [ { - "description": "Specifies whether to retrieve local information. 
The default value is `false`, which means the information is retrieved from the master node.", - "name": "local", + "description": "Specifies the amount of time to wait for the inference request to complete.", + "name": "timeout", "required": false, - "serverDefault": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Duration", + "namespace": "_types" } } } ], - "specLocation": "license/get/GetLicenseRequest.ts#L22-L56" + "specLocation": "inference/text_embedding/TextEmbeddingRequest.ts#L25-L63" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "TextEmbeddingInferenceResult", + "namespace": "inference._types" + } + } + }, + "examples": { + "TextEmbeddingResponseExample1": { + "description": "An abbreviated response from `POST _inference/text_embedding/my-cohere-endpoint`.\n", + "summary": "Text embedding task", + "value": "{\n  \"text_embedding\": [\n    {\n      \"embedding\": [\n          0.018569946,\n          -0.036895752,\n          0.01486969,\n          -0.0045204163,\n          -0.04385376,\n          0.0075950623,\n          0.04260254,\n          -0.004005432,\n          0.007865906,\n          0.030792236,\n          -0.050476074,\n          0.011795044,\n          -0.011642456,\n          -0.010070801\n      ]\n    }\n  ]\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "inference.text_embedding" + }, + "specLocation": "inference/text_embedding/TextEmbeddingResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Get cluster info.\nGet basic build, version, and cluster information.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "_global.info" + }, + "path": [], + "query": [], + "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L39" }, { "body": { "kind": "properties", "properties": [ { - "name": "license", + "description": "The responding cluster's name.", + "name": "cluster_name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "LicenseInformation", - "namespace": "license.get" + "name": "Name", + "namespace": "_types" + } + } + }, + { + "name": "cluster_uuid", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Uuid", + "namespace": "_types" + } + } + }, + { + "description": "The responding node's name.", + "name": "name", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" + } + } + }, + { + "name": "tagline", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The running version of Elasticsearch.", + "name": "version", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "ElasticsearchVersionInfo", + "namespace": "_types" } } } ] }, "examples": { - "GetLicenseResponseExample1": { - "description": "A successful response from `GET /_license`.", - "value": "{\n  \"license\" : {\n    \"status\" : \"active\",\n    \"uid\" : \"cbff45e7-c553-41f7-ae4f-9205eabd80xx\",\n    \"type\" : \"trial\",\n    \"issue_date\" : \"2018-10-20T22:05:12.332Z\",\n    \"issue_date_in_millis\" : 1540073112332,\n    \"expiry_date\" : \"2018-11-19T22:05:12.332Z\",\n    \"expiry_date_in_millis\" : 1542665112332,\n    \"max_nodes\" : 1000,\n    \"max_resource_units\" : null,\n    \"issued_to\" : \"test\",\n    \"issuer\" : \"elasticsearch\",\n    \"start_date_in_millis\" : -1\n  
}\n}" + "RootNodeInfoResponseExample1": { + "description": "A successful response from `GET /`s.", + "value": "{\n \"name\": \"instance-0000000000\",\n \"cluster_name\": \"my_test_cluster\",\n \"cluster_uuid\": \"5QaxoN0pRZuOmWSxstBBwQ\",\n \"version\": {\n \"build_date\": \"2024-02-01T13:07:13.727175297Z\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \"build_hash\": \"6185ba65d27469afabc9bc951cded6c17c21e3f3\",\n \"number\": \"8.12.1\",\n \"lucene_version\": \"9.9.2\",\n \"minimum_index_compatibility_version\": \"7.0.0\",\n \"build_flavor\": \"default\",\n \"build_snapshot\": false,\n \"build_type\": \"docker\"\n },\n \"tagline\": \"You Know, for Search\"\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "license.get" + "namespace": "_global.info" }, - "specLocation": "license/get/GetLicenseResponse.ts#L22-L24" + "specLocation": "_global/info/RootNodeInfoResponse.ts#L23-L40" }, { "attachedBehaviors": [ @@ -29277,7 +29845,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a Logstash pipeline.\nDelete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.", + "description": "Delete pipelines.\nDelete one or more ingest pipelines.", "inherits": { "type": { "name": "RequestBase", @@ -29287,11 +29855,11 @@ "kind": "request", "name": { "name": "Request", - "namespace": "logstash.delete_pipeline" + "namespace": "ingest.delete_pipeline" }, "path": [ { - "description": "An identifier for the pipeline.", + "description": "Pipeline ID or wildcard expression of pipeline IDs used to limit the request.\nTo delete all ingest pipelines in a cluster, use a value of `*`.", "name": "id", "required": true, "type": { @@ -29303,19 +29871,53 @@ } } ], - "query": [], - "specLocation": "logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L47" + "query": [ + { + "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "name": "master_timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + }, + { + "description": "Period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "ingest/delete_pipeline/DeletePipelineRequest.ts#L24-L61" }, { "body": { - "kind": "no_body" + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } }, "kind": "response", "name": { "name": "Response", - "namespace": "logstash.delete_pipeline" + "namespace": "ingest.delete_pipeline" }, - "specLocation": "logstash/delete_pipeline/LogstashDeletePipelineResponse.ts#L22-L24" + "specLocation": "ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -29324,7 +29926,7 @@ "body": { "kind": "no_body" }, - "description": "Get Logstash pipelines.\nGet pipelines that are used for Logstash Central Management.", + "description": "Get pipelines.\n\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", "inherits": { "type": { "name": "RequestBase", @@ -29334,24 +29936,51 @@ 
"kind": "request", "name": { "name": "Request", - "namespace": "logstash.get_pipeline" + "namespace": "ingest.get_pipeline" }, "path": [ { - "description": "A comma-separated list of pipeline identifiers.", + "description": "Comma-separated list of pipeline IDs to retrieve.\nWildcard (`*`) expressions are supported.\nTo get all ingest pipelines, omit this parameter or use `*`.", "name": "id", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Ids", + "name": "Id", "namespace": "_types" } } } ], - "query": [], - "specLocation": "logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L50" + "query": [ + { + "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "name": "master_timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + }, + { + "description": "Return pipelines without their definitions (default: false)", + "name": "summary", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "ingest/get_pipeline/GetPipelineRequest.ts#L24-L64" }, { "body": { @@ -29361,8 +29990,8 @@ "key": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } }, "kind": "dictionary_of", @@ -29371,47 +30000,32 @@ "kind": "instance_of", "type": { "name": "Pipeline", - "namespace": "logstash._types" + "namespace": "ingest._types" } } } }, "examples": { - "LogstashGetPipelineResponseExample1": { - "description": "A successful response from `GET _logstash/pipeline/my_pipeline`.\n", - "value": "{\n \"my_pipeline\": {\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": \"1\"\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n }\n}" + "GetPipelineResponseExample1": { + "description": "A successful response for retrieving information about an ingest pipeline.", + "value": "{\n \"my-pipeline-id\" : {\n \"description\" : \"describe pipeline\",\n \"version\" : 123,\n \"processors\" : [\n {\n \"set\" : {\n \"field\" : \"foo\",\n \"value\" : \"bar\"\n }\n }\n ]\n }\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "logstash.get_pipeline" + "namespace": "ingest.get_pipeline" }, - "specLocation": "logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27" + "specLocation": "ingest/get_pipeline/GetPipelineResponse.ts#L23-L26" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "codegenName": "pipeline", - "kind": "value", - "value": { - "kind": "instance_of", - "type": { - "name": "Pipeline", - "namespace": "logstash._types" - } - } - }, - "description": "Create or update a Logstash pipeline.\n\nCreate a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.", - "examples": { - "LogstashPutPipelineRequestExample1": { - "description": "Run `PUT _logstash/pipeline/my_pipeline` to create a pipeline.", - 
"summary": "Create a pipeline", - "value": "{\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": 1\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n}" - } + "kind": "no_body" }, + "description": "Run a grok processor.\nExtract structured fields out of a single text field within a document.\nYou must choose which field to extract matched fields from, as well as the grok pattern you expect will match.\nA grok pattern is like a regular expression that supports aliased expressions that can be reused.", "inherits": { "type": { "name": "RequestBase", @@ -29421,35 +30035,46 @@ "kind": "request", "name": { "name": "Request", - "namespace": "logstash.put_pipeline" + "namespace": "ingest.processor_grok" }, - "path": [ - { - "description": "An identifier for the pipeline.", - "name": "id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Id", - "namespace": "_types" - } - } - } - ], + "path": [], "query": [], - "specLocation": "logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L51" + "specLocation": "ingest/processor_grok/GrokProcessorPatternsRequest.ts#L22-L40" }, { "body": { - "kind": "no_body" + "kind": "properties", + "properties": [ + { + "name": "patterns", + "required": true, + "type": { + "key": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + "kind": "dictionary_of", + "singleKey": false, + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + } + ] }, "kind": "response", "name": { "name": "Response", - "namespace": "logstash.put_pipeline" + "namespace": "ingest.processor_grok" }, - "specLocation": "logstash/put_pipeline/LogstashPutPipelineResponse.ts#L22-L24" + "specLocation": "ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -29459,55 +30084,96 @@ "kind": "properties", "properties": [ { - "description": "The documents you want to retrieve. Required if no index is specified in the request URI.", - "name": "docs", + "description": "Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch.", + "name": "_meta", "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "Operation", - "namespace": "_global.mget" - } + "kind": "instance_of", + "type": { + "name": "Metadata", + "namespace": "_types" } } }, { - "description": "The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.", - "name": "ids", + "description": "Description of the ingest pipeline.", + "name": "description", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Ids", - "namespace": "_types" - } + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. 
The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors.", + "name": "on_failure", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "ProcessorContainer", + "namespace": "ingest._types" + } + } + } + }, + { + "description": "Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified.", + "name": "processors", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "ProcessorContainer", + "namespace": "ingest._types" + } + } + } + }, + { + "description": "Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers.", + "name": "version", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "VersionNumber", + "namespace": "_types" + } + } + }, + { + "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", + "name": "deprecated", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } } } ] }, - "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Create or update a pipeline.\nChanges made using this API take effect immediately.", "examples": { - "MultiGetRequestExample1": { - "description": "Run `GET /my-index-000001/_mget`. When you specify an index in the request URI, only the document IDs are required in the request body.\n", - "summary": "Get documents by ID", - "value": "{\n \"docs\": [\n {\n \"_id\": \"1\"\n },\n {\n \"_id\": \"2\"\n }\n ]\n}" - }, - "MultiGetRequestExample2": { - "description": "Run `GET /_mget`. This request sets `_source` to `false` for document 1 to exclude the source entirely. It retrieves `field3` and `field4` from document 2. 
It retrieves the `user` field from document 3 but filters out the `user.location` field.\n", - "summary": "Filter source fields", - "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_source\": false\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_source\": [ \"field3\", \"field4\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_source\": {\n \"include\": [ \"user\" ],\n \"exclude\": [ \"user.location\" ]\n }\n }\n ]\n}" - }, - "MultiGetRequestExample3": { - "description": "Run `GET /_mget`. This request retrieves `field1` and `field2` from document 1 and `field3` and `field4` from document 2.\n", - "summary": "Get stored fields", - "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"stored_fields\": [ \"field1\", \"field2\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"stored_fields\": [ \"field3\", \"field4\" ]\n }\n ]\n}" + "PutPipelineRequestExample1": { + "summary": "Create an ingest pipeline.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ]\n}" }, - "MultiGetRequestExample4": { - "description": "Run `GET /_mget?routing=key1`. If routing is used during indexing, you need to specify the routing value to retrieve documents. This request fetches `test/_doc/2` from the shard corresponding to routing key `key1`. It fetches `test/_doc/1` from the shard corresponding to routing key `key2`.\n", - "summary": "Document routing", - "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"routing\": \"key2\"\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\"\n }\n ]\n}" + "PutPipelineRequestExample2": { + "description": "You can use the `_meta` parameter to add arbitrary metadata to a pipeline.", + "summary": "Create an ingest pipeline with metadata.", + "value": "{\n \"description\" : \"My optional pipeline description\",\n \"processors\" : [\n {\n \"set\" : {\n \"description\" : \"My optional processor description\",\n \"field\": \"my-keyword-field\",\n \"value\": \"foo\"\n }\n }\n ],\n \"_meta\": {\n \"reason\": \"set my-keyword-field to foo\",\n \"serialization\": {\n \"class\": \"MyPipeline\",\n \"id\": 10\n }\n }\n}" } }, "inherits": { @@ -29519,17 +30185,17 @@ "kind": "request", "name": { "name": "Request", - "namespace": "_global.mget" + "namespace": "ingest.put_pipeline" }, "path": [ { - "description": "Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.", - "name": "index", - "required": false, + "description": "ID of the ingest pipeline to create or update.", + "name": "id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexName", + "name": "Id", "namespace": "_types" } } @@ -29537,205 +30203,190 @@ ], "query": [ { - "description": "Specifies the node or shard the operation should be performed on. Random by default.", - "name": "preference", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "If `true`, the request is real-time as opposed to near-real-time.", - "docId": "realtime", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html#realtime", - "name": "realtime", + "description": "Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.", + "name": "master_timeout", "required": false, - "serverDefault": true, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Duration", + "namespace": "_types" } } }, { - "description": "If `true`, the request refreshes relevant shards before retrieving documents.", - "name": "refresh", + "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.", + "name": "timeout", "required": false, - "serverDefault": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Duration", + "namespace": "_types" } } }, { - "description": "Custom value used to route operations to a specific shard.", - "name": "routing", + "description": "Required version for optimistic concurrency control for pipeline updates", + "name": "if_version", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Routing", + "name": "VersionNumber", "namespace": "_types" } } - }, - { - "description": "True or false to return the `_source` field or not, or a list of fields to return.", - "name": "_source", - "required": false, + } + ], + "specLocation": "ingest/put_pipeline/PutPipelineRequest.ts#L25-L90" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", "type": { - "kind": "instance_of", + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ingest.put_pipeline" + }, + "specLocation": "ingest/put_pipeline/PutPipelineResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ + { + "description": "Sample documents to test in the pipeline.", + "name": "docs", + "required": true, "type": { - "name": "SourceConfigParam", - "namespace": "_global.search._types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "Document", + "namespace": "ingest._types" + } + } } - } - }, - { - "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.", - "docId": "mapping-source-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html", - "name": "_source_excludes", - "required": false, - "type": { - "kind": "instance_of", + }, + { + "description": "The pipeline to test.\nIf you don't specify the `pipeline` request path parameter, this parameter is required.\nIf you specify both this and the request path parameter, the API only uses the request path parameter.", + "name": "pipeline", + "required": false, "type": { - "name": "Fields", - "namespace": "_types" + "kind": "instance_of", + "type": { + "name": "Pipeline", + "namespace": "ingest._types" + } } } - }, + ] + }, + "description": "Simulate a pipeline.\n\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "examples": { + "SimulatePipelineRequestExample1": { + "description": "You can specify the used pipeline either in the request body or as a path parameter.", + "summary": "Run an 
ingest pipeline against a set of provided documents.", + "value": "{\n \"pipeline\" :\n {\n \"description\": \"_description\",\n \"processors\": [\n {\n \"set\" : {\n \"field\" : \"field2\",\n \"value\" : \"_value\"\n }\n }\n ]\n },\n \"docs\": [\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"bar\"\n }\n },\n {\n \"_index\": \"index\",\n \"_id\": \"id\",\n \"_source\": {\n \"foo\": \"rab\"\n }\n }\n ]\n}" + } + }, + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ingest.simulate" + }, + "path": [ { - "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", - "docId": "mapping-source-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html", - "name": "_source_includes", + "description": "The pipeline to test.\nIf you don't specify a `pipeline` in the request body, this parameter is required.", + "name": "id", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Fields", + "name": "Id", "namespace": "_types" } } - }, + } + ], + "query": [ { - "description": "If `true`, retrieves the document fields stored in the index rather than the document `_source`.", - "name": "stored_fields", + "description": "If `true`, the response includes output data for each processor in the executed pipeline.", + "name": "verbose", "required": false, - "serverDefault": "false", "type": { "kind": "instance_of", "type": { - "name": "Fields", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } } ], - "specLocation": "_global/mget/MultiGetRequest.ts#L25-L127" + "specLocation": "ingest/simulate/SimulatePipelineRequest.ts#L25-L72" }, { "body": { "kind": "properties", "properties": [ { - "description": "The response includes a docs array that contains the documents in the order specified in the request.\nThe structure of the returned documents is similar to that returned by the get API.\nIf there is a failure getting a particular document, the error is included in place of the document.", "name": "docs", "required": true, "type": { "kind": "array_of", "value": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "TDocument", - "namespace": "_global.mget.Response" - } - } - ], "kind": "instance_of", "type": { - "name": "ResponseItem", - "namespace": "_global.mget" + "name": "SimulateDocumentResult", + "namespace": "ingest._types" } } } } ] }, - "generics": [ - { - "name": "TDocument", - "namespace": "_global.mget.Response" + "examples": { + "SimulatePipelineResponseExample1": { + "description": "A successful response for running an ingest pipeline against a set of provided documents.", + "value": "{\n \"docs\": [\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"bar\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.187Z\"\n }\n }\n },\n {\n \"doc\": {\n \"_id\": \"id\",\n \"_index\": \"index\",\n \"_version\": \"-3\",\n \"_source\": {\n \"field2\": \"_value\",\n \"foo\": \"rab\"\n },\n \"_ingest\": {\n \"timestamp\": \"2017-05-04T22:30:03.188Z\"\n }\n }\n }\n ]\n}" } - ], + }, "kind": "response", "name": { "name": 
"Response", - "namespace": "_global.mget" + "namespace": "ingest.simulate" }, - "specLocation": "_global/mget/MultiGetResponse.ts#L22-L31" + "specLocation": "ingest/simulate/SimulatePipelineResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "properties", - "properties": [ - { - "description": "Refer to the description for the `allow_no_match` query parameter.", - "name": "allow_no_match", - "required": false, - "serverDefault": true, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "description": "Refer to the descriptiion for the `force` query parameter.", - "name": "force", - "required": false, - "serverDefault": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "description": "Refer to the description for the `timeout` query parameter.", - "name": "timeout", - "required": false, - "serverDefault": "30m", - "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" - } - } - } - ] + "kind": "no_body" }, - "description": "Close anomaly detection jobs.\n\nA job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", + "description": "Get license information.\n\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\n>info\n> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\n> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", "inherits": { "type": { "name": "RequestBase", @@ -29745,26 +30396,17 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.close_job" + "namespace": "license.get" }, - "path": [ - { - "description": "Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. 
You can close all jobs by using `_all` or by specifying `*` as the job identifier.", - "name": "job_id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Id", - "namespace": "_types" - } - } - } - ], + "path": [], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", - "name": "allow_no_match", + "deprecation": { + "description": "", + "version": "7.6.0" + }, + "description": "If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.\nThis parameter is deprecated and will always be set to true in 8.x.", + "name": "accept_enterprise", "required": false, "serverDefault": true, "type": { @@ -29776,8 +30418,8 @@ } }, { - "description": "Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots.\nIf you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future.", - "name": "force", + "description": "Specifies whether to retrieve local information. 
The default value is `false`, which means the information is retrieved from the master node.", + "name": "local", "required": false, "serverDefault": false, "type": { @@ -29787,52 +30429,39 @@ "namespace": "_builtins" } } - }, - { - "description": "Controls the time to wait until a job has closed.", - "name": "timeout", - "required": false, - "serverDefault": "30m", - "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" - } - } } ], - "specLocation": "ml/close_job/MlCloseJobRequest.ts#L24-L85" + "specLocation": "license/get/GetLicenseRequest.ts#L22-L56" }, { "body": { "kind": "properties", "properties": [ { - "name": "closed", + "name": "license", "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "LicenseInformation", + "namespace": "license.get" } } } ] }, "examples": { - "MlCloseJobResponseExample1": { - "description": "A successful response when closing anomaly detection jobs.", - "value": "{\n \"closed\": true\n}" + "GetLicenseResponseExample1": { + "description": "A successful response from `GET /_license`.", + "value": "{\n \"license\" : {\n \"status\" : \"active\",\n \"uid\" : \"cbff45e7-c553-41f7-ae4f-9205eabd80xx\",\n \"type\" : \"trial\",\n \"issue_date\" : \"2018-10-20T22:05:12.332Z\",\n \"issue_date_in_millis\" : 1540073112332,\n \"expiry_date\" : \"2018-11-19T22:05:12.332Z\",\n \"expiry_date_in_millis\" : 1542665112332,\n \"max_nodes\" : 1000,\n \"max_resource_units\" : null,\n \"issued_to\" : \"test\",\n \"issuer\" : \"elasticsearch\",\n \"start_date_in_millis\" : -1\n }\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.close_job" + "namespace": "license.get" }, - "specLocation": "ml/close_job/MlCloseJobResponse.ts#L20-L22" + "specLocation": "license/get/GetLicenseResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -29841,7 +30470,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a calendar.\n\nRemove all scheduled events from a calendar, then delete it.", + "description": "Delete a Logstash pipeline.\nDelete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.", "inherits": { "type": { "name": "RequestBase", @@ -29851,12 +30480,12 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_calendar" + "namespace": "logstash.delete_pipeline" }, "path": [ { - "description": "A string that uniquely identifies a calendar.", - "name": "calendar_id", + "description": "An identifier for the pipeline.", + "name": "id", "required": true, "type": { "kind": "instance_of", @@ -29868,31 +30497,18 @@ } ], "query": [], - "specLocation": "ml/delete_calendar/MlDeleteCalendarRequest.ts#L23-L45" + "specLocation": "logstash/delete_pipeline/LogstashDeletePipelineRequest.ts#L23-L47" }, { "body": { - "kind": "value", - "value": { - "kind": "instance_of", - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" - } - } - }, - "examples": { - "MlDeleteCalendarResponseExample1": { - "description": "A successful response when deleting a calendar.", - "value": "{\n \"acknowledged\": true\n}" - } + "kind": "no_body" }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_calendar" + "namespace": "logstash.delete_pipeline" }, - "specLocation": "ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24" + "specLocation": "logstash/delete_pipeline/LogstashDeletePipelineResponse.ts#L22-L24" }, { "attachedBehaviors": [ 
@@ -29901,7 +30517,7 @@ "body": { "kind": "no_body" }, - "description": "Delete events from a calendar.", + "description": "Get Logstash pipelines.\nGet pipelines that are used for Logstash Central Management.", "inherits": { "type": { "name": "RequestBase", @@ -29911,69 +30527,84 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_calendar_event" + "namespace": "logstash.get_pipeline" }, "path": [ { - "description": "A string that uniquely identifies a calendar.", - "name": "calendar_id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Id", - "namespace": "_types" - } - } - }, - { - "description": "Identifier for the scheduled event.\nYou can obtain this identifier by using the get calendar events API.", - "name": "event_id", - "required": true, + "description": "A comma-separated list of pipeline identifiers.", + "name": "id", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Id", + "name": "Ids", "namespace": "_types" } } } ], "query": [], - "specLocation": "ml/delete_calendar_event/MlDeleteCalendarEventRequest.ts#L23-L49" + "specLocation": "logstash/get_pipeline/LogstashGetPipelineRequest.ts#L23-L50" }, { "body": { + "codegenName": "pipelines", "kind": "value", "value": { - "kind": "instance_of", - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "key": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + }, + "kind": "dictionary_of", + "singleKey": false, + "value": { + "kind": "instance_of", + "type": { + "name": "Pipeline", + "namespace": "logstash._types" + } } } }, "examples": { - "MlDeleteCalendarEventResponseExample1": { - "description": "A successful response when deleting a calendar event.", - "value": "{\n \"acknowledged\": true\n}" + "LogstashGetPipelineResponseExample1": { + "description": "A successful response from `GET _logstash/pipeline/my_pipeline`.\n", + "value": "{\n \"my_pipeline\": {\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n \"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": \"1\"\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n }\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_calendar_event" + "namespace": "logstash.get_pipeline" }, - "specLocation": "ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24" + "specLocation": "logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L27" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "no_body" + "codegenName": "pipeline", + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "Pipeline", + "namespace": "logstash._types" + } + } + }, + "description": "Create or update a Logstash pipeline.\n\nCreate a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.", + "examples": { + "LogstashPutPipelineRequestExample1": { + "description": "Run `PUT _logstash/pipeline/my_pipeline` to create a pipeline.", + "summary": "Create a pipeline", + "value": "{\n \"description\": \"Sample pipeline for illustration purposes\",\n \"last_modified\": \"2021-01-02T02:50:51.250Z\",\n 
\"pipeline_metadata\": {\n \"type\": \"logstash_pipeline\",\n \"version\": 1\n },\n \"username\": \"elastic\",\n \"pipeline\": \"input {}\\\\n filter { grok {} }\\\\n output {}\",\n \"pipeline_settings\": {\n \"pipeline.workers\": 1,\n \"pipeline.batch.size\": 125,\n \"pipeline.batch.delay\": 50,\n \"queue.type\": \"memory\",\n \"queue.max_bytes\": \"1gb\",\n \"queue.checkpoint.writes\": 1024\n }\n}" + } }, - "description": "Delete anomaly jobs from a calendar.", "inherits": { "type": { "name": "RequestBase", @@ -29983,12 +30614,12 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_calendar_job" + "namespace": "logstash.put_pipeline" }, "path": [ { - "description": "A string that uniquely identifies a calendar.", - "name": "calendar_id", + "description": "An identifier for the pipeline.", + "name": "id", "required": true, "type": { "kind": "instance_of", @@ -29997,55 +30628,48 @@ "namespace": "_types" } } - }, - { - "description": "An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a\ncomma-separated list of jobs or groups.", - "name": "job_id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Ids", - "namespace": "_types" - } - } } ], "query": [], - "specLocation": "ml/delete_calendar_job/MlDeleteCalendarJobRequest.ts#L23-L50" + "specLocation": "logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L51" }, { "body": { - "kind": "properties", - "properties": [ - { - "description": "A string that uniquely identifies a calendar.", - "name": "calendar_id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Id", - "namespace": "_types" - } - } - }, + "kind": "no_body" + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "logstash.put_pipeline" + }, + "specLocation": "logstash/put_pipeline/LogstashPutPipelineResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ { - "description": "A description of the calendar.", - "name": "description", + "description": "The documents you want to retrieve. Required if no index is specified in the request URI.", + "name": "docs", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "Operation", + "namespace": "_global.mget" + } } } }, { - "description": "A list of anomaly detection job identifiers or group names.", - "name": "job_ids", - "required": true, + "description": "The IDs of the documents you want to retrieve. 
Allowed when the index is specified in the request URI.", + "name": "ids", + "required": false, "type": { "kind": "instance_of", "type": { @@ -30056,27 +30680,29 @@ } ] }, + "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", "examples": { - "MlDeleteCalendarJobResponseExample1": { - "description": "A successful response when deleting an anomaly detection job from a calendar.", - "value": "{\n \"calendar_id\": \"planned-outages\",\n \"job_ids\": []\n}" + "MultiGetRequestExample1": { + "description": "Run `GET /my-index-000001/_mget`. When you specify an index in the request URI, only the document IDs are required in the request body.\n", + "summary": "Get documents by ID", + "value": "{\n \"docs\": [\n {\n \"_id\": \"1\"\n },\n {\n \"_id\": \"2\"\n }\n ]\n}" + }, + "MultiGetRequestExample2": { + "description": "Run `GET /_mget`. This request sets `_source` to `false` for document 1 to exclude the source entirely. It retrieves `field3` and `field4` from document 2. It retrieves the `user` field from document 3 but filters out the `user.location` field.\n", + "summary": "Filter source fields", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"_source\": false\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"_source\": [ \"field3\", \"field4\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"3\",\n \"_source\": {\n \"include\": [ \"user\" ],\n \"exclude\": [ \"user.location\" ]\n }\n }\n ]\n}" + }, + "MultiGetRequestExample3": { + "description": "Run `GET /_mget`. This request retrieves `field1` and `field2` from document 1 and `field3` and `field4` from document 2.\n", + "summary": "Get stored fields", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"stored_fields\": [ \"field1\", \"field2\" ]\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\",\n \"stored_fields\": [ \"field3\", \"field4\" ]\n }\n ]\n}" + }, + "MultiGetRequestExample4": { + "description": "Run `GET /_mget?routing=key1`. If routing is used during indexing, you need to specify the routing value to retrieve documents. This request fetches `test/_doc/2` from the shard corresponding to routing key `key1`. 
It fetches `test/_doc/1` from the shard corresponding to routing key `key2`.\n", + "summary": "Document routing", + "value": "{\n \"docs\": [\n {\n \"_index\": \"test\",\n \"_id\": \"1\",\n \"routing\": \"key2\"\n },\n {\n \"_index\": \"test\",\n \"_id\": \"2\"\n }\n ]\n}" } }, - "kind": "response", - "name": { - "name": "Response", - "namespace": "ml.delete_calendar_job" - }, - "specLocation": "ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31" - }, - { - "attachedBehaviors": [ - "CommonQueryParameters" - ], - "body": { - "kind": "no_body" - }, - "description": "Delete a data frame analytics job.", "inherits": { "type": { "name": "RequestBase", @@ -30086,17 +30712,17 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_data_frame_analytics" + "namespace": "_global.mget" }, "path": [ { - "description": "Identifier for the data frame analytics job.", - "name": "id", - "required": true, + "description": "Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.", + "name": "index", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Id", + "name": "IndexName", "namespace": "_types" } } @@ -30104,8 +30730,35 @@ ], "query": [ { - "description": "If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.", - "name": "force", + "description": "Specifies the node or shard the operation should be performed on. Random by default.", + "name": "preference", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, the request is real-time as opposed to near-real-time.", + "docId": "realtime", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html#realtime", + "name": "realtime", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, the request refreshes relevant shards before retrieving documents.", + "name": "refresh", "required": false, "serverDefault": false, "type": { @@ -30117,186 +30770,165 @@ } }, { - "description": "The time to wait for the job to be deleted.", - "name": "timeout", + "description": "Custom value used to route operations to a specific shard.", + "name": "routing", "required": false, - "serverDefault": "1m", "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "Routing", "namespace": "_types" } } - } - ], - "specLocation": "ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsRequest.ts#L24-L58" - }, - { - "body": { - "kind": "value", - "value": { - "kind": "instance_of", + }, + { + "description": "True or false to return the `_source` field or not, or a list of fields to return.", + "name": "_source", + "required": false, "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "kind": "instance_of", + "type": { + "name": "SourceConfigParam", + "namespace": "_global.search._types" + } } - } - }, - "examples": { - "MlDeleteDataFrameAnalyticsResponseExample1": { - "description": "A successful response when deleting a data frame analytics job.", - "value": "{\n \"acknowledged\": true\n}" - } - }, - "kind": "response", - "name": { - "name": "Response", - "namespace": "ml.delete_data_frame_analytics" - }, - "specLocation": 
"ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24" - }, - { - "attachedBehaviors": [ - "CommonQueryParameters" - ], - "body": { - "kind": "no_body" - }, - "description": "Delete a datafeed.", - "inherits": { - "type": { - "name": "RequestBase", - "namespace": "_types" - } - }, - "kind": "request", - "name": { - "name": "Request", - "namespace": "ml.delete_datafeed" - }, - "path": [ + }, { - "description": "A numerical character string that uniquely identifies the datafeed. This\nidentifier can contain lowercase alphanumeric characters (a-z and 0-9),\nhyphens, and underscores. It must start and end with alphanumeric\ncharacters.", - "name": "datafeed_id", - "required": true, + "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.", + "docId": "mapping-source-field", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html", + "name": "_source_excludes", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Id", + "name": "Fields", "namespace": "_types" } } - } - ], - "query": [ + }, { - "description": "Use to forcefully delete a started datafeed; this method is quicker than\nstopping and deleting the datafeed.", - "name": "force", + "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", + "docId": "mapping-source-field", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html", + "name": "_source_includes", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Fields", + "namespace": "_types" } } - } - ], - "specLocation": "ml/delete_datafeed/MlDeleteDatafeedRequest.ts#L23-L55" - }, - { - "body": { - "kind": "value", - "value": { - "kind": "instance_of", - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" - } - } - }, - "examples": { - "MlDeleteDatafeedResponseExample1": { - "description": "A successful response when deleting a datafeed.", - "value": "{\n \"acknowledged\": true\n}" - } - }, - "kind": "response", - "name": { - "name": "Response", - "namespace": "ml.delete_datafeed" - }, - "specLocation": "ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24" - }, - { - "attachedBehaviors": [ - "CommonQueryParameters" - ], - "body": { - "kind": "no_body" - }, - "description": "Delete a filter.\n\nIf an anomaly detection job references the filter, you cannot delete the\nfilter. 
You must update or delete the job before you can delete the filter.", - "inherits": { - "type": { - "name": "RequestBase", - "namespace": "_types" - } - }, - "kind": "request", - "name": { - "name": "Request", - "namespace": "ml.delete_filter" - }, - "path": [ + { - "description": "A string that uniquely identifies a filter.", - "name": "filter_id", - "required": true, + "description": "If `true`, retrieves the document fields stored in the index rather than the document `_source`.", + "name": "stored_fields", + "required": false, + "serverDefault": "false", "type": { "kind": "instance_of", "type": { - "name": "Id", + "name": "Fields", "namespace": "_types" } } } ], - "query": [], - "specLocation": "ml/delete_filter/MlDeleteFilterRequest.ts#L23-L48" + "specLocation": "_global/mget/MultiGetRequest.ts#L25-L127" }, { "body": { - "kind": "value", - "value": { - "kind": "instance_of", - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "kind": "properties", + "properties": [ + { + "description": "The response includes a docs array that contains the documents in the order specified in the request.\nThe structure of the returned documents is similar to that returned by the get API.\nIf there is a failure getting a particular document, the error is included in place of the document.", + "name": "docs", + "required": true, + "type": { + "kind": "array_of", + "value": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "TDocument", + "namespace": "_global.mget.Response" + } + } + ], + "kind": "instance_of", + "type": { + "name": "ResponseItem", + "namespace": "_global.mget" + } + } + } } - } + ] }, - "examples": { - "MlDeleteFilterResponseExample1": { - "description": "A successful response when deleting a filter.", - "value": "{\n \"acknowledged\": true\n}" + "generics": [ + { + "name": "TDocument", + "namespace": "_global.mget.Response" } - }, + ], "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_filter" + "namespace": "_global.mget" }, - "specLocation": "ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24" + "specLocation": "_global/mget/MultiGetResponse.ts#L22-L31" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "no_body" + "kind": "properties", + "properties": [ + { + "description": "Refer to the description for the `allow_no_match` query parameter.", + "name": "allow_no_match", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Refer to the description for the `force` query parameter.", + "name": "force", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Refer to the description for the `timeout` query parameter.", + "name": "timeout", + "required": false, + "serverDefault": "30m", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ] }, - "description": "Delete an anomaly detection job.\n\nAll job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. 
This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "description": "Close anomaly detection jobs.\n\nA job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", "inherits": { "type": { "name": "RequestBase", @@ -30306,11 +30938,11 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_job" + "namespace": "ml.close_job" }, "path": [ { - "description": "Identifier for the anomaly detection job.", + "description": "Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.", "name": "job_id", "required": true, "type": { @@ -30324,9 +30956,10 @@ ], "query": [ { - "description": "Use to forcefully delete an opened job; this method is quicker than\nclosing and deleting the job.", - "name": "force", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", + "name": "allow_no_match", "required": false, + "serverDefault": true, "type": { "kind": "instance_of", "type": { @@ -30336,8 +30969,8 @@ } }, { - "description": "Specifies whether annotations that have been added by the\nuser should be deleted along with any auto-generated annotations when the job is\nreset.", - "name": "delete_user_annotations", + "description": "Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots.\nIf you want the job to be in a consistent state after the close job API returns, do not set to `true`. 
This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future.", + "name": "force", "required": false, "serverDefault": false, "type": { @@ -30349,50 +30982,50 @@ } }, { - "description": "Specifies whether the request should return immediately or wait until the\njob deletion completes.", - "name": "wait_for_completion", + "description": "Controls the time to wait until a job has closed.", + "name": "timeout", "required": false, - "serverDefault": true, + "serverDefault": "30m", "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Duration", + "namespace": "_types" } } } ], - "specLocation": "ml/delete_job/MlDeleteJobRequest.ts#L23-L72" + "specLocation": "ml/close_job/MlCloseJobRequest.ts#L24-L85" }, { "body": { - "kind": "value", - "value": { - "kind": "instance_of", - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" + "kind": "properties", + "properties": [ + { + "name": "closed", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } - } + ] }, "examples": { - "MlDeleteJobResponseExample1": { - "description": "A successful response when deleting an anomaly detection job.", - "summary": "Delete job", - "value": "{\n \"acknowledged\": true\n}" - }, - "MlDeleteJobResponseExample2": { - "description": "A successful response when deleting an anomaly detection job asynchronously. When the `wait_for_completion` query parameter is set to `false`, the response contains an identifier for the job deletion task.\n", - "summary": "Delete job asynchronously", - "value": "{\n \"task\": \"oTUltX4IQMOUUVeiohTt8A:39\"\n}" + "MlCloseJobResponseExample1": { + "description": "A successful response when closing anomaly detection jobs.", + "value": "{\n \"closed\": true\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_job" + "namespace": "ml.close_job" }, - "specLocation": "ml/delete_job/MlDeleteJobResponse.ts#L22-L24" + "specLocation": "ml/close_job/MlCloseJobResponse.ts#L20-L22" }, { "attachedBehaviors": [ @@ -30401,7 +31034,7 @@ "body": { "kind": "no_body" }, - "description": "Delete an unreferenced trained model.\n\nThe request deletes a trained inference model that is not referenced by an ingest pipeline.", + "description": "Delete a calendar.\n\nRemove all scheduled events from a calendar, then delete it.", "inherits": { "type": { "name": "RequestBase", @@ -30411,12 +31044,12 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_trained_model" + "namespace": "ml.delete_calendar" }, "path": [ { - "description": "The unique identifier of the trained model.", - "name": "model_id", + "description": "A string that uniquely identifies a calendar.", + "name": "calendar_id", "required": true, "type": { "kind": "instance_of", @@ -30427,34 +31060,8 @@ } } ], - "query": [ - { - "description": "Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment.", - "name": "force", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "description": "Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.", - "name": "timeout", - "required": false, - "serverDefault": "30s", - "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" - } - } - } - ], - "specLocation": "ml/delete_trained_model/MlDeleteTrainedModelRequest.ts#L24-L57" + "query": [], + "specLocation": "ml/delete_calendar/MlDeleteCalendarRequest.ts#L23-L45" }, { "body": { @@ -30468,17 +31075,17 @@ } }, "examples": { - "MlDeleteTrainedModelResponseExample1": { - "description": "A successful response when deleting an existing trained inference model.", + "MlDeleteCalendarResponseExample1": { + "description": "A successful response when deleting a calendar.", "value": "{\n \"acknowledged\": true\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_trained_model" + "namespace": "ml.delete_calendar" }, - "specLocation": "ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24" + "specLocation": "ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24" }, { "attachedBehaviors": [ @@ -30487,7 +31094,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a trained model alias.\n\nThis API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "description": "Delete events from a calendar.", "inherits": { "type": { "name": "RequestBase", @@ -30497,24 +31104,24 @@ "kind": "request", "name": { "name": "Request", - "namespace": "ml.delete_trained_model_alias" + "namespace": "ml.delete_calendar_event" }, "path": [ { - "description": "The model alias to delete.", - "name": "model_alias", + "description": "A string that uniquely identifies a calendar.", + "name": "calendar_id", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Name", + "name": "Id", "namespace": "_types" } } }, { - "description": "The trained model ID to which the model alias refers.", - "name": "model_id", + "description": "Identifier for the scheduled event.\nYou can obtain this identifier by using the get calendar events API.", + "name": "event_id", "required": true, "type": { "kind": "instance_of", @@ -30526,7 +31133,7 @@ } ], "query": [], - "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasRequest.ts#L23-L53" + "specLocation": "ml/delete_calendar_event/MlDeleteCalendarEventRequest.ts#L23-L49" }, { "body": { @@ -30540,37 +31147,623 @@ } }, "examples": { - "MlDeleteTrainedModelAliasResponseExample1": { - "description": "A successful response when deleting a trained model alias.", + "MlDeleteCalendarEventResponseExample1": { + "description": "A successful response when deleting a calendar event.", "value": "{\n \"acknowledged\": true\n}" } }, "kind": "response", "name": { "name": "Response", - "namespace": "ml.delete_trained_model_alias" + "namespace": "ml.delete_calendar_event" }, - "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24" + "specLocation": "ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24" }, { "attachedBehaviors": [ "CommonQueryParameters" ], "body": { - "kind": "properties", - "properties": [ - { - "description": "For a list of the properties that you can specify in the\n`analysis_config` component of the body of this API.", - "name": "analysis_config", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "AnalysisConfig", 
- "namespace": "ml._types" - } - } - }, + "kind": "no_body" + }, + "description": "Delete anomaly jobs from a calendar.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_calendar_job" + }, + "path": [ + { + "description": "A string that uniquely identifies a calendar.", + "name": "calendar_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + }, + { + "description": "An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a\ncomma-separated list of jobs or groups.", + "name": "job_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Ids", + "namespace": "_types" + } + } + } + ], + "query": [], + "specLocation": "ml/delete_calendar_job/MlDeleteCalendarJobRequest.ts#L23-L50" + }, + { + "body": { + "kind": "properties", + "properties": [ + { + "description": "A string that uniquely identifies a calendar.", + "name": "calendar_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + }, + { + "description": "A description of the calendar.", + "name": "description", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "A list of anomaly detection job identifiers or group names.", + "name": "job_ids", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Ids", + "namespace": "_types" + } + } + } + ] + }, + "examples": { + "MlDeleteCalendarJobResponseExample1": { + "description": "A successful response when deleting an anomaly detection job from a calendar.", + "value": "{\n \"calendar_id\": \"planned-outages\",\n \"job_ids\": []\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_calendar_job" + }, + "specLocation": "ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete a data frame analytics job.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_data_frame_analytics" + }, + "path": [ + { + "description": "Identifier for the data frame analytics job.", + "name": "id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [ + { + "description": "If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.", + "name": "force", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "The time to wait for the job to be deleted.", + "name": "timeout", + "required": false, + "serverDefault": "1m", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsRequest.ts#L24-L58" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteDataFrameAnalyticsResponseExample1": 
{ + "description": "A successful response when deleting a data frame analytics job.", + "value": "{\n \"acknowledged\": true\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_data_frame_analytics" + }, + "specLocation": "ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete a datafeed.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_datafeed" + }, + "path": [ + { + "description": "A numerical character string that uniquely identifies the datafeed. This\nidentifier can contain lowercase alphanumeric characters (a-z and 0-9),\nhyphens, and underscores. It must start and end with alphanumeric\ncharacters.", + "name": "datafeed_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [ + { + "description": "Use to forcefully delete a started datafeed; this method is quicker than\nstopping and deleting the datafeed.", + "name": "force", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "ml/delete_datafeed/MlDeleteDatafeedRequest.ts#L23-L55" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteDatafeedResponseExample1": { + "description": "A successful response when deleting a datafeed.", + "value": "{\n \"acknowledged\": true\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_datafeed" + }, + "specLocation": "ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete a filter.\n\nIf an anomaly detection job references the filter, you cannot delete the\nfilter. 
You must update or delete the job before you can delete the filter.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_filter" + }, + "path": [ + { + "description": "A string that uniquely identifies a filter.", + "name": "filter_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [], + "specLocation": "ml/delete_filter/MlDeleteFilterRequest.ts#L23-L48" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteFilterResponseExample1": { + "description": "A successful response when deleting a filter.", + "value": "{\n \"acknowledged\": true\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_filter" + }, + "specLocation": "ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete an anomaly detection job.\n\nAll job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_job" + }, + "path": [ + { + "description": "Identifier for the anomaly detection job.", + "name": "job_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [ + { + "description": "Use to forcefully delete an opened job; this method is quicker than\nclosing and deleting the job.", + "name": "force", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Specifies whether annotations that have been added by the\nuser should be deleted along with any auto-generated annotations when the job is\nreset.", + "name": "delete_user_annotations", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Specifies whether the request should return immediately or wait until the\njob deletion completes.", + "name": "wait_for_completion", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "ml/delete_job/MlDeleteJobRequest.ts#L23-L72" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteJobResponseExample1": { + "description": "A successful response when deleting an anomaly detection job.", + "summary": "Delete job", + "value": "{\n \"acknowledged\": true\n}" + }, + "MlDeleteJobResponseExample2": { + "description": "A successful response when deleting an anomaly detection job asynchronously. 
When the `wait_for_completion` query parameter is set to `false`, the response contains an identifier for the job deletion task.\n", + "summary": "Delete job asynchronously", + "value": "{\n \"task\": \"oTUltX4IQMOUUVeiohTt8A:39\"\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_job" + }, + "specLocation": "ml/delete_job/MlDeleteJobResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete an unreferenced trained model.\n\nThe request deletes a trained inference model that is not referenced by an ingest pipeline.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_trained_model" + }, + "path": [ + { + "description": "The unique identifier of the trained model.", + "name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [ + { + "description": "Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment.", + "name": "force", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "ml/delete_trained_model/MlDeleteTrainedModelRequest.ts#L24-L57" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteTrainedModelResponseExample1": { + "description": "A successful response when deleting an existing trained inference model.", + "value": "{\n \"acknowledged\": true\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_trained_model" + }, + "specLocation": "ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "no_body" + }, + "description": "Delete a trained model alias.\n\nThis API deletes an existing model alias that refers to a trained model. 
If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "ml.delete_trained_model_alias" + }, + "path": [ + { + "description": "The model alias to delete.", + "name": "model_alias", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" + } + } + }, + { + "description": "The trained model ID to which the model alias refers.", + "name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [], + "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasRequest.ts#L23-L53" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + } + }, + "examples": { + "MlDeleteTrainedModelAliasResponseExample1": { + "description": "A successful response when deleting a trained model alias.", + "value": "{\n \"acknowledged\": true\n}" + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "ml.delete_trained_model_alias" + }, + "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ + { + "description": "For a list of the properties that you can specify in the\n`analysis_config` component of the body of this API.", + "name": "analysis_config", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AnalysisConfig", + "namespace": "ml._types" + } + } + }, { "description": "Estimates of the highest cardinality in a single bucket that is observed\nfor influencer fields over the time period that the job analyzes data.\nTo produce a good answer, values must be provided for all influencer\nfields. 
Providing values for fields that are not listed as `influencers`\nhas no effect on the estimation.", "name": "max_bucket_cardinality", @@ -101271,6 +102464,7 @@ "kind": "enum", "members": [ { "name": "completion" }, { @@ -101278,29 +102472,272 @@ }, { "name": "space_embedding" }, { "name": "text_embedding" } ], "name": { "name": "AlibabaCloudTaskType", "namespace": "inference.put_alibabacloud" }, "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L82-L87" }, { "kind": "enum", "members": [ { "name": "alibabacloud-ai-search" } ], "name": { "name": "ServiceType", "namespace": "inference.put_alibabacloud" }, "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L89-L91" }, + { + "kind": "enum", + "members": [ + { + "name": "completion" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "AmazonBedrockTaskType", + "namespace": "inference.put_amazonbedrock" + }, + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L86-L89" + }, + { + "kind": "enum", + "members": [ + { + "name": "amazonbedrock" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_amazonbedrock" + }, + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L91-L93" + }, { "kind": "enum", @@ -101313,57 +102750,99 @@ } ], "name": { "name": "AzureAiStudioTaskType", "namespace": "inference.put_azureaistudio" }, "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L83-L86" }, { "kind": "enum", "members": [ { "name": "azureaistudio" } ], "name": { "name": "ServiceType", "namespace": "inference.put_azureaistudio" }, "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L88-L90" }, { "kind": "enum", "members": [ { "name": "completion" }, { "name": "text_embedding" } ], "name": { "name": "AzureOpenAITaskType", "namespace": "inference.put_azureopenai" }, "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L90-L93" }, { "kind": "enum", "members": [ { "name": "azureopenai" } ], "name": { "name": "ServiceType", "namespace": "inference.put_azureopenai" }, "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L95-L97" }, { "kind": "enum", "members": [ { "name": "text_embedding" } ], @@ -117837,36 +119316,522 @@ "value": { "kind": "instance_of", "type": { - "name": "Policy", - "namespace": "enrich._types" + "name": "Policy", + "namespace": "enrich._types" + } + } + } + } + ], + "specLocation": "enrich/_types/Policy.ts#L24-L26" + }, + { + "kind": "interface", + "name": { + "name": "Policy", + "namespace": "enrich._types" + }, + "properties": [ + { + "name": "enrich_fields", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Fields", + "namespace": "_types" + } + } + }, + { + "name": "indices", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Indices", + "namespace": "_types" + } + } + }, + { + "name": "match_field", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Field", + "namespace": "_types" + } + } + }, + { + "name": "query", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "QueryContainer", + "namespace": "_types.query_dsl" + } + } + }, + { + "name": "name", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" + } + } + }, + { + "name": "elasticsearch_version", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "enrich/_types/Policy.ts#L34-L41" + }, + { + "generics": [ + { + "name": "TEvent", + "namespace": "eql._types.EqlSearchResponseBase" + } + ], + "kind": "interface", + "name": { + "name": "EqlSearchResponseBase", + "namespace": "eql._types" + }, + "properties": [ + { + "description": "Identifier for the search.", + "name": "id", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + }, + { + "description": "If true, the response does not contain complete search results.", + "name": "is_partial", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "If true, the search request is still executing.", + "name": "is_running", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Milliseconds it took Elasticsearch to execute the request.", + "name": "took", + "required": false, + "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "UnitMillis", + "namespace": "_types" + } + } + ], + "kind": "instance_of", + "type": { + "name": "DurationValue", + "namespace": "_types" + } + } + }, + { + "description": "If true, the request timed out before completion.", + "name": "timed_out", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "Contains matching events and sequences. 
Also contains related metadata.", + "name": "hits", + "required": true, + "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "TEvent", + "namespace": "eql._types.EqlSearchResponseBase" + } + } + ], + "kind": "instance_of", + "type": { + "name": "EqlHits", + "namespace": "eql._types" + } + } + }, + { + "description": "Contains information about shard failures (if any), in case allow_partial_search_results=true", + "name": "shard_failures", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "ShardFailure", + "namespace": "_types" + } + } + } + } + ], + "specLocation": "eql/_types/EqlSearchResponseBase.ts#L25-L54" + }, + { + "generics": [ + { + "name": "TEvent", + "namespace": "eql._types.EqlHits" + } + ], + "kind": "interface", + "name": { + "name": "EqlHits", + "namespace": "eql._types" + }, + "properties": [ + { + "description": "Metadata about the number of matching events or sequences.", + "name": "total", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "TotalHits", + "namespace": "_global.search._types" + } + } + }, + { + "description": "Contains events matching the query. Each object represents a matching event.", + "name": "events", + "required": false, + "type": { + "kind": "array_of", + "value": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "TEvent", + "namespace": "eql._types.EqlHits" + } + } + ], + "kind": "instance_of", + "type": { + "name": "HitsEvent", + "namespace": "eql._types" + } + } + } + }, + { + "description": "Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence.", + "docId": "eql-sequences", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-sequences", + "name": "sequences", + "required": false, + "type": { + "kind": "array_of", + "value": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "TEvent", + "namespace": "eql._types.EqlHits" + } + } + ], + "kind": "instance_of", + "type": { + "name": "HitsSequence", + "namespace": "eql._types" + } + } + } + } + ], + "specLocation": "eql/_types/EqlHits.ts#L25-L39" + }, + { + "generics": [ + { + "name": "TEvent", + "namespace": "eql._types.HitsEvent" + } + ], + "kind": "interface", + "name": { + "name": "HitsEvent", + "namespace": "eql._types" + }, + "properties": [ + { + "description": "Name of the index containing the event.", + "name": "_index", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" + } + } + }, + { + "description": "Unique identifier for the event. 
This ID is only unique within the index.", + "name": "_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + }, + { + "description": "Original JSON body passed for the event at index time.", + "name": "_source", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "TEvent", + "namespace": "eql._types.HitsEvent" + } + } + }, + { + "description": "Set to `true` for events in a timespan-constrained sequence that do not meet a given condition.", + "docId": "eql-missing-events", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-missing-events", + "name": "missing", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "name": "fields", + "required": false, + "type": { + "key": { + "kind": "instance_of", + "type": { + "name": "Field", + "namespace": "_types" + } + }, + "kind": "dictionary_of", + "singleKey": false, + "value": { + "kind": "array_of", + "value": { + "kind": "user_defined_value" + } + } + } + } + ], + "specLocation": "eql/_types/EqlHits.ts#L41-L54" + }, + { + "generics": [ + { + "name": "TEvent", + "namespace": "eql._types.HitsSequence" + } + ], + "kind": "interface", + "name": { + "name": "HitsSequence", + "namespace": "eql._types" + }, + "properties": [ + { + "description": "Contains events matching the query. Each object represents a matching event.", + "name": "events", + "required": true, + "type": { + "kind": "array_of", + "value": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "TEvent", + "namespace": "eql._types.HitsSequence" + } + } + ], + "kind": "instance_of", + "type": { + "name": "HitsEvent", + "namespace": "eql._types" + } + } + } + }, + { + "description": "Shared field values used to constrain matches in the sequence. 
These are defined using the by keyword in the EQL query syntax.", + "docId": "eql-sequences", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-sequences", + "name": "join_keys", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "user_defined_value" + } + } + } + ], + "specLocation": "eql/_types/EqlHits.ts#L56-L64" + }, + { + "kind": "interface", + "name": { + "name": "TableValuesContainer", + "namespace": "esql._types" + }, + "properties": [ + { + "name": "integer", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "TableValuesIntegerValue", + "namespace": "esql._types" + } + } + } + }, + { + "name": "keyword", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "TableValuesKeywordValue", + "namespace": "esql._types" + } + } + } + }, + { + "name": "long", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "TableValuesLongValue", + "namespace": "esql._types" + } + } + } + }, + { + "name": "double", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "TableValuesLongDouble", + "namespace": "esql._types" } } } } ], - "specLocation": "enrich/_types/Policy.ts#L24-L26" + "specLocation": "esql/_types/TableValuesContainer.ts#L22-L28", + "variants": { + "kind": "container" + } }, { "kind": "interface", "name": { - "name": "Policy", - "namespace": "enrich._types" + "name": "FieldCapability", + "namespace": "_global.field_caps" }, "properties": [ { - "name": "enrich_fields", + "description": "Whether this field can be aggregated on all indices.", + "name": "aggregatable", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Fields", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { + "description": "The list of indices where this field has the same type family, or null if all indices have the same type family for the field.", "name": "indices", - "required": true, + "required": false, "type": { "kind": "instance_of", "type": { @@ -117876,80 +119841,69 @@ } }, { - "name": "match_field", - "required": true, + "description": "Merged metadata across all indices as a map of string keys to arrays of values. 
A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key.", + "name": "meta", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Field", + "name": "Metadata", "namespace": "_types" } } }, { - "name": "query", + "description": "The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field.", + "name": "non_aggregatable_indices", "required": false, "type": { "kind": "instance_of", "type": { - "name": "QueryContainer", - "namespace": "_types.query_dsl" + "name": "Indices", + "namespace": "_types" } } }, { - "name": "name", + "description": "The list of indices where this field is not searchable, or null if all indices have the same definition for the field.", + "name": "non_searchable_indices", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Name", + "name": "Indices", "namespace": "_types" } } }, { - "name": "elasticsearch_version", - "required": false, + "description": "Whether this field is indexed for search on all indices.", + "name": "searchable", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } - } - ], - "specLocation": "enrich/_types/Policy.ts#L34-L41" - }, - { - "generics": [ - { - "name": "TEvent", - "namespace": "eql._types.EqlSearchResponseBase" - } - ], - "kind": "interface", - "name": { - "name": "EqlSearchResponseBase", - "namespace": "eql._types" - }, - "properties": [ + }, { - "description": "Identifier for the search.", - "name": "id", - "required": false, + "name": "type", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "If true, the response does not contain complete search results.", - "name": "is_partial", + "description": "Whether this field is registered as a metadata field.", + "docId": "mapping-metadata", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html", + "name": "metadata_field", "required": false, "type": { "kind": "instance_of", @@ -117960,8 +119914,17 @@ } }, { - "description": "If true, the search request is still executing.", - "name": "is_running", + "availability": { + "serverless": { + "stability": "experimental" + }, + "stack": { + "since": "8.0.0", + "stability": "experimental" + } + }, + "description": "Whether this field is used as a time series dimension.", + "name": "time_series_dimension", + "required": false, "type": { "kind": "instance_of", @@ -117972,434 +119935,527 @@ } }, { - "description": "Milliseconds it took Elasticsearch to execute the request.", - "name": "took", + "availability": { + "serverless": { + "stability": "experimental" + }, + "stack": { + "since": "8.0.0", + "stability": "experimental" + } + }, + "description": "Contains the metric type if this field is used as a time series\nmetric, absent if the field is not used as a metric.", + "name": "time_series_metric", + "required": false, "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "UnitMillis", - "namespace": "_types" - } - } - ], "kind": "instance_of", "type": { - "name": "DurationValue", - "namespace": "_types" + "name": "TimeSeriesMetricType", + "namespace": "_types.mapping" } } }, { - "description": "If true, the request timed out before completion.", - "name": "timed_out", +
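As a rough, hypothetical illustration of how the `FieldCapability` shape above appears on the wire (the index names, the `rating` field, and all values here are invented for the sketch), a field caps request and an abbreviated response might look like:

GET /index-a,index-b/_field_caps?fields=rating

{
  "indices": [ "index-a", "index-b" ],
  "fields": {
    "rating": {
      "long": { "type": "long", "metadata_field": false, "searchable": true, "aggregatable": true, "indices": [ "index-a" ] },
      "keyword": { "type": "keyword", "metadata_field": false, "searchable": true, "aggregatable": false, "indices": [ "index-b" ] }
    }
  }
}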
"availability": { + "serverless": { + "stability": "experimental" + }, + "stack": { + "since": "8.0.0", + "stability": "experimental" + } + }, + "description": "If this list is present in response then some indices have the\nfield marked as a dimension and other indices, the ones in this list, do not.", + "name": "non_dimension_indices", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" + } } } }, { - "description": "Contains matching events and sequences. Also contains related metadata.", - "name": "hits", - "required": true, + "availability": { + "serverless": { + "stability": "experimental" + }, + "stack": { + "since": "8.0.0", + "stability": "experimental" + } + }, + "description": "The list of indices where this field is present if these indices\ndonโ€™t have the same `time_series_metric` value for this field.", + "name": "metric_conflicts_indices", + "required": false, "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "TEvent", - "namespace": "eql._types.EqlSearchResponseBase" - } + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" } - ], + } + } + } + ], + "specLocation": "_global/field_caps/types.ts#L23-L81" + }, + { + "kind": "interface", + "name": { + "name": "StoredScript", + "namespace": "_types" + }, + "properties": [ + { + "description": "The language the script is written in.\nFor serach templates, use `mustache`.", + "name": "lang", + "required": true, + "type": { "kind": "instance_of", "type": { - "name": "EqlHits", - "namespace": "eql._types" + "name": "ScriptLanguage", + "namespace": "_types" } } }, { - "description": "Contains information about shard failures (if any), in case allow_partial_search_results=true", - "name": "shard_failures", + "name": "options", "required": false, "type": { - "kind": "array_of", + "key": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + "kind": "dictionary_of", + "singleKey": false, "value": { "kind": "instance_of", "type": { - "name": "ShardFailure", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } } + }, + { + "description": "The script source.\nFor search templates, an object containing the search template.", + "name": "source", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } } ], - "specLocation": "eql/_types/EqlSearchResponseBase.ts#L25-L54" + "specLocation": "_types/Scripting.ts#L47-L59" }, { - "generics": [ - { - "name": "TEvent", - "namespace": "eql._types.EqlHits" - } - ], "kind": "interface", "name": { - "name": "EqlHits", - "namespace": "eql._types" + "name": "Hop", + "namespace": "graph._types" }, "properties": [ { - "description": "Metadata about the number of matching events or sequences.", - "name": "total", + "description": "Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.", + "name": "connections", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TotalHits", - "namespace": "_global.search._types" + "name": "Hop", + "namespace": "graph._types" } } }, { - "description": "Contains events matching the query. 
Each object represents a matching event.", - "name": "events", + "description": "An optional guiding query that constrains the Graph API as it explores connected terms.", + "name": "query", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "QueryContainer", + "namespace": "_types.query_dsl" + } + } + }, + { + "description": "Contains the fields you are interested in.", + "name": "vertices", + "required": true, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "VertexDefinition", + "namespace": "graph._types" + } + } + } + } + ], + "specLocation": "graph/_types/Hop.ts#L23-L36" + }, + { + "kind": "interface", + "name": { + "name": "VertexDefinition", + "namespace": "graph._types" + }, + "properties": [ + { + "description": "Prevents the specified terms from being included in the results.", + "name": "exclude", "required": false, "type": { "kind": "array_of", "value": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "TEvent", - "namespace": "eql._types.EqlHits" - } - } - ], "kind": "instance_of", "type": { - "name": "HitsEvent", - "namespace": "eql._types" + "name": "string", + "namespace": "_builtins" } } } }, { - "description": "Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence.", - "docId": "eql-sequences", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-sequences", - "name": "sequences", + "description": "Identifies a field in the documents of interest.", + "name": "field", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Field", + "namespace": "_types" + } + } + }, + { + "description": "Identifies the terms of interest that form the starting points from which you want to spider out.", + "name": "include", "required": false, "type": { "kind": "array_of", "value": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "TEvent", - "namespace": "eql._types.EqlHits" - } - } - ], "kind": "instance_of", "type": { - "name": "HitsSequence", - "namespace": "eql._types" + "name": "VertexInclude", + "namespace": "graph._types" } } } + }, + { + "description": "Specifies how many documents must contain a pair of terms before it is considered to be a useful connection.\nThis setting acts as a certainty threshold.", + "name": "min_doc_count", + "required": false, + "serverDefault": 3, + "type": { + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" + } + } + }, + { + "description": "Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration.", + "name": "shard_min_doc_count", + "required": false, + "serverDefault": 2, + "type": { + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" + } + } + }, + { + "description": "Specifies the maximum number of vertex terms returned for each field.", + "name": "size", + "required": false, + "serverDefault": 5, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } } ], - "specLocation": "eql/_types/EqlHits.ts#L25-L39" + "specLocation": "graph/_types/Vertex.ts#L30-L59" }, { - "generics": [ - { - "name": "TEvent", - "namespace": "eql._types.HitsEvent" - } - ], "kind": "interface", "name": { - "name": "HitsEvent", - "namespace": "eql._types" + "name": "VertexInclude", + "namespace": "graph._types" }, 
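Tying the `StoredScript` fields above together: a stored search template sets `lang` to `mustache` and carries the template itself in `source`. A minimal sketch follows; the script id and the `message`/`query_string` names are hypothetical:

PUT _scripts/my-search-template
{
  "script": {
    "lang": "mustache",
    "source": {
      "query": {
        "match": { "message": "{{query_string}}" }
      }
    }
  }
}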
"properties": [ { - "description": "Name of the index containing the event.", - "name": "_index", - "required": true, + "name": "boost", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexName", + "name": "double", "namespace": "_types" } } }, { - "description": "Unique identifier for the event. This ID is only unique within the index.", - "name": "_id", + "name": "term", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } - }, + } + ], + "shortcutProperty": "term", + "specLocation": "graph/_types/Vertex.ts#L61-L65" + }, + { + "kind": "interface", + "name": { + "name": "ExploreControls", + "namespace": "graph._types" + }, + "properties": [ { - "description": "Original JSON body passed for the event at index time.", - "name": "_source", - "required": true, + "description": "To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample.\nYou can do this by selecting a single-value field and setting a maximum number of documents per value for that field.", + "name": "sample_diversity", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "TEvent", - "namespace": "eql._types.HitsEvent" + "name": "SampleDiversity", + "namespace": "graph._types" } } }, { - "description": "Set to `true` for events in a timespan-constrained sequence that do not meet a given condition.", - "docId": "eql-missing-events", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-missing-events", - "name": "missing", + "description": "Each hop considers a sample of the best-matching documents on each shard.\nUsing samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms.\nVery small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms.\nVery large sample sizes can dilute the quality of the results and increase execution times.", + "name": "sample_size", "required": false, + "serverDefault": 100, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "integer", + "namespace": "_types" } } }, { - "name": "fields", + "description": "The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned.\nThis timeout is honored on a best-effort basis.\nExecution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field.", + "name": "timeout", "required": false, "type": { - "key": { - "kind": "instance_of", - "type": { - "name": "Field", - "namespace": "_types" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "array_of", - "value": { - "kind": "user_defined_value" - } + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + }, + { + "description": "Filters associated terms so only those that are significantly associated with your query are included.", + "docId": "search-aggregations-bucket-significantterms-aggregation", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html", + "name": "use_significance", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" } } } ], - "specLocation": 
"eql/_types/EqlHits.ts#L41-L54" + "specLocation": "graph/_types/ExploreControls.ts#L24-L49" }, { - "generics": [ - { - "name": "TEvent", - "namespace": "eql._types.HitsSequence" - } - ], "kind": "interface", "name": { - "name": "HitsSequence", - "namespace": "eql._types" + "name": "SampleDiversity", + "namespace": "graph._types" }, "properties": [ { - "description": "Contains events matching the query. Each object represents a matching event.", - "name": "events", + "name": "field", "required": true, "type": { - "kind": "array_of", - "value": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "TEvent", - "namespace": "eql._types.HitsSequence" - } - } - ], - "kind": "instance_of", - "type": { - "name": "HitsEvent", - "namespace": "eql._types" - } + "kind": "instance_of", + "type": { + "name": "Field", + "namespace": "_types" } } }, { - "description": "Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax.", - "docId": "eql-sequences", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-syntax.html#eql-sequences", - "name": "join_keys", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "user_defined_value" + "name": "max_docs_per_value", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" } } } ], - "specLocation": "eql/_types/EqlHits.ts#L56-L64" + "specLocation": "graph/_types/ExploreControls.ts#L51-L54" }, { "kind": "interface", "name": { - "name": "TableValuesContainer", - "namespace": "esql._types" + "name": "Connection", + "namespace": "graph._types" }, "properties": [ { - "name": "integer", - "required": false, + "name": "doc_count", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "TableValuesIntegerValue", - "namespace": "esql._types" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "name": "keyword", - "required": false, + "name": "source", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "TableValuesKeywordValue", - "namespace": "esql._types" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "name": "long", - "required": false, + "name": "target", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "TableValuesLongValue", - "namespace": "esql._types" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "name": "double", - "required": false, + "name": "weight", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "TableValuesLongDouble", - "namespace": "esql._types" - } + "kind": "instance_of", + "type": { + "name": "double", + "namespace": "_types" } } } ], - "specLocation": "esql/_types/TableValuesContainer.ts#L22-L28", - "variants": { - "kind": "container" - } + "specLocation": "graph/_types/Connection.ts#L22-L27" }, { "kind": "interface", "name": { - "name": "FieldCapability", - "namespace": "_global.field_caps" + "name": "Vertex", + "namespace": "graph._types" }, "properties": [ { - "description": "Whether this field can be aggregated on all indices.", - "name": "aggregatable", + "name": "depth", "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + 
"name": "long", + "namespace": "_types" } } }, { - "description": "The list of indices where this field has the same type family, or null if all indices have the same type family for the field.", - "name": "indices", - "required": false, + "name": "field", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Indices", + "name": "Field", "namespace": "_types" } } }, { - "description": "Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key.", - "name": "meta", - "required": false, + "name": "term", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Metadata", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field.", - "name": "non_aggregatable_indices", - "required": false, + "name": "weight", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Indices", + "name": "double", "namespace": "_types" } } - }, + } + ], + "specLocation": "graph/_types/Vertex.ts#L23-L28" + }, + { + "kind": "interface", + "name": { + "name": "IndicesBlockStatus", + "namespace": "indices.add_block" + }, + "properties": [ { - "description": "The list of indices where this field is not searchable, or null if all indices have the same definition for the field.", - "name": "non_searchable_indices", - "required": false, + "name": "name", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Indices", + "name": "IndexName", "namespace": "_types" } } }, { - "description": "Whether this field is indexed for search on all indices.", - "name": "searchable", + "name": "blocked", "required": true, "type": { "kind": "instance_of", @@ -118408,45 +120464,45 @@ "namespace": "_builtins" } } - }, + } + ], + "specLocation": "indices/add_block/IndicesAddBlockResponse.ts#L30-L33" + }, + { + "kind": "interface", + "name": { + "name": "AnalyzeDetail", + "namespace": "indices.analyze" + }, + "properties": [ { - "name": "type", - "required": true, + "name": "analyzer", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "AnalyzerDetail", + "namespace": "indices.analyze" } } }, { - "description": "Whether this field is registered as a metadata field.", - "docId": "mapping-metadata", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-fields.html", - "name": "metadata_field", + "name": "charfilters", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CharFilterDetail", + "namespace": "indices.analyze" + } } } }, { - "availability": { - "serverless": { - "stability": "experimental" - }, - "stack": { - "since": "8.0.0", - "stability": "experimental" - } - }, - "description": "Whether this field is used as a time series dimension.", - "name": "time_series_dimension", - "required": false, + "name": "custom_analyzer", + "required": true, "type": { "kind": "instance_of", "type": { @@ -118456,236 +120512,171 @@ } }, { - "availability": { - "serverless": { - "stability": "experimental" - }, - "stack": { - "since": "8.0.0", - "stability": "experimental" - } - }, - 
"description": "Contains metric type if this fields is used as a time series\nmetrics, absent if the field is not used as metric.", - "name": "time_series_metric", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "TimeSeriesMetricType", - "namespace": "_types.mapping" - } - } - }, - { - "availability": { - "serverless": { - "stability": "experimental" - }, - "stack": { - "since": "8.0.0", - "stability": "experimental" - } - }, - "description": "If this list is present in response then some indices have the\nfield marked as a dimension and other indices, the ones in this list, do not.", - "name": "non_dimension_indices", + "name": "tokenfilters", "required": false, "type": { "kind": "array_of", "value": { "kind": "instance_of", "type": { - "name": "IndexName", - "namespace": "_types" + "name": "TokenDetail", + "namespace": "indices.analyze" } } } }, { - "availability": { - "serverless": { - "stability": "experimental" - }, - "stack": { - "since": "8.0.0", - "stability": "experimental" - } - }, - "description": "The list of indices where this field is present if these indices\ndonโ€™t have the same `time_series_metric` value for this field.", - "name": "metric_conflicts_indices", + "name": "tokenizer", "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "IndexName", - "namespace": "_types" - } + "kind": "instance_of", + "type": { + "name": "TokenDetail", + "namespace": "indices.analyze" } } } ], - "specLocation": "_global/field_caps/types.ts#L23-L81" + "specLocation": "indices/analyze/types.ts#L24-L30" }, { "kind": "interface", "name": { - "name": "StoredScript", - "namespace": "_types" + "name": "AnalyzerDetail", + "namespace": "indices.analyze" }, "properties": [ { - "description": "The language the script is written in.\nFor serach templates, use `mustache`.", - "name": "lang", + "name": "name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "ScriptLanguage", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "options", - "required": false, + "name": "tokens", + "required": true, "type": { - "key": { + "kind": "array_of", + "value": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "ExplainAnalyzeToken", + "namespace": "indices.analyze" } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { + } + } + } + ], + "specLocation": "indices/analyze/types.ts#L32-L35" + }, + { + "attachedBehaviors": [ + "AdditionalProperties" + ], + "behaviors": [ + { + "generics": [ + { "kind": "instance_of", "type": { "name": "string", "namespace": "_builtins" } + }, + { + "kind": "user_defined_value" } - } - }, - { - "description": "The script source.\nFor search templates, an object containing the search template.", - "name": "source", - "required": true, + ], + "meta": { + "description": "Additional tokenizer-specific attributes", + "fieldname": "attributes" + }, "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "name": "AdditionalProperties", + "namespace": "_spec_utils" } } ], - "specLocation": "_types/Scripting.ts#L47-L59" - }, - { "kind": "interface", "name": { - "name": "Hop", - "namespace": "graph._types" + "name": "ExplainAnalyzeToken", + "namespace": "indices.analyze" }, "properties": [ { - "description": "Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.", - "name": "connections", - 
"required": false, + "name": "bytes", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Hop", - "namespace": "graph._types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "An optional guiding query that constrains the Graph API as it explores connected terms.", - "name": "query", - "required": false, + "name": "end_offset", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "QueryContainer", - "namespace": "_types.query_dsl" + "name": "long", + "namespace": "_types" } } }, { - "description": "Contains the fields you are interested in.", - "name": "vertices", - "required": true, + "name": "keyword", + "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "VertexDefinition", - "namespace": "graph._types" - } + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" } } - } - ], - "specLocation": "graph/_types/Hop.ts#L23-L36" - }, - { - "kind": "interface", - "name": { - "name": "VertexDefinition", - "namespace": "graph._types" - }, - "properties": [ + }, { - "description": "Prevents the specified terms from being included in the results.", - "name": "exclude", - "required": false, + "name": "position", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "description": "Identifies a field in the documents of interest.", - "name": "field", + "name": "positionLength", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Field", + "name": "long", "namespace": "_types" } } }, { - "description": "Identifies the terms of interest that form the starting points from which you want to spider out.", - "name": "include", - "required": false, + "name": "start_offset", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "VertexInclude", - "namespace": "graph._types" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "description": "Specifies how many documents must contain a pair of terms before it is considered to be a useful connection.\nThis setting acts as a certainty threshold.", - "name": "min_doc_count", - "required": false, - "serverDefault": 3, + "name": "termFrequency", + "required": true, "type": { "kind": "instance_of", "type": { @@ -118695,54 +120686,74 @@ } }, { - "description": "Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration.", - "name": "shard_min_doc_count", - "required": false, - "serverDefault": 2, + "name": "token", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Specifies the maximum number of vertex terms returned for each field.", - "name": "size", - "required": false, - "serverDefault": 5, + "name": "type", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "integer", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } } ], - "specLocation": "graph/_types/Vertex.ts#L30-L59" + "specLocation": "indices/analyze/types.ts#L52-L67" }, { "kind": "interface", "name": { - "name": "VertexInclude", - "namespace": "graph._types" + "name": "CharFilterDetail", + 
"namespace": "indices.analyze" }, "properties": [ { - "name": "boost", - "required": false, + "name": "filtered_text", + "required": true, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + }, + { + "name": "name", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "double", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } - }, + } + ], + "specLocation": "indices/analyze/types.ts#L46-L49" + }, + { + "kind": "interface", + "name": { + "name": "TokenDetail", + "namespace": "indices.analyze" + }, + "properties": [ { - "name": "term", + "name": "name", "required": true, "type": { "kind": "instance_of", @@ -118751,217 +120762,291 @@ "namespace": "_builtins" } } + }, + { + "name": "tokens", + "required": true, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "ExplainAnalyzeToken", + "namespace": "indices.analyze" + } + } + } } ], - "shortcutProperty": "term", - "specLocation": "graph/_types/Vertex.ts#L61-L65" + "specLocation": "indices/analyze/types.ts#L71-L74" }, { "kind": "interface", "name": { - "name": "ExploreControls", - "namespace": "graph._types" + "name": "AnalyzeToken", + "namespace": "indices.analyze" }, "properties": [ { - "description": "To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample.\nYou can do this by selecting a single-value field and setting a maximum number of documents per value for that field.", - "name": "sample_diversity", + "name": "end_offset", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" + } + } + }, + { + "name": "position", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" + } + } + }, + { + "name": "positionLength", "required": false, "type": { "kind": "instance_of", "type": { - "name": "SampleDiversity", - "namespace": "graph._types" + "name": "long", + "namespace": "_types" } } }, { - "description": "Each hop considers a sample of the best-matching documents on each shard.\nUsing samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms.\nVery small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms.\nVery large sample sizes can dilute the quality of the results and increase execution times.", - "name": "sample_size", - "required": false, - "serverDefault": 100, + "name": "start_offset", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "integer", + "name": "long", "namespace": "_types" } } }, { - "description": "The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned.\nThis timeout is honored on a best-effort basis.\nExecution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field.", - "name": "timeout", - "required": false, + "name": "token", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Duration", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Filters associated terms so only those that are significantly associated with your query are included.", - "docId": "search-aggregations-bucket-significantterms-aggregation", - 
"docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html", - "name": "use_significance", + "name": "type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", + "name": "string", "namespace": "_builtins" } } } ], - "specLocation": "graph/_types/ExploreControls.ts#L24-L49" + "specLocation": "indices/analyze/types.ts#L37-L44" }, { + "inherits": { + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + }, "kind": "interface", "name": { - "name": "SampleDiversity", - "namespace": "graph._types" + "name": "IndicesResponseBase", + "namespace": "_types" }, "properties": [ { - "name": "field", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "Field", - "namespace": "_types" - } - } - }, - { - "name": "max_docs_per_value", - "required": true, + "name": "_shards", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "integer", + "name": "ShardStatistics", "namespace": "_types" } } } ], - "specLocation": "graph/_types/ExploreControls.ts#L51-L54" + "specLocation": "_types/Base.ts#L138-L140" }, { "kind": "interface", "name": { - "name": "Connection", - "namespace": "graph._types" + "name": "DataStreamLifecycleExplain", + "namespace": "indices.explain_data_lifecycle" }, "properties": [ { - "name": "doc_count", + "name": "index", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "IndexName", "namespace": "_types" } } }, { - "name": "source", + "name": "managed_by_lifecycle", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "name": "target", - "required": true, + "name": "index_creation_date_millis", + "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "UnitMillis", + "namespace": "_types" + } + } + ], "kind": "instance_of", "type": { - "name": "long", + "name": "EpochTime", "namespace": "_types" } } }, { - "name": "weight", - "required": true, + "name": "time_since_index_creation", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "double", + "name": "Duration", "namespace": "_types" } } - } - ], - "specLocation": "graph/_types/Connection.ts#L22-L27" - }, - { - "kind": "interface", - "name": { - "name": "Vertex", - "namespace": "graph._types" - }, - "properties": [ + }, { - "name": "depth", - "required": true, + "name": "rollover_date_millis", + "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "UnitMillis", + "namespace": "_types" + } + } + ], "kind": "instance_of", "type": { - "name": "long", + "name": "EpochTime", "namespace": "_types" } } }, { - "name": "field", - "required": true, + "name": "time_since_rollover", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Field", + "name": "Duration", "namespace": "_types" } } }, { - "name": "term", - "required": true, + "name": "lifecycle", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "DataStreamLifecycleWithRollover", + "namespace": "indices._types" } } }, { - "name": "weight", - "required": true, + "name": "generation_time", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "double", + "name": "Duration", "namespace": "_types" } } + }, + { + "name": "error", + "required": false, + "type": { 
+ "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } } ], - "specLocation": "graph/_types/Vertex.ts#L23-L28" + "specLocation": "indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41" }, { "kind": "interface", "name": { - "name": "IndicesBlockStatus", - "namespace": "indices.add_block" + "name": "IndexAliases", + "namespace": "indices.get_alias" + }, + "properties": [ + { + "name": "aliases", + "required": true, + "type": { + "key": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + }, + "kind": "dictionary_of", + "singleKey": false, + "value": { + "kind": "instance_of", + "type": { + "name": "AliasDefinition", + "namespace": "indices._types" + } + } + } + } + ], + "specLocation": "indices/get_alias/IndicesGetAliasResponse.ts#L37-L39" + }, + { + "kind": "interface", + "name": { + "name": "DataStreamWithLifecycle", + "namespace": "indices.get_data_lifecycle" }, "properties": [ { @@ -118970,60 +121055,50 @@ "type": { "kind": "instance_of", "type": { - "name": "IndexName", + "name": "DataStreamName", "namespace": "_types" } } }, { - "name": "blocked", - "required": true, + "name": "lifecycle", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "DataStreamLifecycleWithRollover", + "namespace": "indices._types" } } } ], - "specLocation": "indices/add_block/IndicesAddBlockResponse.ts#L30-L33" + "specLocation": "indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30" }, { "kind": "interface", "name": { - "name": "AnalyzeDetail", - "namespace": "indices.analyze" + "name": "DataStream", + "namespace": "indices._types" }, "properties": [ { - "name": "analyzer", + "description": "Custom metadata for the stream, copied from the `_meta` object of the streamโ€™s matching index template.\nIf empty, the response omits this property.", + "docId": "mapping-meta-field", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html", + "name": "_meta", "required": false, "type": { "kind": "instance_of", "type": { - "name": "AnalyzerDetail", - "namespace": "indices.analyze" + "name": "Metadata", + "namespace": "_types" } } }, { - "name": "charfilters", + "description": "If `true`, the data stream allows custom routing on write request.", + "name": "allow_custom_routing", "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "CharFilterDetail", - "namespace": "indices.analyze" - } - } - } - }, - { - "name": "custom_analyzer", - "required": true, "type": { "kind": "instance_of", "type": { @@ -119033,520 +121108,539 @@ } }, { - "name": "tokenfilters", + "description": "Information about failure store backing indices", + "name": "failure_store", "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "TokenDetail", - "namespace": "indices.analyze" - } + "kind": "instance_of", + "type": { + "name": "FailureStore", + "namespace": "indices._types" } } }, { - "name": "tokenizer", - "required": false, + "description": "Current generation for the data stream. 
This number acts as a cumulative count of the stream’s rollovers, starting at 1.", + "name": "generation", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "TokenDetail", - "namespace": "indices.analyze" + "name": "integer", + "namespace": "_types" } } - } - ], - "specLocation": "indices/analyze/types.ts#L24-L30" - }, - { - "kind": "interface", - "name": { - "name": "AnalyzerDetail", - "namespace": "indices.analyze" - }, - "properties": [ + }, { - "name": "name", + "description": "If `true`, the data stream is hidden.", + "name": "hidden", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } }, { - "name": "tokens", - "required": true, + "description": "Name of the current ILM lifecycle policy in the stream’s matching index template.\nThis lifecycle policy is set in the `index.lifecycle.name` setting.\nIf the template does not include a lifecycle policy, this property is not included in the response.\nNOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API.", + "name": "ilm_policy", + "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "ExplainAnalyzeToken", - "namespace": "indices.analyze" - } + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" } } - } - ], - "specLocation": "indices/analyze/types.ts#L32-L35" - }, - { - "attachedBehaviors": [ - "AdditionalProperties" - ], - "behaviors": [ + }, { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - }, - { - "kind": "user_defined_value" - } - ], - "meta": { - "description": "Additional tokenizer-specific attributes", - "fieldname": "attributes" - }, + "description": "Name of the lifecycle system that'll manage the next generation of the data stream.", + "name": "next_generation_managed_by", + "required": true, "type": { - "name": "AdditionalProperties", - "namespace": "_spec_utils" + "kind": "instance_of", + "type": { + "name": "ManagedBy", + "namespace": "indices._types" + } } - } - ], - "kind": "interface", - "name": { - "name": "ExplainAnalyzeToken", - "namespace": "indices.analyze" - }, - "properties": [ + }, { - "name": "bytes", + "description": "Indicates if ILM should take precedence over DSL in case both are configured to manage this data stream.", + "name": "prefer_ilm", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } }, { - "name": "end_offset", + "description": "Array of objects containing information about the data stream’s backing indices.\nThe last item in this array contains information about the stream’s current write index.", + "name": "indices", "required": true, "type": { - "kind": "instance_of", - "type": { - "name": "long", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "DataStreamIndex", + "namespace": "indices._types" + } } } }, { - "name": "keyword", + "availability": { + "serverless": { + "stability": "stable" + }, + "stack": { + "since": "8.11.0", + "stability": "stable" + } + }, + "description": "Contains the configuration for the data stream lifecycle of this data stream.", + "name": "lifecycle", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": 
"_builtins" + "name": "DataStreamLifecycleWithRollover", + "namespace": "indices._types" } } }, { - "name": "position", + "description": "Name of the data stream.", + "name": "name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "DataStreamName", "namespace": "_types" } } }, { - "name": "positionLength", - "required": true, + "description": "If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings.", + "name": "replicated", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "name": "start_offset", + "description": "If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too.", + "name": "rollover_on_write", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "name": "termFrequency", + "description": "Health status of the data stream.\nThis health status is based on the state of the primary and replica shards of the streamโ€™s backing indices.", + "name": "status", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "HealthStatus", "namespace": "_types" } } }, { - "name": "token", - "required": true, + "availability": { + "serverless": {}, + "stack": { + "since": "7.10.0" + } + }, + "description": "If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction.", + "name": "system", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } }, { - "name": "type", + "description": "Name of the index template used to create the data streamโ€™s backing indices.\nThe templateโ€™s index pattern must match the name of this data stream.", + "name": "template", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - ], - "specLocation": "indices/analyze/types.ts#L52-L67" - }, - { - "kind": "interface", - "name": { - "name": "CharFilterDetail", - "namespace": "indices.analyze" - }, - "properties": [ - { - "name": "filtered_text", - "required": true, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "name": "Name", + "namespace": "_types" } } }, { - "name": "name", + "description": "Information about the `@timestamp` field in the data stream.", + "name": "timestamp_field", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "DataStreamTimestampField", + "namespace": "indices._types" } } } ], - "specLocation": "indices/analyze/types.ts#L46-L49" + "specLocation": "indices/_types/DataStream.ts#L45-L127" }, { "kind": "interface", "name": { - "name": "TokenDetail", - "namespace": "indices.analyze" + "name": "FailureStore", + "namespace": "indices._types" }, "properties": [ { - "name": "name", + "name": "enabled", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } }, { - "name": "tokens", + "name": "indices", "required": true, "type": { 
"kind": "array_of", "value": { "kind": "instance_of", "type": { - "name": "ExplainAnalyzeToken", - "namespace": "indices.analyze" + "name": "DataStreamIndex", + "namespace": "indices._types" } } } + }, + { + "name": "rollover_on_write", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } ], - "specLocation": "indices/analyze/types.ts#L71-L74" + "specLocation": "indices/_types/DataStream.ts#L39-L43" }, { "kind": "interface", "name": { - "name": "AnalyzeToken", - "namespace": "indices.analyze" + "name": "DataStreamIndex", + "namespace": "indices._types" }, "properties": [ { - "name": "end_offset", + "description": "Name of the backing index.", + "name": "index_name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "IndexName", "namespace": "_types" } } }, { - "name": "position", + "description": "Universally unique identifier (UUID) for the index.", + "name": "index_uuid", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "Uuid", "namespace": "_types" } } }, { - "name": "positionLength", + "description": "Name of the current ILM lifecycle policy configured for this backing index.", + "name": "ilm_policy", "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "Name", "namespace": "_types" } } }, { - "name": "start_offset", - "required": true, + "description": "Name of the lifecycle system that's currently managing this backing index.", + "name": "managed_by", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "ManagedBy", + "namespace": "indices._types" } } }, { - "name": "token", - "required": true, + "description": "Indicates if ILM should take precedence over DSL in case both are configured to manage this index.", + "name": "prefer_ilm", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } - }, + } + ], + "specLocation": "indices/_types/DataStream.ts#L136-L157" + }, + { + "kind": "interface", + "name": { + "name": "DataStreamTimestampField", + "namespace": "indices._types" + }, + "properties": [ { - "name": "type", + "description": "Name of the timestamp field for the data stream, which must be `@timestamp`. 
The `@timestamp` field must be included in every document indexed to the data stream.", + "name": "name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "Field", + "namespace": "_types" } } } ], - "specLocation": "indices/analyze/types.ts#L37-L44" + "specLocation": "indices/_types/DataStream.ts#L129-L134" }, { "kind": "interface", "name": { - "name": "AnalyzeToken", - "namespace": "indices.analyze" + "name": "IndexTemplateItem", + "namespace": "indices.get_index_template" }, "properties": [ { - "name": "end_offset", + "name": "name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "Name", "namespace": "_types" } } }, { - "name": "position", + "name": "index_template", "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "IndexTemplate", + "namespace": "indices._types" } } } ], - "specLocation": "_types/Base.ts#L138-L140" + "specLocation": "indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32" }, { "kind": "interface", "name": { - "name": "DataStreamLifecycleExplain", - "namespace": "indices.explain_data_lifecycle" + "name": "IndexTemplate", + "namespace": "indices._types" }, "properties": [ { - "name": "index", + "description": "Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.", + "name": "index_patterns", "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexName", + "name": "Names", "namespace": "_types" } } }, { - "name": "managed_by_lifecycle", + "description": "An ordered list of component template names.\nComponent templates are merged in the order specified, meaning that the last component template specified has the highest precedence.", + "name": "composed_of", "required": true, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" + } + } + } + }, + { + "description": "Template to be applied.\nIt may optionally include an `aliases`, `mappings`, or `settings` configuration.", + "name": "template", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "IndexTemplateSummary", + "namespace": "indices._types" } } }, { - "name": "index_creation_date_millis", + "description": "Version number used to manage index templates externally.\nThis number is not automatically generated by Elasticsearch.", + "name": "version", "required": false, "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "UnitMillis", - "namespace": "_types" - } - } - ], "kind": "instance_of", "type": { - "name": "EpochTime", + "name": "VersionNumber", "namespace": "_types" } } }, { - "name": "time_since_index_creation", + "description": "Priority to determine index template precedence when a new data stream or index is created.\nThe index template with the highest priority is chosen.\nIf no priority is specified the template is treated as though it is of priority 0 (lowest priority).\nThis number is not automatically generated by Elasticsearch.", + "name": "priority", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Duration", + "name": "long", "namespace": "_types" } } }, { - "name": "rollover_date_millis", + "description": "Optional user metadata about the index template. 
May have any contents.\nThis map is not automatically generated by Elasticsearch.", + "docId": "mapping-meta-field", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html", + "name": "_meta", "required": false, "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "UnitMillis", - "namespace": "_types" - } - } - ], "kind": "instance_of", "type": { - "name": "EpochTime", + "name": "Metadata", "namespace": "_types" } } }, { - "name": "time_since_rollover", + "name": "allow_auto_create", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Duration", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "name": "lifecycle", + "description": "If this object is included, the template is used to create data streams and their backing indices.\nSupports an empty object.\nData streams require a matching index template with a `data_stream` object.", + "name": "data_stream", "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamLifecycleWithRollover", + "name": "IndexTemplateDataStreamConfiguration", "namespace": "indices._types" } } }, { - "name": "generation_time", + "availability": { + "serverless": {}, + "stack": { + "since": "8.12.0" + } + }, + "description": "Marks this index template as deprecated.\nWhen creating or updating a non-deprecated index template that uses deprecated components,\nElasticsearch will emit a deprecation warning.", + "name": "deprecated", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Duration", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "name": "error", + "availability": { + "serverless": {}, + "stack": { + "since": "8.7.0" + } + }, + "description": "A list of component template names that are allowed to be absent.", + "name": "ignore_missing_component_templates", "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "Names", + "namespace": "_types" } } } ], - "specLocation": "indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41" + "specLocation": "indices/_types/IndexTemplate.ts#L28-L81" }, { "kind": "interface", "name": { - "name": "IndexAliases", - "namespace": "indices.get_alias" + "name": "IndexTemplateSummary", + "namespace": "indices._types" }, "properties": [ { + "description": "Aliases to add.\nIf the index template includes a `data_stream` object, these are data stream aliases.\nOtherwise, these are index aliases.\nData stream aliases ignore the `index_routing`, `routing`, and `search_routing` options.", "name": "aliases", - "required": true, + "required": false, "type": { "key": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "IndexName", + "namespace": "_types" } }, "kind": "dictionary_of", @@ -119554,34 +121648,46 @@ "value": { "kind": "instance_of", "type": { - "name": "AliasDefinition", + "name": "Alias", "namespace": "indices._types" } } } - } - ], - "specLocation": "indices/get_alias/IndicesGetAliasResponse.ts#L37-L39" - }, - { - "kind": "interface", - "name": { - "name": "DataStreamWithLifecycle", - "namespace": "indices.get_data_lifecycle" - }, - "properties": [ + }, { - "name": "name", - "required": true, + "description": "Mapping for fields in the index.\nIf specified, this mapping can include field names, field data types, and mapping parameters.", + "name": "mappings", + "required": false, "type": { "kind": 
"instance_of", "type": { - "name": "DataStreamName", - "namespace": "_types" + "name": "TypeMapping", + "namespace": "_types.mapping" + } + } + }, + { + "description": "Configuration options for the index.", + "name": "settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "IndexSettings", + "namespace": "indices._types" } } }, { + "availability": { + "serverless": { + "stability": "stable" + }, + "stack": { + "since": "8.11.0", + "stability": "stable" + } + }, "name": "lifecycle", "required": false, "type": { @@ -119593,33 +121699,33 @@ } } ], - "specLocation": "indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30" + "specLocation": "indices/_types/IndexTemplate.ts#L96-L118" }, { "kind": "interface", "name": { - "name": "DataStream", + "name": "IndexTemplateDataStreamConfiguration", "namespace": "indices._types" }, "properties": [ { - "description": "Custom metadata for the stream, copied from the `_meta` object of the streamโ€™s matching index template.\nIf empty, the response omits this property.", - "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html", - "name": "_meta", + "description": "If true, the data stream is hidden.", + "name": "hidden", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "Metadata", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "description": "If `true`, the data stream allows custom routing on write request.", + "description": "If true, the data stream supports custom routing.", "name": "allow_custom_routing", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -119627,218 +121733,200 @@ "namespace": "_builtins" } } - }, + } + ], + "specLocation": "indices/_types/IndexTemplate.ts#L83-L94" + }, + { + "kind": "interface", + "name": { + "name": "IndexMappingRecord", + "namespace": "indices.get_mapping" + }, + "properties": [ { - "description": "Information about failure store backing indices", - "name": "failure_store", + "name": "item", "required": false, "type": { "kind": "instance_of", "type": { - "name": "FailureStore", - "namespace": "indices._types" + "name": "TypeMapping", + "namespace": "_types.mapping" } } }, { - "description": "Current generation for the data stream. 
This number acts as a cumulative count of the streamโ€™s rollovers, starting at 1.", - "name": "generation", + "name": "mappings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "integer", - "namespace": "_types" + "name": "TypeMapping", + "namespace": "_types.mapping" } } - }, + } + ], + "specLocation": "indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32" + }, + { + "kind": "interface", + "name": { + "name": "Action", + "namespace": "indices.modify_data_stream" + }, + "properties": [ { - "description": "If `true`, the data stream is hidden.", - "name": "hidden", - "required": true, + "description": "Adds an existing index as a backing index for a data stream.\nThe index is hidden as part of this operation.\nWARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior.\nThis should be considered an expert level API.", + "name": "add_backing_index", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "IndexAndDataStreamAction", + "namespace": "indices.modify_data_stream" } } }, { - "description": "Name of the current ILM lifecycle policy in the streamโ€™s matching index template.\nThis lifecycle policy is set in the `index.lifecycle.name` setting.\nIf the template does not include a lifecycle policy, this property is not included in the response.\nNOTE: A data streamโ€™s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API.", - "name": "ilm_policy", + "description": "Removes a backing index from a data stream.\nThe index is unhidden as part of this operation.\nA data streamโ€™s write index cannot be removed.", + "name": "remove_backing_index", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Name", - "namespace": "_types" + "name": "IndexAndDataStreamAction", + "namespace": "indices.modify_data_stream" } } - }, + } + ], + "specLocation": "indices/modify_data_stream/types.ts#L22-L37", + "variants": { + "kind": "container" + } + }, + { + "kind": "interface", + "name": { + "name": "IndexAndDataStreamAction", + "namespace": "indices.modify_data_stream" + }, + "properties": [ { - "description": "Name of the lifecycle system that'll manage the next generation of the data stream.", - "name": "next_generation_managed_by", + "description": "Data stream targeted by the action.", + "name": "data_stream", "required": true, "type": { "kind": "instance_of", "type": { - "name": "ManagedBy", - "namespace": "indices._types" + "name": "DataStreamName", + "namespace": "_types" } } }, { - "description": "Indicates if ILM should take precedence over DSL in case both are configured to managed this data stream.", - "name": "prefer_ilm", + "description": "Index for the action.", + "name": "index", "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "IndexName", + "namespace": "_types" } } - }, + } + ], + "specLocation": "indices/modify_data_stream/types.ts#L39-L44" + }, + { + "kind": "interface", + "name": { + "name": "IndexTemplateMapping", + "namespace": "indices.put_index_template" + }, + "properties": [ { - "description": "Array of objects containing information about the data streamโ€™s backing indices.\nThe last item in this array contains information about the streamโ€™s current write index.", - "name": "indices", - "required": true, + "description": "Aliases to 
add.\nIf the index template includes a `data_stream` object, these are data stream aliases.\nOtherwise, these are index aliases.\nData stream aliases ignore the `index_routing`, `routing`, and `search_routing` options.", + "name": "aliases", + "required": false, "type": { - "kind": "array_of", + "key": { + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" + } + }, + "kind": "dictionary_of", + "singleKey": false, "value": { "kind": "instance_of", "type": { - "name": "DataStreamIndex", + "name": "Alias", "namespace": "indices._types" } } } }, { - "availability": { - "serverless": { - "stability": "stable" - }, - "stack": { - "since": "8.11.0", - "stability": "stable" - } - }, - "description": "Contains the configuration for the data stream lifecycle of this data stream.", - "name": "lifecycle", + "description": "Mapping for fields in the index.\nIf specified, this mapping can include field names, field data types, and mapping parameters.", + "name": "mappings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamLifecycleWithRollover", - "namespace": "indices._types" - } - } - }, - { - "description": "Name of the data stream.", - "name": "name", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "DataStreamName", - "namespace": "_types" + "name": "TypeMapping", + "namespace": "_types.mapping" } } }, { - "description": "If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings.", - "name": "replicated", + "description": "Configuration options for the index.", + "name": "settings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "description": "If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. 
If the rollover fails the indexing request will fail too.",
-      "name": "rollover_on_write",
-      "required": true,
-      "type": {
-        "kind": "instance_of",
-        "type": {
-          "name": "boolean",
-          "namespace": "_builtins"
-        }
-      }
-    },
-    {
-      "description": "Health status of the data stream.\nThis health status is based on the state of the primary and replica shards of the stream’s backing indices.",
-      "name": "status",
-      "required": true,
-      "type": {
-        "kind": "instance_of",
-        "type": {
-          "name": "HealthStatus",
-          "namespace": "_types"
        }
      }
    },
    {
      "availability": {
-        "serverless": {},
+        "serverless": {
+          "stability": "stable"
+        },
        "stack": {
-          "since": "7.10.0"
+          "since": "8.11.0",
+          "stability": "stable"
        }
      },
-      "description": "If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction.",
-      "name": "system",
+      "name": "lifecycle",
      "required": false,
      "type": {
        "kind": "instance_of",
        "type": {
-          "name": "boolean",
-          "namespace": "_builtins"
-        }
-      }
-    },
-    {
-      "description": "Name of the index template used to create the data stream’s backing indices.\nThe template’s index pattern must match the name of this data stream.",
-      "name": "template",
-      "required": true,
-      "type": {
-        "kind": "instance_of",
-        "type": {
-          "name": "Name",
-          "namespace": "_types"
-        }
-      }
-    },
-    {
-      "description": "Information about the `@timestamp` field in the data stream.",
-      "name": "timestamp_field",
-      "required": true,
-      "type": {
-        "kind": "instance_of",
-        "type": {
-          "name": "DataStreamTimestampField",
+          "name": "DataStreamLifecycle",
          "namespace": "indices._types"
        }
      }
    }
  ],
-  "specLocation": "indices/_types/DataStream.ts#L45-L127"
+  "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181"
},
{
  "kind": "interface",
  "name": {
-    "name": "FailureStore",
+    "name": "DataStreamVisibility",
    "namespace": "indices._types"
  },
  "properties": [
    {
-      "name": "enabled",
-      "required": true,
+      "name": "hidden",
+      "required": false,
      "type": {
        "kind": "instance_of",
        "type": {
@@ -119848,22 +121936,8 @@
        }
      }
    },
    {
-      "name": "indices",
-      "required": true,
-      "type": {
-        "kind": "array_of",
-        "value": {
-          "kind": "instance_of",
-          "type": {
-            "name": "DataStreamIndex",
-            "namespace": "indices._types"
-          }
-        }
-      }
-    },
-    {
-      "name": "rollover_on_write",
-      "required": true,
+      "name": "allow_custom_routing",
+      "required": false,
      "type": {
        "kind": "instance_of",
        "type": {
@@ -119873,105 +121947,126 @@
        }
      }
    }
  ],
-  "specLocation": "indices/_types/DataStream.ts#L39-L43"
+  "specLocation": "indices/_types/DataStream.ts#L159-L162"
},
{
  "kind": "interface",
  "name": {
-    "name": "DataStreamIndex",
-    "namespace": "indices._types"
+    "name": "ShardsOperationResponseBase",
+    "namespace": "_types"
  },
  "properties": [
    {
-      "description": "Name of the backing index.",
-      "name": "index_name",
-      "required": true,
+      "name": "_shards",
+      "required": false,
      "type": {
        "kind": "instance_of",
        "type": {
-          "name": "IndexName",
+          "name": "ShardStatistics",
          "namespace": "_types"
        }
      }
-    },
+    }
+  ],
+  "specLocation": "_types/Base.ts#L142-L145"
+},
+{
+  "kind": "interface",
+  "name": {
+    "name": "ResolveIndexItem",
+    "namespace": "indices.resolve_index"
+  },
+  "properties": [
    {
-      "description": "Universally unique identifier (UUID) for the index.",
-      "name": "index_uuid",
+      "name": "name",
      "required": true,
      "type": {
        "kind": "instance_of",
        "type": {
-          "name": "Uuid",
+          "name": "Name",
          "namespace": "_types"
        }
      }
    },
    {
-
"description": "Name of the current ILM lifecycle policy configured for this backing index.", - "name": "ilm_policy", + "name": "aliases", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "Name", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } } } }, { - "description": "Name of the lifecycle system that's currently managing this backing index.", - "name": "managed_by", - "required": false, + "name": "attributes", + "required": true, "type": { - "kind": "instance_of", - "type": { - "name": "ManagedBy", - "namespace": "indices._types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } } } }, { - "description": "Indicates if ILM should take precedence over DSL in case both are configured to manage this index.", - "name": "prefer_ilm", + "name": "data_stream", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "DataStreamName", + "namespace": "_types" } } } ], - "specLocation": "indices/_types/DataStream.ts#L136-L157" + "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L30-L35" }, { "kind": "interface", "name": { - "name": "DataStreamTimestampField", - "namespace": "indices._types" + "name": "ResolveIndexAliasItem", + "namespace": "indices.resolve_index" }, "properties": [ { - "description": "Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream.", "name": "name", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Field", + "name": "Name", + "namespace": "_types" + } + } + }, + { + "name": "indices", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Indices", "namespace": "_types" } } } ], - "specLocation": "indices/_types/DataStream.ts#L129-L134" + "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L37-L40" }, { "kind": "interface", "name": { - "name": "IndexTemplateItem", - "namespace": "indices.get_index_template" + "name": "ResolveIndexDataStreamsItem", + "namespace": "indices.resolve_index" }, "properties": [ { @@ -119980,86 +122075,98 @@ "type": { "kind": "instance_of", "type": { - "name": "Name", + "name": "DataStreamName", "namespace": "_types" } } }, { - "name": "index_template", + "name": "timestamp_field", "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexTemplate", - "namespace": "indices._types" + "name": "Field", + "namespace": "_types" + } + } + }, + { + "name": "backing_indices", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Indices", + "namespace": "_types" } } } ], - "specLocation": "indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32" + "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L42-L46" }, { "kind": "interface", "name": { - "name": "IndexTemplate", - "namespace": "indices._types" + "name": "RolloverConditions", + "namespace": "indices.rollover" }, "properties": [ { - "description": "Name of the index template.", - "name": "index_patterns", - "required": true, + "name": "min_age", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Names", + "name": "Duration", "namespace": "_types" } } }, { - "description": "An ordered list of component template names.\nComponent templates are merged in the order specified, 
meaning that the last component template specified has the highest precedence.", - "name": "composed_of", - "required": true, + "name": "max_age", + "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "Name", - "namespace": "_types" - } + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" } } }, { - "description": "Template to be applied.\nIt may optionally include an `aliases`, `mappings`, or `settings` configuration.", - "name": "template", + "name": "max_age_millis", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "UnitMillis", + "namespace": "_types" + } + } + ], "kind": "instance_of", "type": { - "name": "IndexTemplateSummary", - "namespace": "indices._types" + "name": "DurationValue", + "namespace": "_types" } } }, { - "description": "Version number used to manage index templates externally.\nThis number is not automatically generated by Elasticsearch.", - "name": "version", + "name": "min_docs", "required": false, "type": { "kind": "instance_of", "type": { - "name": "VersionNumber", + "name": "long", "namespace": "_types" } } }, { - "description": "Priority to determine index template precedence when a new data stream or index is created.\nThe index template with the highest priority is chosen.\nIf no priority is specified the template is treated as though it is of priority 0 (lowest priority).\nThis number is not automatically generated by Elasticsearch.", - "name": "priority", + "name": "max_docs", "required": false, "type": { "kind": "instance_of", @@ -120070,204 +122177,185 @@ } }, { - "description": "Optional user metadata about the index template. May have any contents.\nThis map is not automatically generated by Elasticsearch.", - "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html", - "name": "_meta", + "name": "max_size", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Metadata", + "name": "ByteSize", "namespace": "_types" } } }, { - "name": "allow_auto_create", + "name": "max_size_bytes", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "long", + "namespace": "_types" } } }, { - "description": "If this object is included, the template is used to create data streams and their backing indices.\nSupports an empty object.\nData streams require a matching index template with a `data_stream` object.", - "name": "data_stream", + "name": "min_size", "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexTemplateDataStreamConfiguration", - "namespace": "indices._types" + "name": "ByteSize", + "namespace": "_types" } } }, { - "availability": { - "serverless": {}, - "stack": { - "since": "8.12.0" - } - }, - "description": "Marks this index template as deprecated.\nWhen creating or updating a non-deprecated index template that uses deprecated components,\nElasticsearch will emit a deprecation warning.", - "name": "deprecated", + "name": "min_size_bytes", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "long", + "namespace": "_types" } } }, { - "availability": { - "serverless": {}, - "stack": { - "since": "8.7.0" - } - }, - "description": "A list of component template names that are allowed to be absent.", - "name": "ignore_missing_component_templates", + "name": 
"max_primary_shard_size", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Names", + "name": "ByteSize", "namespace": "_types" } } - } - ], - "specLocation": "indices/_types/IndexTemplate.ts#L28-L81" - }, - { - "kind": "interface", - "name": { - "name": "IndexTemplateSummary", - "namespace": "indices._types" - }, - "properties": [ + }, { - "description": "Aliases to add.\nIf the index template includes a `data_stream` object, these are data stream aliases.\nOtherwise, these are index aliases.\nData stream aliases ignore the `index_routing`, `routing`, and `search_routing` options.", - "name": "aliases", + "name": "max_primary_shard_size_bytes", "required": false, "type": { - "key": { - "kind": "instance_of", - "type": { - "name": "IndexName", - "namespace": "_types" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "instance_of", - "type": { - "name": "Alias", - "namespace": "indices._types" - } + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } } }, { - "description": "Mapping for fields in the index.\nIf specified, this mapping can include field names, field data types, and mapping parameters.", - "name": "mappings", + "name": "min_primary_shard_size", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TypeMapping", - "namespace": "_types.mapping" + "name": "ByteSize", + "namespace": "_types" } } }, { - "description": "Configuration options for the index.", - "name": "settings", + "name": "min_primary_shard_size_bytes", "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexSettings", - "namespace": "indices._types" + "name": "long", + "namespace": "_types" } } }, { - "availability": { - "serverless": { - "stability": "stable" - }, - "stack": { - "since": "8.11.0", - "stability": "stable" + "name": "max_primary_shard_docs", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "long", + "namespace": "_types" } - }, - "name": "lifecycle", + } + }, + { + "name": "min_primary_shard_docs", "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamLifecycleWithRollover", - "namespace": "indices._types" + "name": "long", + "namespace": "_types" } } } ], - "specLocation": "indices/_types/IndexTemplate.ts#L96-L118" + "specLocation": "indices/rollover/types.ts#L24-L40" }, { "kind": "interface", "name": { - "name": "IndexTemplateDataStreamConfiguration", - "namespace": "indices._types" + "name": "Overlapping", + "namespace": "indices.simulate_template" }, "properties": [ { - "description": "If true, the data stream is hidden.", - "name": "hidden", - "required": false, - "serverDefault": false, + "name": "name", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Name", + "namespace": "_types" } } }, { - "description": "If true, the data stream supports custom routing.", - "name": "allow_custom_routing", - "required": false, - "serverDefault": false, + "name": "index_patterns", + "required": true, "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } } } } ], - "specLocation": "indices/_types/IndexTemplate.ts#L83-L94" + "specLocation": "indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42" }, { "kind": "interface", "name": { - "name": "IndexMappingRecord", - "namespace": 
"indices.get_mapping" + "name": "Template", + "namespace": "indices.simulate_template" }, "properties": [ { - "name": "item", - "required": false, + "name": "aliases", + "required": true, + "type": { + "key": { + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" + } + }, + "kind": "dictionary_of", + "singleKey": false, + "value": { + "kind": "instance_of", + "type": { + "name": "Alias", + "namespace": "indices._types" + } + } + } + }, + { + "name": "mappings", + "required": true, "type": { "kind": "instance_of", "type": { @@ -120277,52 +122365,64 @@ } }, { - "name": "mappings", + "name": "settings", "required": true, "type": { "kind": "instance_of", "type": { - "name": "TypeMapping", - "namespace": "_types.mapping" + "name": "IndexSettings", + "namespace": "indices._types" } } } ], - "specLocation": "indices/get_mapping/IndicesGetMappingResponse.ts#L29-L32" + "specLocation": "indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37" }, { "kind": "interface", "name": { "name": "Action", - "namespace": "indices.modify_data_stream" + "namespace": "indices.update_aliases" }, "properties": [ { - "description": "Adds an existing index as a backing index for a data stream.\nThe index is hidden as part of this operation.\nWARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior.\nThis should be considered an expert level API.", - "name": "add_backing_index", + "description": "Adds a data stream or index to an alias.\nIf the alias doesnโ€™t exist, the `add` action creates it.", + "name": "add", "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexAndDataStreamAction", - "namespace": "indices.modify_data_stream" + "name": "AddAction", + "namespace": "indices.update_aliases" } } }, { - "description": "Removes a backing index from a data stream.\nThe index is unhidden as part of this operation.\nA data streamโ€™s write index cannot be removed.", - "name": "remove_backing_index", + "description": "Removes a data stream or index from an alias.", + "name": "remove", "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexAndDataStreamAction", - "namespace": "indices.modify_data_stream" + "name": "RemoveAction", + "namespace": "indices.update_aliases" + } + } + }, + { + "description": "Deletes an index.\nYou cannot use this action on aliases or data streams.", + "name": "remove_index", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RemoveIndexAction", + "namespace": "indices.update_aliases" } } } ], - "specLocation": "indices/modify_data_stream/types.ts#L22-L37", + "specLocation": "indices/update_aliases/types.ts#L23-L39", "variants": { "kind": "container" } @@ -120330,123 +122430,113 @@ { "kind": "interface", "name": { - "name": "IndexAndDataStreamAction", - "namespace": "indices.modify_data_stream" + "name": "AddAction", + "namespace": "indices.update_aliases" }, "properties": [ { - "description": "Data stream targeted by the action.", - "name": "data_stream", - "required": true, + "description": "Alias for the action.\nIndex alias names support date math.", + "name": "alias", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamName", + "name": "IndexAlias", "namespace": "_types" } } }, { - "description": "Index for the action.", - "name": "index", - "required": true, + "description": "Aliases for the action.\nIndex alias names support date math.", + "name": "aliases", + "required": false, 
+ "type": { + "items": [ + { + "kind": "instance_of", + "type": { + "name": "IndexAlias", + "namespace": "_types" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "IndexAlias", + "namespace": "_types" + } + } + } + ], + "kind": "union_of" + } + }, + { + "description": "Query used to limit documents the alias can access.", + "name": "filter", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexName", - "namespace": "_types" + "name": "QueryContainer", + "namespace": "_types.query_dsl" } } - } - ], - "specLocation": "indices/modify_data_stream/types.ts#L39-L44" - }, - { - "kind": "interface", - "name": { - "name": "IndexTemplateMapping", - "namespace": "indices.put_index_template" - }, - "properties": [ + }, { - "description": "Aliases to add.\nIf the index template includes a `data_stream` object, these are data stream aliases.\nOtherwise, these are index aliases.\nData stream aliases ignore the `index_routing`, `routing`, and `search_routing` options.", - "name": "aliases", + "description": "Data stream or index for the action.\nSupports wildcards (`*`).", + "name": "index", "required": false, "type": { - "key": { - "kind": "instance_of", - "type": { - "name": "IndexName", - "namespace": "_types" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "instance_of", - "type": { - "name": "Alias", - "namespace": "indices._types" - } + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" } } }, { - "description": "Mapping for fields in the index.\nIf specified, this mapping can include field names, field data types, and mapping parameters.", - "name": "mappings", + "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", + "name": "indices", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TypeMapping", - "namespace": "_types.mapping" + "name": "Indices", + "namespace": "_types" } } }, { - "description": "Configuration options for the index.", - "name": "settings", + "description": "Value used to route indexing operations to a specific shard.\nIf specified, this overwrites the `routing` value for indexing operations.\nData stream aliases donโ€™t support this parameter.", + "name": "index_routing", "required": false, "type": { "kind": "instance_of", "type": { - "name": "IndexSettings", - "namespace": "indices._types" + "name": "Routing", + "namespace": "_types" } } }, { - "availability": { - "serverless": { - "stability": "stable" - }, - "stack": { - "since": "8.11.0", - "stability": "stable" - } - }, - "name": "lifecycle", + "description": "If `true`, the alias is hidden.", + "name": "is_hidden", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamLifecycle", - "namespace": "indices._types" + "name": "boolean", + "namespace": "_builtins" } } - } - ], - "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L159-L181" - }, - { - "kind": "interface", - "name": { - "name": "DataStreamVisibility", - "namespace": "indices._types" - }, - "properties": [ + }, { - "name": "hidden", + "description": "If `true`, sets the write index or data stream for the alias.", + "name": "is_write_index", "required": false, "type": { "kind": "instance_of", @@ -120457,121 +122547,154 @@ } }, { - "name": "allow_custom_routing", + "description": "Value used to route indexing and search operations to a specific shard.\nData stream aliases donโ€™t support this 
parameter.", + "name": "routing", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Routing", + "namespace": "_types" } } - } - ], - "specLocation": "indices/_types/DataStream.ts#L159-L162" - }, - { - "kind": "interface", - "name": { - "name": "ShardsOperationResponseBase", - "namespace": "_types" - }, - "properties": [ + }, { - "name": "_shards", + "description": "Value used to route search operations to a specific shard.\nIf specified, this overwrites the `routing` value for search operations.\nData stream aliases donโ€™t support this parameter.", + "name": "search_routing", "required": false, "type": { "kind": "instance_of", "type": { - "name": "ShardStatistics", + "name": "Routing", "namespace": "_types" } } + }, + { + "description": "If `true`, the alias must exist to perform the action.", + "name": "must_exist", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } ], - "specLocation": "_types/Base.ts#L142-L145" + "specLocation": "indices/update_aliases/types.ts#L41-L95" }, { "kind": "interface", "name": { - "name": "ResolveIndexItem", - "namespace": "indices.resolve_index" + "name": "RemoveAction", + "namespace": "indices.update_aliases" }, "properties": [ { - "name": "name", - "required": true, + "description": "Alias for the action.\nIndex alias names support date math.", + "name": "alias", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Name", + "name": "IndexAlias", "namespace": "_types" } } }, { + "description": "Aliases for the action.\nIndex alias names support date math.", "name": "aliases", "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" + "items": [ + { + "kind": "instance_of", + "type": { + "name": "IndexAlias", + "namespace": "_types" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "IndexAlias", + "namespace": "_types" + } + } } - } + ], + "kind": "union_of" } }, { - "name": "attributes", - "required": true, + "description": "Data stream or index for the action.\nSupports wildcards (`*`).", + "name": "index", + "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "kind": "instance_of", + "type": { + "name": "IndexName", + "namespace": "_types" } } }, { - "name": "data_stream", + "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", + "name": "indices", "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamName", + "name": "Indices", "namespace": "_types" } } + }, + { + "description": "If `true`, the alias must exist to perform the action.", + "name": "must_exist", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } ], - "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L30-L35" + "specLocation": "indices/update_aliases/types.ts#L97-L122" }, { "kind": "interface", "name": { - "name": "ResolveIndexAliasItem", - "namespace": "indices.resolve_index" + "name": "RemoveIndexAction", + "namespace": "indices.update_aliases" }, "properties": [ { - "name": "name", - "required": true, + "description": "Data stream or index for the action.\nSupports wildcards (`*`).", + 
"name": "index", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Name", + "name": "IndexName", "namespace": "_types" } } }, { + "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", "name": "indices", - "required": true, + "required": false, "type": { "kind": "instance_of", "type": { @@ -120579,677 +122702,755 @@ "namespace": "_types" } } + }, + { + "description": "If `true`, the alias must exist to perform the action.", + "name": "must_exist", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } ], - "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L37-L40" + "specLocation": "indices/update_aliases/types.ts#L124-L139" }, { "kind": "interface", "name": { - "name": "ResolveIndexDataStreamsItem", - "namespace": "indices.resolve_index" + "name": "IndicesValidationExplanation", + "namespace": "indices.validate_query" }, "properties": [ { - "name": "name", - "required": true, + "name": "error", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "DataStreamName", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "timestamp_field", + "name": "explanation", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "name": "index", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Field", + "name": "IndexName", "namespace": "_types" } } }, { - "name": "backing_indices", + "name": "valid", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Indices", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } } ], - "specLocation": "indices/resolve_index/ResolveIndexResponse.ts#L42-L46" + "specLocation": "indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37" }, { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, "kind": "interface", "name": { - "name": "RolloverConditions", - "namespace": "indices.rollover" + "name": "RequestChatCompletionBase", + "namespace": "inference._types" }, "properties": [ { - "name": "min_age", - "required": false, + "description": "A list of objects representing the conversation.", + "name": "messages", + "required": true, "type": { - "kind": "instance_of", - "type": { - "name": "Duration", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "Message", + "namespace": "inference.chat_completion_unified" + } } } }, { - "name": "max_age", + "description": "The ID of the model to use.", + "name": "model", "required": false, "type": { "kind": "instance_of", "type": { - "name": "Duration", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "max_age_millis", + "description": "The upper bound limit for the number of tokens that can be generated for a completion request.", + "name": "max_completion_tokens", "required": false, "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "UnitMillis", - "namespace": "_types" - } - } - ], "kind": "instance_of", "type": { - "name": "DurationValue", + "name": "long", "namespace": "_types" } } }, { - "name": "min_docs", + "description": "A sequence of strings to control when the model should stop generating additional tokens.", + "name": "stop", "required": false, 
"type": { - "kind": "instance_of", - "type": { - "name": "long", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } } } }, { - "name": "max_docs", + "description": "The sampling temperature to use.", + "name": "temperature", "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "float", "namespace": "_types" } } }, { - "name": "max_size", + "description": "Controls which tool is called by the model.", + "name": "tool_choice", "required": false, "type": { "kind": "instance_of", "type": { - "name": "ByteSize", - "namespace": "_types" + "name": "CompletionToolType", + "namespace": "inference.chat_completion_unified" } } }, { - "name": "max_size_bytes", + "description": "A list of tools that the model can call.", + "name": "tools", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "long", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CompletionTool", + "namespace": "inference.chat_completion_unified" + } } } }, { - "name": "min_size", + "description": "Nucleus sampling, an alternative to sampling with temperature.", + "name": "top_p", "required": false, "type": { "kind": "instance_of", "type": { - "name": "ByteSize", + "name": "float", "namespace": "_types" } } - }, + } + ], + "specLocation": "inference/_types/CommonTypes.ts#L28-L61" + }, + { + "description": "An object representing part of the conversation.", + "kind": "interface", + "name": { + "name": "Message", + "namespace": "inference.chat_completion_unified" + }, + "properties": [ { - "name": "min_size_bytes", + "description": "The content of the message.", + "name": "content", "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "MessageContent", + "namespace": "inference.chat_completion_unified" } } }, { - "name": "max_primary_shard_size", - "required": false, + "description": "The role of the message author.", + "name": "role", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "ByteSize", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "max_primary_shard_size_bytes", + "description": "The tool call that this message is responding to.", + "name": "tool_call_id", "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "Id", "namespace": "_types" } } }, { - "name": "min_primary_shard_size", + "description": "The tool calls generated by the model.", + "name": "tool_calls", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "ByteSize", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "ToolCall", + "namespace": "inference.chat_completion_unified" + } } } - }, + } + ], + "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L110-L130" + }, + { + "description": "A tool call generated by the model.", + "kind": "interface", + "name": { + "name": "ToolCall", + "namespace": "inference.chat_completion_unified" + }, + "properties": [ { - "name": "min_primary_shard_size_bytes", - "required": false, + "description": "The identifier of the tool call.", + "name": "id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", + "name": "Id", "namespace": "_types" } } }, { - "name": "max_primary_shard_docs", - "required": false, + "description": "The function 
that the model called.", + "name": "function", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "ToolCallFunction", + "namespace": "inference.chat_completion_unified" } } }, { - "name": "min_primary_shard_docs", - "required": false, + "description": "The type of the tool call.", + "name": "type", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } } ], - "specLocation": "indices/rollover/types.ts#L24-L40" + "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L87-L103" }, { + "description": "The function that the model called.", "kind": "interface", "name": { - "name": "Overlapping", - "namespace": "indices.simulate_template" + "name": "ToolCallFunction", + "namespace": "inference.chat_completion_unified" }, "properties": [ { - "name": "name", + "description": "The arguments to call the function with in JSON format.", + "name": "arguments", "required": true, "type": { "kind": "instance_of", "type": { - "name": "Name", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "index_patterns", + "description": "The name of the function to call.", + "name": "name", "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" } } } ], - "specLocation": "indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42" + "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L73-L85" }, { + "description": "A list of tools that the model can call.", "kind": "interface", "name": { - "name": "Template", - "namespace": "indices.simulate_template" + "name": "CompletionTool", + "namespace": "inference.chat_completion_unified" }, "properties": [ { - "name": "aliases", - "required": true, - "type": { - "key": { - "kind": "instance_of", - "type": { - "name": "IndexName", - "namespace": "_types" - } - }, - "kind": "dictionary_of", - "singleKey": false, - "value": { - "kind": "instance_of", - "type": { - "name": "Alias", - "namespace": "indices._types" - } - } - } - }, - { - "name": "mappings", + "description": "The type of tool.", + "name": "type", "required": true, "type": { "kind": "instance_of", "type": { - "name": "TypeMapping", - "namespace": "_types.mapping" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "settings", + "description": "The function definition.", + "name": "function", "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexSettings", - "namespace": "indices._types" + "name": "CompletionToolFunction", + "namespace": "inference.chat_completion_unified" } } } ], - "specLocation": "indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37" + "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L180-L192" }, { + "description": "The completion tool function definition.", "kind": "interface", "name": { - "name": "Action", - "namespace": "indices.update_aliases" + "name": "CompletionToolFunction", + "namespace": "inference.chat_completion_unified" }, "properties": [ { - "description": "Adds a data stream or index to an alias.\nIf the alias doesnโ€™t exist, the `add` action creates it.", - "name": "add", + "description": "A description of what the function does.\nThis is used by the model to choose when and how to call the 
function.", + "name": "description", "required": false, "type": { "kind": "instance_of", "type": { - "name": "AddAction", - "namespace": "indices.update_aliases" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Removes a data stream or index from an alias.", - "name": "remove", - "required": false, + "description": "The name of the function.", + "name": "name", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "RemoveAction", - "namespace": "indices.update_aliases" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Deletes an index.\nYou cannot use this action on aliases or data streams.", - "name": "remove_index", + "description": "The parameters the functional accepts. This should be formatted as a JSON object.", + "name": "parameters", + "required": false, + "type": { + "kind": "user_defined_value" + } + }, + { + "description": "Whether to enable schema adherence when generating the function call.", + "name": "strict", "required": false, "type": { "kind": "instance_of", "type": { - "name": "RemoveIndexAction", - "namespace": "indices.update_aliases" + "name": "boolean", + "namespace": "_builtins" } } } ], - "specLocation": "indices/update_aliases/types.ts#L23-L39", - "variants": { - "kind": "container" - } + "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L157-L178" }, { + "description": "Defines the completion result.", "kind": "interface", "name": { - "name": "AddAction", - "namespace": "indices.update_aliases" + "name": "CompletionInferenceResult", + "namespace": "inference._types" }, "properties": [ { - "description": "Alias for the action.\nIndex alias names support date math.", - "name": "alias", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "IndexAlias", - "namespace": "_types" - } - } - }, - { - "description": "Aliases for the action.\nIndex alias names support date math.", - "name": "aliases", - "required": false, + "name": "completion", + "required": true, "type": { - "items": [ - { - "kind": "instance_of", - "type": { - "name": "IndexAlias", - "namespace": "_types" - } - }, - { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "IndexAlias", - "namespace": "_types" - } - } + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CompletionResult", + "namespace": "inference._types" } - ], - "kind": "union_of" - } - }, - { - "description": "Query used to limit documents the alias can access.", - "name": "filter", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "QueryContainer", - "namespace": "_types.query_dsl" } } - }, + } + ], + "specLocation": "inference/_types/Results.ts#L84-L89" + }, + { + "description": "The completion result object", + "kind": "interface", + "name": { + "name": "CompletionResult", + "namespace": "inference._types" + }, + "properties": [ { - "description": "Data stream or index for the action.\nSupports wildcards (`*`).", - "name": "index", - "required": false, + "name": "result", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexName", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } - }, + } + ], + "specLocation": "inference/_types/Results.ts#L77-L82" + }, + { + "description": "Acknowledged response. 
For dry_run, contains the list of pipelines which reference the inference endpoint", + "inherits": { + "type": { + "name": "AcknowledgedResponseBase", + "namespace": "_types" + } + }, + "kind": "interface", + "name": { + "name": "DeleteInferenceEndpointResult", + "namespace": "inference._types" + }, + "properties": [ { - "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", - "name": "indices", - "required": false, + "name": "pipelines", + "required": true, "type": { - "kind": "instance_of", - "type": { - "name": "Indices", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } } } - }, + } + ], + "specLocation": "inference/_types/Results.ts#L110-L115" + }, + { + "description": "Represents an inference endpoint as returned by the GET API", + "inherits": { + "type": { + "name": "InferenceEndpoint", + "namespace": "inference._types" + } + }, + "kind": "interface", + "name": { + "name": "InferenceEndpointInfo", + "namespace": "inference._types" + }, + "properties": [ { - "description": "Value used to route indexing operations to a specific shard.\nIf specified, this overwrites the `routing` value for indexing operations.\nData stream aliases donโ€™t support this parameter.", - "name": "index_routing", - "required": false, + "description": "The inference Id", + "name": "inference_id", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Routing", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "If `true`, the alias is hidden.", - "name": "is_hidden", - "required": false, - "serverDefault": false, + "description": "The task type", + "name": "task_type", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "TaskType", + "namespace": "inference._types" } } - }, + } + ], + "specLocation": "inference/_types/Services.ts#L46-L58" + }, + { + "description": "Configuration options when storing the inference endpoint", + "kind": "interface", + "name": { + "name": "InferenceEndpoint", + "namespace": "inference._types" + }, + "properties": [ { - "description": "If `true`, sets the write index or data stream for the alias.", - "name": "is_write_index", + "description": "Chunking configuration object", + "name": "chunking_settings", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "InferenceChunkingSettings", + "namespace": "inference._types" } } }, { - "description": "Value used to route indexing and search operations to a specific shard.\nData stream aliases donโ€™t support this parameter.", - "name": "routing", - "required": false, + "description": "The service type", + "name": "service", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Routing", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Value used to route search operations to a specific shard.\nIf specified, this overwrites the `routing` value for search operations.\nData stream aliases donโ€™t support this parameter.", - "name": "search_routing", - "required": false, + "description": "Settings specific to the service", + "name": "service_settings", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Routing", - "namespace": "_types" + "name": "ServiceSettings", + "namespace": "inference._types" } } }, { - "description": "If 
`true`, the alias must exist to perform the action.", - "name": "must_exist", + "description": "Task settings specific to the service and task type", + "name": "task_settings", "required": false, - "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "TaskSettings", + "namespace": "inference._types" } } } ], - "specLocation": "indices/update_aliases/types.ts#L41-L95" + "specLocation": "inference/_types/Services.ts#L24-L44" }, { + "description": "Chunking configuration object", + "inherits": { + "type": { + "name": "InferenceEndpoint", + "namespace": "inference._types" + } + }, "kind": "interface", "name": { - "name": "RemoveAction", - "namespace": "indices.update_aliases" + "name": "InferenceChunkingSettings", + "namespace": "inference._types" }, "properties": [ { - "description": "Alias for the action.\nIndex alias names support date math.", - "name": "alias", + "description": "The maximum size of a chunk in words.\nThis value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).", + "name": "max_chunk_size", "required": false, + "serverDefault": 250, "type": { "kind": "instance_of", "type": { - "name": "IndexAlias", + "name": "integer", "namespace": "_types" } } }, { - "description": "Aliases for the action.\nIndex alias names support date math.", - "name": "aliases", - "required": false, - "type": { - "items": [ - { - "kind": "instance_of", - "type": { - "name": "IndexAlias", - "namespace": "_types" - } - }, - { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "IndexAlias", - "namespace": "_types" - } - } - } - ], - "kind": "union_of" - } - }, - { - "description": "Data stream or index for the action.\nSupports wildcards (`*`).", - "name": "index", + "description": "The number of overlapping words for chunks.\nIt is applicable only to a `word` chunking strategy.\nThis value cannot be higher than half the `max_chunk_size` value.", + "name": "overlap", "required": false, + "serverDefault": 100, "type": { "kind": "instance_of", "type": { - "name": "IndexName", + "name": "integer", "namespace": "_types" } } }, { - "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", - "name": "indices", + "description": "The number of overlapping sentences for chunks.\nIt is applicable only for a `sentence` chunking strategy.\nIt can be either `1` or `0`.", + "name": "sentence_overlap", "required": false, + "serverDefault": 1, "type": { "kind": "instance_of", "type": { - "name": "Indices", + "name": "integer", "namespace": "_types" } } }, { - "description": "If `true`, the alias must exist to perform the action.", - "name": "must_exist", + "description": "The chunking strategy: `sentence` or `word`.", + "name": "strategy", "required": false, - "serverDefault": false, + "serverDefault": "sentence", "type": { "kind": "instance_of", "type": { - "name": "boolean", + "name": "string", "namespace": "_builtins" } } } ], - "specLocation": "indices/update_aliases/types.ts#L97-L122" + "specLocation": "inference/_types/Services.ts#L60-L89" }, { "kind": "interface", "name": { - "name": "RemoveIndexAction", - "namespace": "indices.update_aliases" +<<<<<<< HEAD + "name": "AlibabaCloudServiceSettings", + "namespace": "inference.put_alibabacloud" }, "properties": [ { - "description": "Data stream or index for the action.\nSupports wildcards (`*`).", - "name": "index", - "required": false, + "description": "A valid API key for the 
AlibabaCloud AI Search API.", + "name": "api_key", +======= +<<<<<<< HEAD +======= + "name": "AmazonBedrockServiceSettings", + "namespace": "inference.put_amazonbedrock" + }, + "properties": [ + { + "description": "A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests.", + "name": "access_key", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexName", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Data streams or indices for the action.\nSupports wildcards (`*`).", - "name": "indices", - "required": false, +<<<<<<< HEAD + "description": "The name of the host address used for the inference task.\nYou can find the host address in the API keys section of the documentation.", + "extDocId": "alibabacloud-api-keys", + "extDocUrl": "https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key", + "name": "host", +======= + "description": "The base model ID or an ARN to a custom model based on a foundational model.\nThe base model IDs can be found in the Amazon Bedrock documentation.\nNote that the model ID must be available for the provider chosen and your IAM user must have access to the model.", + "extDocId": "amazonbedrock-models", + "extDocUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html", + "name": "model", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Indices", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "If `true`, the alias must exist to perform the action.", - "name": "must_exist", +<<<<<<< HEAD + "description": "This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search.\nBy default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`.", +======= + "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `amazontitan` - available for `text_embedding` and `completion` task types\n* `anthropic` - available for `completion` task type only\n* `ai21labs` - available for `completion` task type only\n* `cohere` - available for `text_embedding` and `completion` task types\n* `meta` - available for `completion` task type only\n* `mistral` - available for `completion` task type only", + "name": "provider", "required": false, - "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", + "name": "string", "namespace": "_builtins" } } - } - ], - "specLocation": "indices/update_aliases/types.ts#L124-L139" - }, - { - "kind": "interface", - "name": { - "name": "IndicesValidationExplanation", - "namespace": "indices.validate_query" - }, - "properties": [ + }, { - "name": "error", - "required": false, + "description": "The region that your model or ARN is deployed in.\nThe list of available regions per model can be found in the Amazon Bedrock documentation.", + "extDocId": "amazonbedrock-models", + "extDocUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html", + "name": "region", + "required": true, "type": { "kind": "instance_of", "type": { @@ -121259,188 +123460,139 @@ } }, { - "name": "explanation", + "description": "This setting helps to minimize the number of rate limit errors returned from 
Amazon Bedrock.\nBy default, the `amazonbedrock` service sets the number of requests allowed per minute to 240.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "name": "rate_limit", "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "RateLimitSetting", + "namespace": "inference._types" } } }, { - "name": "index", +<<<<<<< HEAD + "description": "The name of the model service to use for the inference task.\nThe following service IDs are available for the `completion` task:\n\n* `ops-qwen-turbo`\n* `qwen-turbo`\n* `qwen-plus`\n* `qwen-max ÷ qwen-max-longcontext`\n\nThe following service ID is available for the `rerank` task:\n\n* `ops-bge-reranker-larger`\n\nThe following service ID is available for the `sparse_embedding` task:\n\n* `ops-text-sparse-embedding-001`\n\nThe following service IDs are available for the `text_embedding` task:\n\n`ops-text-embedding-001`\n`ops-text-embedding-zh-001`\n`ops-text-embedding-en-001`\n`ops-text-embedding-002`", + "name": "service_id", "required": true, "type": { "kind": "instance_of", "type": { - "name": "IndexName", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "name": "valid", + "description": "The name of the workspace used for the inference task.", + "name": "workspace", +======= + "description": "A valid AWS secret key that is paired with the `access_key`.\nFor information about creating and managing access and secret keys, refer to the AWS documentation.", + "extDocId": "amazonbedrock-secret-keys", + "extDocUrl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html", + "name": "secret_key", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": true, "type": { "kind": "instance_of", "type": { - "name": "boolean", + "name": "string", "namespace": "_builtins" } } } ], - "specLocation": "indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37" +<<<<<<< HEAD + "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L93-L138" +======= + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L95-L137" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { - "attachedBehaviors": [ - "CommonQueryParameters" - ], - "inherits": { - "type": { - "name": "RequestBase", - "namespace": "_types" - } - }, "kind": "interface", "name": { - "name": "RequestChatCompletionBase", + "name": "RateLimitSetting", "namespace": "inference._types" }, "properties": [ { - "description": "A list of objects representing the conversation.", - "name": "messages", - "required": true, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "Message", - "namespace": "inference.chat_completion_unified" - } - } - } - }, - { - "description": "The ID of the model to use.", - "name": "model", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "The upper bound limit for the number of tokens that can be generated for a completion request.", - "name": "max_completion_tokens", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "long", - "namespace": "_types" - } - } - }, - { - "description": "A sequence of strings to control when the model should stop generating additional tokens.", - "name": "stop", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - 
"namespace": "_builtins" - } - } - } - }, - { - "description": "The sampling temperature to use.", - "name": "temperature", + "description": "The number of requests allowed per minute.", + "name": "requests_per_minute", "required": false, "type": { "kind": "instance_of", "type": { - "name": "float", + "name": "integer", "namespace": "_types" } } - }, + } + ], + "specLocation": "inference/_types/Services.ts#L95-L100" + }, + { + "kind": "interface", + "name": { +<<<<<<< HEAD + "name": "AlibabaCloudTaskSettings", + "namespace": "inference.put_alibabacloud" + }, + "properties": [ { - "description": "Controls which tool is called by the model.", - "name": "tool_choice", + "description": "For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model.\nValid values are:\n\n* `ingest` for storing document embeddings in a vector database.\n* `search` for storing embeddings of search queries run against a vector database to find relevant documents.", + "name": "input_type", "required": false, "type": { "kind": "instance_of", "type": { - "name": "CompletionToolType", - "namespace": "inference.chat_completion_unified" - } - } - }, - { - "description": "A list of tools that the model can call.", - "name": "tools", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "CompletionTool", - "namespace": "inference.chat_completion_unified" - } + "name": "string", + "namespace": "_builtins" } } }, { - "description": "Nucleus sampling, an alternative to sampling with temperature.", - "name": "top_p", + "description": "For a `sparse_embedding` task, it affects whether the token name will be returned in the response.\nIt defaults to `false`, which means only the token ID will be returned in the response.", + "name": "return_token", "required": false, "type": { "kind": "instance_of", "type": { - "name": "float", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } } ], - "specLocation": "inference/_types/CommonTypes.ts#L28-L61" + "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L140-L154" }, { - "description": "An object representing part of the conversation.", "kind": "interface", "name": { - "name": "Message", - "namespace": "inference.chat_completion_unified" + "name": "AzureAiStudioServiceSettings", + "namespace": "inference.put_azureaistudio" }, "properties": [ { - "description": "The content of the message.", - "name": "content", - "required": false, + "description": "A valid API key of your Azure AI Studio model deployment.\nThis key can be found on the overview page for your deployment in the management section of your Azure AI Studio account.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "extDocId": "azureaistudio-api-keys", + "extDocUrl": "https://ai.azure.com/", + "name": "api_key", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "MessageContent", - "namespace": "inference.chat_completion_unified" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "The role of the message author.", - "name": "role", + "description": "The type of endpoint that is available for deployment through Azure AI Studio: 
`token` or `realtime`.\nThe `token` endpoint type is for \"pay as you go\" endpoints that are billed per token.\nThe `realtime` endpoint type is for \"real-time\" endpoints that are billed per hour of usage.", + "extDocId": "azureaistudio-endpoint-types", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/deployments-overview#billing-for-deploying-and-inferencing-llms-in-azure-ai-studio", + "name": "endpoint_type", "required": true, "type": { "kind": "instance_of", @@ -121451,129 +123603,160 @@ } }, { - "description": "The tool call that this message is responding to.", - "name": "tool_call_id", - "required": false, + "description": "The target URL of your Azure AI Studio model deployment.\nThis can be found on the overview page for your deployment in the management section of your Azure AI Studio account.", + "name": "target", + "required": true, "type": { "kind": "instance_of", "type": { - "name": "Id", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "The tool calls generated by the model.", - "name": "tool_calls", + "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `cohere` - available for `text_embedding` and `completion` task types\n* `databricks` - available for `completion` task type only\n* `meta` - available for `completion` task type only\n* `microsoft_phi` - available for `completion` task type only\n* `mistral` - available for `completion` task type only\n* `openai` - available for `text_embedding` and `completion` task types", + "name": "provider", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Azure AI Studio.\nBy default, the `azureaistudio` service sets the number of requests allowed per minute to 240.", + "name": "rate_limit", "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "ToolCall", - "namespace": "inference.chat_completion_unified" - } + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" } } } ], - "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L110-L130" + "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L92-L134" }, { - "description": "A tool call generated by the model.", "kind": "interface", "name": { - "name": "ToolCall", - "namespace": "inference.chat_completion_unified" + "name": "AzureAiStudioTaskSettings", + "namespace": "inference.put_azureaistudio" }, "properties": [ { - "description": "The identifier of the tool call.", - "name": "id", - "required": true, + "description": "For a `completion` task, instruct the inference process to perform sampling.\nIt has no effect unless `temperature` or `top_p` is specified.", + "name": "do_sample", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "Id", + "name": "float", "namespace": "_types" } } }, { - "description": "The function that the model called.", - "name": "function", - "required": true, + "description": "For a `completion` task, provide a hint for the maximum number of output tokens to be generated.", +======= + "name": "AmazonBedrockTaskSettings", + "namespace": "inference.put_amazonbedrock" + }, + "properties": [ + { + "description": "For a `completion` task, it sets the 
maximum number of output tokens to be generated.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "name": "max_new_tokens", + "required": false, + "serverDefault": 64, "type": { "kind": "instance_of", "type": { - "name": "ToolCallFunction", - "namespace": "inference.chat_completion_unified" + "name": "integer", + "namespace": "_types" } } }, { - "description": "The type of the tool call.", - "name": "type", - "required": true, +<<<<<<< HEAD + "description": "For a `completion` task, control the apparent creativity of generated completions with a sampling temperature.\nIt must be a number in the range of 0.0 to 2.0.\nIt should not be used if `top_p` is specified.", +======= + "description": "For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.\nAt temperature 0.0 the model is most deterministic, at temperature 1.0 most random.\nIt should not be used if `top_p` or `top_k` is specified.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "name": "temperature", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "float", + "namespace": "_types" } } - } - ], - "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L87-L103" - }, - { - "description": "The function that the model called.", - "kind": "interface", - "name": { - "name": "ToolCallFunction", - "namespace": "inference.chat_completion_unified" - }, - "properties": [ + }, { - "description": "The arguments to call the function with in JSON format.", - "name": "arguments", - "required": true, +<<<<<<< HEAD + "description": "For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability.\nIt is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0.\nIt should not be used if `temperature` is specified.", + "name": "top_p", +======= + "description": "For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability.\nIt is only available for anthropic, cohere, and mistral providers.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "name": "top_k", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "float", + "namespace": "_types" } } }, { - "description": "The name of the function to call.", - "name": "name", - "required": true, +<<<<<<< HEAD + "description": "For a `text_embedding` task, specify the user issuing the request.\nThis information can be used for abuse detection.", + "name": "user", +======= + "description": "For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens.\nTop-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "name": "top_p", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": false, "type": { "kind": "instance_of", "type": { +<<<<<<< HEAD "name": "string", "namespace": "_builtins" +======= + "name": "float", + "namespace": "_types" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) } } } ], - "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L73-L85" +<<<<<<< 
HEAD + "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L136-L164" +======= + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L139-L163" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { - "description": "A list of tools that the model can call.", "kind": "interface", "name": { - "name": "CompletionTool", - "namespace": "inference.chat_completion_unified" +<<<<<<< HEAD + "name": "AzureOpenAIServiceSettings", + "namespace": "inference.put_azureopenai" }, "properties": [ { - "description": "The type of tool.", - "name": "type", - "required": true, + "description": "A valid API key for your Azure OpenAI account.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "extDocId": "azureopenai-auth", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", + "name": "api_key", + "required": false, "type": { "kind": "instance_of", "type": { @@ -121583,32 +123766,18 @@ } }, { - "description": "The function definition.", - "name": "function", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "CompletionToolFunction", - "namespace": "inference.chat_completion_unified" - } - } - } - ], - "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L180-L192" - }, - { - "description": "The completion tool function definition.", - "kind": "interface", - "name": { - "name": "CompletionToolFunction", - "namespace": "inference.chat_completion_unified" + "description": "The Azure API version ID to use.\nIt is recommended to use the latest supported non-preview version.", + "name": "api_version", +======= + "name": "AnthropicServiceSettings", + "namespace": "inference.put_anthropic" }, "properties": [ { - "description": "A description of what the function does.\nThis is used by the model to choose when and how to call the function.", - "name": "description", - "required": false, + "description": "A valid API key for the Anthropic API.", + "name": "api_key", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": true, "type": { "kind": "instance_of", "type": { @@ -121618,8 +123787,16 @@ } }, { - "description": "The name of the function.", - "name": "name", +<<<<<<< HEAD + "description": "The deployment name of your deployed models.\nYour Azure OpenAI deployments can be found though the Azure OpenAI Studio portal that is linked to your subscription.", + "extDocId": "azureopenai", + "extDocUrl": "https://oai.azure.com/", + "name": "deployment_id", +======= + "description": "The name of the model to use for the inference task.\nRefer to the Anthropic documentation for the list of supported models.", + "extDocId": "anothropic-models", + "name": "model_id", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": true, "type": { "kind": "instance_of", @@ -121630,123 +123807,98 @@ } }, { - "description": "The parameters the functional accepts. 
This should be formatted as a JSON object.", - "name": "parameters", - "required": false, - "type": { - "kind": "user_defined_value" - } - }, - { - "description": "Whether to enable schema adherence when generating the function call.", - "name": "strict", +<<<<<<< HEAD + "description": "A valid Microsoft Entra token.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.", + "extDocId": "azureopenai-auth", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", + "name": "entra_id", +======= + "description": "This setting helps to minimize the number of rate limit errors returned from Anthropic.\nBy default, the `anthropic` service sets the number of requests allowed per minute to 50.", + "name": "rate_limit", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "RateLimitSetting", + "namespace": "inference._types" } } } ], - "specLocation": "inference/chat_completion_unified/UnifiedRequest.ts#L157-L178" + "specLocation": "inference/put_anthropic/PutAnthropicRequest.ts#L92-L108" }, { - "description": "Defines the completion result.", "kind": "interface", "name": { - "name": "CompletionInferenceResult", - "namespace": "inference._types" + "name": "AnthropicTaskSettings", + "namespace": "inference.put_anthropic" }, "properties": [ { - "name": "completion", + "description": "For a `completion` task, it is the maximum number of tokens to generate before stopping.", + "name": "max_tokens", "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "CompletionResult", - "namespace": "inference._types" - } + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" } } - } - ], - "specLocation": "inference/_types/Results.ts#L84-L89" - }, - { - "description": "The completion result object", - "kind": "interface", - "name": { - "name": "CompletionResult", - "namespace": "inference._types" - }, - "properties": [ + }, { - "name": "result", - "required": true, + "description": "For a `completion` task, it is the amount of randomness injected into the response.\nFor more details about the supported range, refer to Anthropic documentation.", + "extDocId": "anthropic-messages", + "extDocUrl": "https://docs.anthropic.com/en/api/messages", + "name": "temperature", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "float", + "namespace": "_types" } } - } - ], - "specLocation": "inference/_types/Results.ts#L77-L82" - }, - { - "description": "Acknowledged response. 
For dry_run, contains the list of pipelines which reference the inference endpoint", - "inherits": { - "type": { - "name": "AcknowledgedResponseBase", - "namespace": "_types" - } - }, - "kind": "interface", - "name": { - "name": "DeleteInferenceEndpointResult", - "namespace": "inference._types" - }, - "properties": [ + }, { - "name": "pipelines", - "required": true, + "description": "For a `completion` task, it specifies to only sample from the top K options for each subsequent token.\nIt is recommended for advanced use cases only.\nYou usually only need to use `temperature`.", + "name": "top_k", + "required": false, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "For a `completion` task, it specifies to use Anthropic's nucleus sampling.\nIn nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability.\nYou should either alter `temperature` or `top_p`, but not both.\nIt is recommended for advanced use cases only.\nYou usually only need to use `temperature`.", + "name": "top_p", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "float", + "namespace": "_types" } } } ], - "specLocation": "inference/_types/Results.ts#L110-L115" + "specLocation": "inference/put_anthropic/PutAnthropicRequest.ts#L110-L135" }, { - "description": "Represents an inference endpoint as returned by the GET API", - "inherits": { - "type": { - "name": "InferenceEndpoint", - "namespace": "inference._types" - } - }, "kind": "interface", "name": { - "name": "InferenceEndpointInfo", - "namespace": "inference._types" + "name": "CohereServiceSettings", + "namespace": "inference.put_cohere" }, "properties": [ { - "description": "The inference Id", - "name": "inference_id", + "description": "A valid API key for your Cohere account.\nYou can find or create your Cohere API keys on the Cohere API key settings page.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "extDocId": "cohere-api-keys", + "extDocUrl": "https://dashboard.cohere.com/api-keys", + "name": "api_key", "required": true, "type": { "kind": "instance_of", @@ -121757,44 +123909,23 @@ } }, { - "description": "The task type", - "name": "task_type", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "TaskType", - "namespace": "inference._types" - } - } - } - ], - "specLocation": "inference/_types/Services.ts#L46-L58" - }, - { - "description": "Configuration options when storing the inference endpoint", - "kind": "interface", - "name": { - "name": "InferenceEndpoint", - "namespace": "inference._types" - }, - "properties": [ - { - "description": "Chunking configuration object", - "name": "chunking_settings", + "description": "For a `text_embedding` task, the types of embeddings you want to get back.\nUse `byte` for signed int8 embeddings (this is a synonym of `int8`).\nUse `float` for the default float embeddings.\nUse `int8` for signed int8 embeddings.", + "name": 
"embedding_type", "required": false, + "serverDefault": "float", "type": { "kind": "instance_of", "type": { - "name": "InferenceChunkingSettings", - "namespace": "inference._types" + "name": "EmbeddingType", + "namespace": "inference.put_cohere" } } }, { - "description": "The service type", - "name": "service", - "required": true, + "description": "For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task.\n\n* For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command).\n* For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1).\n* For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed).\n\nThe default value for a text embedding task is `embed-english-v2.0`.", + "name": "model_id", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "required": false, "type": { "kind": "instance_of", "type": { @@ -121804,77 +123935,79 @@ } }, { - "description": "Settings specific to the service", - "name": "service_settings", - "required": true, +<<<<<<< HEAD + "description": "This setting helps to minimize the number of rate limit errors returned from Azure.\nThe `azureopenai` service sets a default number of requests allowed per minute depending on the task type.\nFor `text_embedding`, it is set to `1440`.\nFor `completion`, it is set to `120`.", + "extDocId": "azureopenai-quota-limits", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits", +======= + "description": "This setting helps to minimize the number of rate limit errors returned from Cohere.\nBy default, the `cohere` service sets the number of requests allowed per minute to 10000.", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) + "name": "rate_limit", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "ServiceSettings", + "name": "RateLimitSetting", "namespace": "inference._types" } } }, { - "description": "Task settings specific to the service and task type", - "name": "task_settings", +<<<<<<< HEAD + "description": "The name of your Azure OpenAI resource.\nYou can find this from the list of resources in the Azure Portal for your subscription.", + "extDocId": "azureopenai-portal", + "extDocUrl": "https://portal.azure.com/#view/HubsExtension/BrowseAll", + "name": "resource_name", +======= + "description": "The similarity measure.\nIf the `embedding_type` is `float`, the default value is `dot_product`.\nIf the `embedding_type` is `int8` or `byte`, the default value is `cosine`.", + "name": "similarity", "required": false, "type": { "kind": "instance_of", "type": { - "name": "TaskSettings", - "namespace": "inference._types" + "name": "SimilarityType", + "namespace": "inference.put_cohere" } } } ], - "specLocation": "inference/_types/Services.ts#L24-L44" + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L119-L160" }, { - "description": "Chunking configuration object", - "inherits": { - "type": { - "name": "InferenceEndpoint", - "namespace": "inference._types" - } - }, "kind": "interface", "name": { - "name": "InferenceChunkingSettings", - "namespace": "inference._types" + "name": "CohereTaskSettings", + "namespace": "inference.put_cohere" }, "properties": [ { - "description": "The maximum size of a chunk in words.\nThis value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).", - 
"name": "max_chunk_size", + "description": "For a `text_embedding` task, the type of input passed to the model.\nValid values are:\n\n* `classification`: Use it for embeddings passed through a text classifier.\n* `clustering`: Use it for the embeddings run through a clustering algorithm.\n* `ingest`: Use it for storing document embeddings in a vector database.\n* `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents.\n\nIMPORTANT: The `input_type` field is required when using embedding models `v3` and higher.", + "name": "input_type", "required": false, - "serverDefault": 250, "type": { "kind": "instance_of", "type": { - "name": "integer", - "namespace": "_types" + "name": "InputType", + "namespace": "inference.put_cohere" } } }, { - "description": "The number of overlapping words for chunks.\nIt is applicable only to a `word` chunking strategy.\nThis value cannot be higher than half the `max_chunk_size` value.", - "name": "overlap", + "description": "For a `rerank` task, return doc text within the results.", + "name": "return_documents", "required": false, - "serverDefault": 100, "type": { "kind": "instance_of", "type": { - "name": "integer", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "description": "The number of overlapping sentences for chunks.\nIt is applicable only for a `sentence` chunking strategy.\nIt can be either `1` or `0`.", - "name": "sentence_overlap", + "description": "For a `rerank` task, the number of most relevant documents to return.\nIt defaults to the number of the documents.\nIf this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query.", + "name": "top_n", "required": false, - "serverDefault": 1, "type": { "kind": "instance_of", "type": { @@ -121884,31 +124017,30 @@ } }, { - "description": "The chunking strategy: `sentence` or `word`.", - "name": "strategy", + "description": "For a `text_embedding` task, the method to handle inputs longer than the maximum token length.\nValid values are:\n\n* `END`: When the input exceeds the maximum input token length, the end of the input is discarded.\n* `NONE`: When the input exceeds the maximum input token length, an error is returned.\n* `START`: When the input exceeds the maximum input token length, the start of the input is discarded.", + "name": "truncate", "required": false, - "serverDefault": "sentence", "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "TruncateType", + "namespace": "inference.put_cohere" } } } ], - "specLocation": "inference/_types/Services.ts#L60-L89" + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L162-L194" }, { "kind": "interface", "name": { - "name": "AlibabaCloudServiceSettings", - "namespace": "inference.put_alibabacloud" + "name": "EisServiceSettings", + "namespace": "inference.put_eis" }, "properties": [ { - "description": "A valid API key for the AlibabaCloud AI Search API.", - "name": "api_key", + "description": "The name of the model to use for the inference task.", + "name": "model_id", "required": true, "type": { "kind": "instance_of", @@ -121919,35 +124051,43 @@ } }, { - "description": "The name of the host address used for the inference task.\nYou can find the host address in the API keys section of the documentation.", - "extDocId": "alibabacloud-api-keys", - "extDocUrl": 
"https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key", - "name": "host", - "required": true, + "description": "This setting helps to minimize the number of rate limit errors returned.\nBy default, the `elastic` service sets the number of requests allowed per minute to `240` in case of `chat_completion`.", + "name": "rate_limit", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "RateLimitSetting", + "namespace": "inference._types" } } - }, + } + ], + "specLocation": "inference/put_eis/PutEisRequest.ts#L72-L82" + }, + { + "kind": "interface", + "name": { + "name": "ElasticsearchServiceSettings", + "namespace": "inference.put_elasticsearch" + }, + "properties": [ { - "description": "This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search.\nBy default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`.", - "name": "rate_limit", + "description": "Adaptive allocations configuration details.\nIf `enabled` is true, the number of allocations of the model is set based on the current load the process gets.\nWhen the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.\nWhen the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.\nIf `enabled` is true, do not set the number of allocations manually.", + "name": "adaptive_allocations", "required": false, "type": { "kind": "instance_of", "type": { - "name": "RateLimitSetting", - "namespace": "inference._types" + "name": "AdaptiveAllocations", + "namespace": "inference.put_elasticsearch" } } }, { - "description": "The name of the model service to use for the inference task.\nThe following service IDs are available for the `completion` task:\n\n* `ops-qwen-turbo`\n* `qwen-turbo`\n* `qwen-plus`\n* `qwen-max รท qwen-max-longcontext`\n\nThe following service ID is available for the `rerank` task:\n\n* `ops-bge-reranker-larger`\n\nThe following service ID is available for the `sparse_embedding` task:\n\n* `ops-text-sparse-embedding-001`\n\nThe following service IDs are available for the `text_embedding` task:\n\n`ops-text-embedding-001`\n`ops-text-embedding-zh-001`\n`ops-text-embedding-en-001`\n`ops-text-embedding-002`", - "name": "service_id", - "required": true, + "description": "The deployment identifier for a trained model deployment.\nWhen `deployment_id` is used the `model_id` is optional.", + "name": "deployment_id", + "required": false, "type": { "kind": "instance_of", "type": { @@ -121957,8 +124097,10 @@ } }, { - "description": "The name of the workspace used for the inference task.", - "name": "workspace", + "description": "The name of the model to use for the inference task.\nIt can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client.", + "extDocId": "eland-import", + "extDocUrl": "https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-import-model.html#ml-nlp-import-script", + "name": "model_id", "required": true, "type": { "kind": "instance_of", @@ -121967,20 +124109,10 @@ "namespace": "_builtins" } } - } - ], - "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L93-L138" - }, - { - "kind": "interface", - "name": { - "name": "RateLimitSetting", - "namespace": "inference._types" - }, - "properties": [ + }, { - 
"description": "The number of requests allowed per minute.", - "name": "requests_per_minute", + "description": "The total number of allocations that are assigned to the model across machine learning nodes.\nIncreasing this value generally increases the throughput.\nIf adaptive allocations are enabled, do not set this value because it's automatically set.", + "name": "num_allocations", "required": false, "type": { "kind": "instance_of", @@ -121989,142 +124121,162 @@ "namespace": "_types" } } + }, + { + "description": "The number of threads used by each model allocation during inference.\nThis setting generally increases the speed per inference request.\nThe inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.\nThe value must be a power of 2.\nThe maximum value is 32.", + "name": "num_threads", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } } ], - "specLocation": "inference/_types/Services.ts#L95-L100" + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L117-L151" }, { "kind": "interface", "name": { - "name": "AlibabaCloudTaskSettings", - "namespace": "inference.put_alibabacloud" + "name": "AdaptiveAllocations", + "namespace": "inference.put_elasticsearch" }, "properties": [ { - "description": "For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model.\nValid values are:\n\n* `ingest` for storing document embeddings in a vector database.\n* `search` for storing embeddings of search queries run against a vector database to find relevant documents.", - "name": "input_type", + "description": "Turn on `adaptive_allocations`.", + "name": "enabled", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } }, { - "description": "For a `sparse_embedding` task, it affects whether the token name will be returned in the response.\nIt defaults to `false`, which means only the token ID will be returned in the response.", - "name": "return_token", + "description": "The maximum number of allocations to scale to.\nIf set, it must be greater than or equal to `min_number_of_allocations`.", + "name": "max_number_of_allocations", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "The minimum number of allocations to scale to.\nIf set, it must be greater than or equal to 0.\nIf not defined, the deployment scales to 0.", + "name": "min_number_of_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" } } } ], - "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L140-L154" + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L98-L115" }, { "kind": "interface", "name": { - "name": "AzureAiStudioServiceSettings", - "namespace": "inference.put_azureaistudio" + "name": "ElasticsearchTaskSettings", + "namespace": "inference.put_elasticsearch" }, "properties": [ { - "description": "A valid API key of your Azure AI Studio model deployment.\nThis key can be found on the overview page for your deployment in the management section of your Azure AI Studio account.\n\nIMPORTANT: You need to provide the API key only once, during the inference model 
creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", - "extDocId": "azureaistudio-api-keys", - "extDocUrl": "https://ai.azure.com/", - "name": "api_key", - "required": true, + "description": "For a `rerank` task, return the document instead of only the index.", + "name": "return_documents", + "required": false, + "serverDefault": true, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "boolean", "namespace": "_builtins" } } - }, + } + ], + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L153-L159" + }, + { + "kind": "interface", + "name": { + "name": "ElserServiceSettings", + "namespace": "inference.put_elser" + }, + "properties": [ { - "description": "The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`.\nThe `token` endpoint type is for \"pay as you go\" endpoints that are billed per token.\nThe `realtime` endpoint type is for \"real-time\" endpoints that are billed per hour of usage.", - "extDocId": "azureaistudio-endpoint-types", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/deployments-overview#billing-for-deploying-and-inferencing-llms-in-azure-ai-studio", - "name": "endpoint_type", - "required": true, + "description": "Adaptive allocations configuration details.\nIf `enabled` is true, the number of allocations of the model is set based on the current load the process gets.\nWhen the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.\nWhen the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.\nIf `enabled` is true, do not set the number of allocations manually.", + "name": "adaptive_allocations", + "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "AdaptiveAllocations", + "namespace": "inference.put_elser" } } }, { - "description": "The target URL of your Azure AI Studio model deployment.\nThis can be found on the overview page for your deployment in the management section of your Azure AI Studio account.", - "name": "target", + "description": "The total number of allocations this model is assigned across machine learning nodes.\nIncreasing this value generally increases the throughput.\nIf adaptive allocations is enabled, do not set this value because it's automatically set.", + "name": "num_allocations", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "integer", + "namespace": "_types" } } }, { - "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `cohere` - available for `text_embedding` and `completion` task types\n* `databricks` - available for `completion` task type only\n* `meta` - available for `completion` task type only\n* `microsoft_phi` - available for `completion` task type only\n* `mistral` - available for `completion` task type only\n* `openai` - available for `text_embedding` and `completion` task types", - "name": "provider", + "description": "The number of threads used by each model allocation during inference.\nIncreasing this value 
generally increases the speed per inference request.\nThe inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.\nThe value must be a power of 2.\nThe maximum value is 32.\n\n> info\n> If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1.", + "name": "num_threads", "required": true, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "This setting helps to minimize the number of rate limit errors returned from Azure AI Studio.\nBy default, the `azureaistudio` service sets the number of requests allowed per minute to 240.", - "name": "rate_limit", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "RateLimitSetting", - "namespace": "inference._types" + "name": "integer", + "namespace": "_types" } } } ], - "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L92-L134" + "specLocation": "inference/put_elser/PutElserRequest.ts#L111-L137" }, { "kind": "interface", "name": { - "name": "AzureAiStudioTaskSettings", - "namespace": "inference.put_azureaistudio" + "name": "AdaptiveAllocations", + "namespace": "inference.put_elser" }, "properties": [ { - "description": "For a `completion` task, instruct the inference process to perform sampling.\nIt has no effect unless `temperature` or `top_p` is specified.", - "name": "do_sample", + "description": "Turn on `adaptive_allocations`.", + "name": "enabled", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { - "name": "float", - "namespace": "_types" + "name": "boolean", + "namespace": "_builtins" } } }, { - "description": "For a `completion` task, provide a hint for the maximum number of output tokens to be generated.", - "name": "max_new_tokens", + "description": "The maximum number of allocations to scale to.\nIf set, it must be greater than or equal to `min_number_of_allocations`.", + "name": "max_number_of_allocations", "required": false, - "serverDefault": 64, "type": { "kind": "instance_of", "type": { @@ -122134,33 +124286,45 @@ } }, { - "description": "For a `completion` task, control the apparent creativity of generated completions with a sampling temperature.\nIt must be a number in the range of 0.0 to 2.0.\nIt should not be used if `top_p` is specified.", - "name": "temperature", + "description": "The minimum number of allocations to scale to.\nIf set, it must be greater than or equal to 0.\nIf not defined, the deployment scales to 0.", + "name": "min_number_of_allocations", "required": false, "type": { "kind": "instance_of", "type": { - "name": "float", + "name": "integer", "namespace": "_types" } } - }, + } + ], + "specLocation": "inference/put_elser/PutElserRequest.ts#L92-L109" + }, + { + "kind": "interface", + "name": { + "name": "GoogleAiStudioServiceSettings", + "namespace": "inference.put_googleaistudio" + }, + "properties": [ { - "description": "For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability.\nIt is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0.\nIt should not be used if `temperature` is specified.", - "name": "top_p", - "required": false, + "description": "A valid API key of your Google Gemini account.", + "name": "api_key", + "required": true, "type": { "kind": 
"instance_of", "type": { - "name": "float", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } }, { - "description": "For a `text_embedding` task, specify the user issuing the request.\nThis information can be used for abuse detection.", - "name": "user", - "required": false, + "description": "The name of the model to use for the inference task.\nRefer to the Google documentation for the list of supported models.", + "extDocId": "googleaistudio-models", + "extDocUrl": "https://ai.google.dev/gemini-api/docs/models", + "name": "model_id", + "required": true, "type": { "kind": "instance_of", "type": { @@ -122168,34 +124332,34 @@ "namespace": "_builtins" } } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Google AI Studio.\nBy default, the `googleaistudio` service sets the number of requests allowed per minute to 360.", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } } ], - "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L136-L164" + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L86-L102" }, { "kind": "interface", "name": { - "name": "AzureOpenAIServiceSettings", - "namespace": "inference.put_azureopenai" + "name": "GoogleVertexAIServiceSettings", + "namespace": "inference.put_googlevertexai" }, "properties": [ { - "description": "A valid API key for your Azure OpenAI account.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", - "extDocId": "azureopenai-auth", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", - "name": "api_key", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "The Azure API version ID to use.\nIt is recommended to use the latest supported non-preview version.", - "name": "api_version", + "description": "The name of the location to use for the inference task.\nRefer to the Google documentation for the list of supported locations.", + "extDocId": "googlevertexai-locations", + "extDocUrl": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations", + "name": "location", "required": true, "type": { "kind": "instance_of", @@ -122206,10 +124370,10 @@ } }, { - "description": "The deployment name of your deployed models.\nYour Azure OpenAI deployments can be found though the Azure OpenAI Studio portal that is linked to your subscription.", - "extDocId": "azureopenai", - "extDocUrl": "https://oai.azure.com/", - "name": "deployment_id", + "description": "The name of the model to use for the inference task.\nRefer to the Google documentation for the list of supported models.", + "extDocId": "googlevertexai-models", + "extDocUrl": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api", + "name": "model_id", "required": true, "type": { "kind": "instance_of", @@ -122220,11 
+124384,9 @@ } }, { - "description": "A valid Microsoft Entra token.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.", - "extDocId": "azureopenai-auth", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", - "name": "entra_id", - "required": false, + "description": "The name of the project to use for the inference task.", + "name": "project_id", + "required": true, "type": { "kind": "instance_of", "type": { @@ -122234,9 +124396,7 @@ } }, { - "description": "This setting helps to minimize the number of rate limit errors returned from Azure.\nThe `azureopenai` service sets a default number of requests allowed per minute depending on the task type.\nFor `text_embedding`, it is set to `1440`.\nFor `completion`, it is set to `120`.", - "extDocId": "azureopenai-quota-limits", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits", + "description": "This setting helps to minimize the number of rate limit errors returned from Google Vertex AI.\nBy default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000.", "name": "rate_limit", "required": false, "type": { @@ -122248,10 +124408,9 @@ } }, { - "description": "The name of your Azure OpenAI resource.\nYou can find this from the list of resources in the Azure Portal for your subscription.", - "extDocId": "azureopenai-portal", - "extDocUrl": "https://portal.azure.com/#view/HubsExtension/BrowseAll", - "name": "resource_name", + "description": "A valid service account in JSON format for the Google Vertex AI API.", + "name": "service_account_json", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": true, "type": { "kind": "instance_of", @@ -122262,11 +124421,16 @@ } } ], +<<<<<<< HEAD "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L99-L144" +======= + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L92-L118" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "kind": "interface", "name": { +<<<<<<< HEAD "name": "AzureOpenAITaskSettings", "namespace": "inference.put_azureopenai" }, @@ -122274,10 +124438,20 @@ { "description": "For a `completion` or `text_embedding` task, specify the user issuing the request.\nThis information can be used for abuse detection.", "name": "user", +======= + "name": "GoogleVertexAITaskSettings", + "namespace": "inference.put_googlevertexai" + }, + "properties": [ + { + "description": "For a `text_embedding` task, truncate inputs longer than the maximum token length automatically.", + "name": "auto_truncate", +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "required": false, "type": { "kind": "instance_of", "type": { +<<<<<<< HEAD "name": "string", "namespace": "_builtins" } @@ -122285,10 +124459,35 @@ } ], "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L146-L152" +======= + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "For a `rerank` task, the number of the top N documents that should be returned.", + "name": "top_n", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L120-L129" +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) }, { "kind": "interface", "name": {
+======= +>>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) +>>>>>>> 72877ef81 (Add Amazon Bedrock inference API (#4022)) "name": "HuggingFaceServiceSettings", "namespace": "inference.put_hugging_face" }, diff --git a/output/schema/schema.json b/output/schema/schema.json index 3bdde706cb..942438c502 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -9348,6 +9348,51 @@ } ] }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { + "since": "8.12.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "docId": "inference-api-amazonbedrock", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-bedrock.html", + "name": "inference.put_amazonbedrock", + "privileges": { + "cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_amazonbedrock" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_amazonbedrock" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{amazonbedrock_inference_id}" + } + ] + }, { "availability": { "serverless": { @@ -150860,6 +150905,312 @@ }, "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L89-L91" }, + { + "kind": "interface", + "name": { + "name": "AmazonBedrockServiceSettings", + "namespace": "inference.put_amazonbedrock" + }, + "properties": [ + { + "description": "A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests.", + "name": "access_key", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The base model ID or an ARN to a custom model based on a foundational model.\nThe base model IDs can be found in the Amazon Bedrock documentation.\nNote that the model ID must be available for the provider chosen and your IAM user must have access to the model.", + "extDocId": "amazonbedrock-models", + "extDocUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html", + "name": "model", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" 
+ } + } + }, + { + "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `amazontitan` - available for `text_embedding` and `completion` task types\n* `anthropic` - available for `completion` task type only\n* `ai21labs` - available for `completion` task type only\n* `cohere` - available for `text_embedding` and `completion` task types\n* `meta` - available for `completion` task type only\n* `mistral` - available for `completion` task type only", + "name": "provider", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The region that your model or ARN is deployed in.\nThe list of available regions per model can be found in the Amazon Bedrock documentation.", + "extDocId": "amazonbedrock-models", + "extDocUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html", + "name": "region", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock.\nBy default, the `amazonbedrock` service sets the number of requests allowed per minute to 240.", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } + }, + { + "description": "A valid AWS secret key that is paired with the `access_key`.\nFor information about creating and managing access and secret keys, refer to the AWS documentation.", + "extDocId": "amazonbedrock-secret-keys", + "extDocUrl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html", + "name": "secret_key", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L95-L137" + }, + { + "kind": "interface", + "name": { + "name": "AmazonBedrockTaskSettings", + "namespace": "inference.put_amazonbedrock" + }, + "properties": [ + { + "description": "For a `completion` task, it sets the maximum number of output tokens to be generated.", + "name": "max_new_tokens", + "required": false, + "serverDefault": 64, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.\nAt temperature 0.0 the model is most deterministic; at temperature 1.0 it is most random.\nIt should not be used if `top_p` or `top_k` is specified.", + "name": "temperature", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "float", + "namespace": "_types" + } + } + }, + { + "description": "For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability.\nIt is only available for anthropic, cohere, and mistral providers.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "name": "top_k", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "float", + "namespace": "_types" + } + } + }, + { + "description": "For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens.\nTop-p uses
nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence.\nIt is an alternative to `temperature`; it should not be used if `temperature` is specified.", + "name": "top_p", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "float", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L139-L163" + }, + { + "kind": "enum", + "members": [ + { + "name": "completion" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "AmazonBedrockTaskType", + "namespace": "inference.put_amazonbedrock" + }, + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L86-L89" + }, + { + "kind": "request", + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ + { + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `amazonbedrock`.", + "name": "service", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_amazonbedrock" + } + } + }, + { + "description": "Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "AmazonBedrockServiceSettings", + "namespace": "inference.put_amazonbedrock" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AmazonBedrockTaskSettings", + "namespace": "inference.put_amazonbedrock" + } + } + } + ] + }, + "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "examples": { + "PutAmazonBedrockRequestExample1": { + "description": "Run `PUT _inference/text_embedding/amazon_bedrock_embeddings` to create an inference endpoint that performs a text embedding task.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-embed-text-v2:0\"\n }\n}" + }, + "PutAmazonBedrockRequestExample2": { + "description": "Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task.", + "summary": "A completion task", + "value": "{\n \"service\": \"amazonbedrock\",\n \"service_settings\": {\n \"access_key\": \"AWS-access-key\",\n \"secret_key\": \"AWS-secret-key\",\n \"region\": \"us-east-1\",\n \"provider\": \"amazontitan\",\n \"model\": \"amazon.titan-text-premier-v1:0\"\n }\n}" + } + }, + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "name": { + "name": "Request", + "namespace": "inference.put_amazonbedrock" + }, + "path": [ + { + "description": "The type of the inference task that the model will perform.", + "name": "task_type", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "AmazonBedrockTaskType", + "namespace": "inference.put_amazonbedrock" + } + } + }, + { + "description": "The unique identifier of the inference endpoint.", + "name": "amazonbedrock_inference_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [], + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L28-L84" + }, + { + "kind": "response", + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "InferenceEndpointInfo", + "namespace": "inference._types" + } + } + }, + "name": { + "name": "Response", + "namespace": "inference.put_amazonbedrock" + }, + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockResponse.ts#L22-L24" + }, + { + "kind": "enum", + "members": [ + { + "name": "amazonbedrock" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_amazonbedrock" + }, + "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L91-L93" + }, { "kind": "interface", "name": { diff --git a/output/typescript/types.ts b/output/typescript/types.ts index fe9191ff0c..c40ff3031e 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -13277,6 +13277,39 @@ export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo export type InferencePutAlibabacloudServiceType = 'alibabacloud-ai-search' +export interface InferencePutAmazonbedrockAmazonBedrockServiceSettings { + access_key: string + model: string + provider?: string + region: string + rate_limit?: InferenceRateLimitSetting + secret_key: string +} + +export interface InferencePutAmazonbedrockAmazonBedrockTaskSettings { + max_new_tokens?: integer + temperature?: float + top_k?: float + top_p?: float +} + +export type InferencePutAmazonbedrockAmazonBedrockTaskType = 'completion' | 'text_embedding' + +export interface InferencePutAmazonbedrockRequest extends RequestBase { + task_type: InferencePutAmazonbedrockAmazonBedrockTaskType + amazonbedrock_inference_id: Id + body?: { + chunking_settings?: InferenceInferenceChunkingSettings + service: InferencePutAmazonbedrockServiceType + service_settings: InferencePutAmazonbedrockAmazonBedrockServiceSettings + task_settings?: InferencePutAmazonbedrockAmazonBedrockTaskSettings + } +} + +export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo + +export type InferencePutAmazonbedrockServiceType = 'amazonbedrock' + export interface InferencePutAzureaistudioAzureAiStudioServiceSettings { api_key: string endpoint_type: string diff --git a/package-lock.json b/package-lock.json index 6b2f4fc3b3..346e9c827b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -5,7 +5,7 @@ "packages": { "": { "dependencies": { - "@redocly/cli": "^1.33.1", + "@redocly/cli": "^1.34.0", "@stoplight/spectral-cli": "^6.14.2" } }, @@ -486,9 +486,9 @@ } }, "node_modules/@redocly/cli": { - "version": "1.33.1", - "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.33.1.tgz", - "integrity": "sha512-co+Vr/RfH9Nca3eiYuYvbLxI+5RVOyJ+l56B0SmU5UHfticTUXirO0vxtFmkHmch6YIFVU6BCF4tFbj7ssF8iQ==", + "version": "1.34.0", + "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.0.tgz", + "integrity": "sha512-Kg/t9zMjZB5cyb0YQLa+gne5E5Rz6wZP/goug1+2qaR17UqeupidBzwqDdr3lszEK3q2A37g4+W7pvdBOkiGQA==", "license": "MIT", "dependencies": { "@opentelemetry/api": "1.9.0", @@ -497,8 +497,8 @@ "@opentelemetry/sdk-trace-node": "1.26.0", "@opentelemetry/semantic-conventions": "1.27.0", "@redocly/config": "^0.22.0", - "@redocly/openapi-core": "1.33.1", - "@redocly/respect-core": "1.33.1", + "@redocly/openapi-core": "1.34.0", + "@redocly/respect-core": "1.34.0", "abort-controller": "^3.0.0", "chokidar": "^3.5.1", "colorette": "^1.2.0", @@ -561,9 +561,9 @@ "license": "MIT" }, "node_modules/@redocly/openapi-core": { - "version": "1.33.1", - "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.33.1.tgz", - "integrity": "sha512-tL3v8FVwdcCAcruOZV77uxH2ZFtnY3DRPG+rgmlm9hsu5uoatofVSJIJHUroz54KJ8ryeo28wQHhOr8iReGGEQ==", + "version": "1.34.0", + "resolved": 
"https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.0.tgz", + "integrity": "sha512-Ji00EiLQRXq0pJIz5pAjGF9MfQvQVsQehc6uIis6sqat8tG/zh25Zi64w6HVGEDgJEzUeq/CuUlD0emu3Hdaqw==", "license": "MIT", "dependencies": { "@redocly/ajv": "^8.11.2", @@ -603,14 +603,14 @@ } }, "node_modules/@redocly/respect-core": { - "version": "1.33.1", - "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.33.1.tgz", - "integrity": "sha512-Sh6TahtuvSzvejkfu74KErdMX6VtrNNRJAtwH9A6R1Igo8WVmrdoFE99uAp/dOL9bpAQPg4oKtrTF60avN7YYA==", + "version": "1.34.0", + "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.0.tgz", + "integrity": "sha512-CO2XxJ0SUYHKixKPTQm2U6QrGLnNhQy88CnX20llCxXDKd485cSioRMZ8MMNhHrnDsUlprSuM3ui2z5JGf1ftw==", "license": "MIT", "dependencies": { "@faker-js/faker": "^7.6.0", "@redocly/ajv": "8.11.2", - "@redocly/openapi-core": "1.33.1", + "@redocly/openapi-core": "1.34.0", "better-ajv-errors": "^1.2.0", "colorette": "^2.0.20", "concat-stream": "^2.0.0", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index ec48f4fc75..454142f762 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -4,6 +4,8 @@ alias-update,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operati aliases-update,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases alibabacloud-api-keys,https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key analysis-analyzers,https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-analyzers.html +amazonbedrock-models,https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html +amazonbedrock-secret-keys,https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html analysis-charfilters,https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-charfilters.html analysis-normalizers,https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-normalizers.html analysis-standard-analyzer,https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-standard-analyzer.html @@ -328,11 +330,9 @@ inference-api-post,https://www.elastic.co/docs/api/doc/elasticsearch/operation/o inference-api-post-eis-chat-completion,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion inference-api-put,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put inference-api-put-alibabacloud,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-alibabacloud-ai-search.html -inference-api-amazonbedrock,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-bedrock.html inference-api-put-azureaistudio,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-ai-studio.html inference-api-put-azureopenai,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-openai.html inference-api-put-cohere,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-cohere.html -inference-api-put-cohere,https://www.elastic.co/guide/en/elasticsearch/reference/branch/infer-service-cohere.html inference-api-put-eis,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-eis.html inference-api-put-huggingface,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-hugging-face.html 
inference-api-put-googlevertexai,https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-google-vertex-ai.html diff --git a/specification/_json_spec/inference.put_amazonbedrock.json b/specification/_json_spec/inference.put_amazonbedrock.json new file mode 100644 index 0000000000..266a1800a3 --- /dev/null +++ b/specification/_json_spec/inference.put_amazonbedrock.json @@ -0,0 +1,35 @@ +{ + "inference.put_amazonbedrock": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-bedrock.html", + "description": "Configure an Amazon Bedrock inference endpoint" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"], + "content_type": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_inference/{task_type}/{amazonbedrock_inference_id}", + "methods": ["PUT"], + "parts": { + "task_type": { + "type": "string", + "description": "The task type" + }, + "amazonbedrock_inference_id": { + "type": "string", + "description": "The inference Id" + } + } + } + ] + }, + "body": { + "description": "The inference endpoint's task and service settings" + } + } +} diff --git a/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts b/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts new file mode 100644 index 0000000000..8ac3d0262f --- /dev/null +++ b/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { + InferenceChunkingSettings, + RateLimitSetting +} from '@inference/_types/Services' +import { RequestBase } from '@_types/Base' +import { Id } from '@_types/common' +import { float, integer } from '@_types/Numeric' + +/** + * Create an Amazon Bedrock inference endpoint. + * + * Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. + * + * >info + * > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. + * + * When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + * After creating the endpoint, wait for the model deployment to complete before using it. + * To verify the deployment status, use the get trained model statistics API. 
+ * Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. + * Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @rest_spec_name inference.put_amazonbedrock + * @availability stack since=8.12.0 stability=stable visibility=public + * @availability serverless stability=stable visibility=public + * @cluster_privileges manage_inference + * @doc_id inference-api-amazonbedrock + */ +export interface Request extends RequestBase { + urls: [ + { + path: '/_inference/{task_type}/{amazonbedrock_inference_id}' + methods: ['PUT'] + } + ] + path_parts: { + /** + * The type of the inference task that the model will perform. + */ + task_type: AmazonBedrockTaskType + /** + * The unique identifier of the inference endpoint. + */ + amazonbedrock_inference_id: Id + } + body: { + /** + * The chunking configuration object. + * @ext_doc_id inference-chunking + */ + chunking_settings?: InferenceChunkingSettings + /** + * The type of service supported for the specified task type. In this case, `amazonbedrock`. + */ + service: ServiceType + /** + * Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. + */ + service_settings: AmazonBedrockServiceSettings + /** + * Settings to configure the inference task. + * These settings are specific to the task type you specified. + */ + task_settings?: AmazonBedrockTaskSettings + } +} + +export enum AmazonBedrockTaskType { + completion, + text_embedding +} + +export enum ServiceType { + amazonbedrock +} + +export class AmazonBedrockServiceSettings { + /** + * A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. + */ + access_key: string + /** + * The base model ID or an ARN to a custom model based on a foundational model. + * The base model IDs can be found in the Amazon Bedrock documentation. + * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. + * @ext_doc_id amazonbedrock-models + */ + model: string + /** + * The model provider for your deployment. + * Note that some providers may support only certain task types. + * Supported providers include: + * + * * `amazontitan` - available for `text_embedding` and `completion` task types + * * `anthropic` - available for `completion` task type only + * * `ai21labs` - available for `completion` task type only + * * `cohere` - available for `text_embedding` and `completion` task types + * * `meta` - available for `completion` task type only + * * `mistral` - available for `completion` task type only + */ + provider?: string + /** + * The region that your model or ARN is deployed in. + * The list of available regions per model can be found in the Amazon Bedrock documentation. + * @ext_doc_id amazonbedrock-models + */ + region: string + /** + * This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock. + * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 240. + */ + rate_limit?: RateLimitSetting + /** + * A valid AWS secret key that is paired with the `access_key`. + * For information about creating and managing access and secret keys, refer to the AWS documentation. 
+ * @ext_doc_id amazonbedrock-secret-keys + */ + secret_key: string +} + +export class AmazonBedrockTaskSettings { + /** + * For a `completion` task, it sets the maximum number of output tokens to be generated. + * @server_default 64 + */ + max_new_tokens?: integer + /** + * For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results. + * At temperature 0.0 the model is most deterministic; at temperature 1.0 it is most random. + * It should not be used if `top_p` or `top_k` is specified. + */ + temperature?: float + /** + * For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability. + * It is only available for anthropic, cohere, and mistral providers. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. + */ + top_k?: float + /** + * For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens. + * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. + */ + top_p?: float +} diff --git a/specification/inference/put_amazonbedrock/PutAmazonBedrockResponse.ts b/specification/inference/put_amazonbedrock/PutAmazonBedrockResponse.ts new file mode 100644 index 0000000000..d40639b031 --- /dev/null +++ b/specification/inference/put_amazonbedrock/PutAmazonBedrockResponse.ts @@ -0,0 +1,24 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { InferenceEndpointInfo } from '@inference/_types/Services' + +export class Response { + body: InferenceEndpointInfo +} diff --git a/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample1.yaml b/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample1.yaml new file mode 100644 index 0000000000..cded037d23 --- /dev/null +++ b/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample1.yaml @@ -0,0 +1,15 @@ +summary: A text embedding task +description: Run `PUT _inference/text_embedding/amazon_bedrock_embeddings` to create an inference endpoint that performs a text embedding task. 
+# method_request: "PUT _inference/text_embedding/amazon_bedrock_embeddings" +# type: "request" +value: |- + { + "service": "amazonbedrock", + "service_settings": { + "access_key": "AWS-access-key", + "secret_key": "AWS-secret-key", + "region": "us-east-1", + "provider": "amazontitan", + "model": "amazon.titan-embed-text-v2:0" + } + } diff --git a/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample2.yaml b/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample2.yaml new file mode 100644 index 0000000000..d21fd0d2aa --- /dev/null +++ b/specification/inference/put_amazonbedrock/examples/request/PutAmazonBedrockRequestExample2.yaml @@ -0,0 +1,12 @@ +summary: A completion task +description: Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task type. +# method_request: "PUT _inference/completion/openai-completion" +# type: "request" +value: |- + { + "service": "openai", + "service_settings": { + "api_key": "OpenAI-API-Key", + "model_id": "gpt-3.5-turbo" + } + } diff --git a/specification/inference/put_openai/examples/request/PutOpenAiRequestExample2.yaml b/specification/inference/put_openai/examples/request/PutOpenAiRequestExample2.yaml index d21fd0d2aa..4bd73086b3 100644 --- a/specification/inference/put_openai/examples/request/PutOpenAiRequestExample2.yaml +++ b/specification/inference/put_openai/examples/request/PutOpenAiRequestExample2.yaml @@ -1,12 +1,15 @@ summary: A completion task -description: Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task type. -# method_request: "PUT _inference/completion/openai-completion" +description: Run `PUT _inference/completion/amazon_bedrock_completion` to create an inference endpoint to perform a completion task. +# method_request: "PUT _inference/completion/amazon_bedrock_completion" # type: "request" value: |- { - "service": "openai", + "service": "amazonbedrock", "service_settings": { - "api_key": "OpenAI-API-Key", - "model_id": "gpt-3.5-turbo" + "access_key": "AWS-access-key", + "secret_key": "AWS-secret-key", + "region": "us-east-1", + "provider": "amazontitan", + "model": "amazon.titan-text-premier-v1:0" } }