diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index b23077e..513be40 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,44 +1,48 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: e75ca54601920b2770d9a559b299d272 + docChecksum: ee15d853ecc28d415d6b33191893a6ff docVersion: 0.0.2 - speakeasyVersion: 1.398.0 - generationVersion: 2.415.8 - releaseVersion: 1.1.0 - configChecksum: 49094e0f156d020bd164f8b4bd41e97b + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 17ae764aa509274d1cf2d75af5bf6abb repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 + downloadStreams: 1.0.1 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 - flattening: 3.0.0 + flattening: 3.1.0 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 uploadStreams: 1.0.0 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - USAGE.md - docs/models/agentscompletionrequest.md @@ -49,12 +53,23 @@ generatedFiles: - docs/models/agentscompletionstreamrequestmessages.md - docs/models/agentscompletionstreamrequeststop.md - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/apiendpoint.md - docs/models/archiveftmodelout.md - docs/models/archiveftmodeloutobject.md - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/basemodelcard.md + - docs/models/batcherror.md + - docs/models/batchjobin.md + - docs/models/batchjobout.md + - docs/models/batchjoboutobject.md + - docs/models/batchjobsout.md + - docs/models/batchjobsoutobject.md + - docs/models/batchjobstatus.md + - docs/models/chatclassificationrequest.md + - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -64,6 +79,10 @@ generatedFiles: - docs/models/chatcompletionstreamrequeststop.md - docs/models/chatcompletionstreamrequesttoolchoice.md - docs/models/checkpointout.md + - docs/models/classificationobject.md + - docs/models/classificationrequest.md + - docs/models/classificationrequestinputs.md + - docs/models/classificationresponse.md - docs/models/completionchunk.md - docs/models/completionevent.md - docs/models/completionresponsestreamchoice.md @@ -85,12 +104,13 @@ generatedFiles: - docs/models/embeddingresponsedata.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapirouteslistfilesrequest.md - docs/models/filesapiroutesretrievefilerequest.md - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/filesapiroutesuploadfilepurpose.md - docs/models/fileschema.md - - 
docs/models/fileschemapurpose.md - docs/models/fimcompletionrequest.md - docs/models/fimcompletionrequeststop.md - docs/models/fimcompletionresponse.md @@ -100,6 +120,7 @@ generatedFiles: - docs/models/finishreason.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md - docs/models/ftmodelout.md - docs/models/ftmodeloutobject.md - docs/models/function.md @@ -121,6 +142,9 @@ generatedFiles: - docs/models/jobinrepositories.md - docs/models/jobmetadataout.md - docs/models/jobout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -140,13 +164,12 @@ generatedFiles: - docs/models/modelcapabilities.md - docs/models/modellist.md - docs/models/object.md - - docs/models/purpose.md + - docs/models/one.md - docs/models/queryparamstatus.md - docs/models/repositories.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/retrievefileout.md - - docs/models/retrievefileoutpurpose.md - docs/models/retrievemodelv1modelsmodelidgetrequest.md - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md - docs/models/role.md @@ -156,6 +179,7 @@ generatedFiles: - docs/models/status.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/textchunktype.md - docs/models/tool.md @@ -168,6 +192,7 @@ generatedFiles: - docs/models/trainingfile.md - docs/models/trainingparameters.md - docs/models/trainingparametersin.md + - docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md - docs/models/unarchiveftmodeloutobject.md @@ -181,47 +206,63 @@ generatedFiles: - docs/models/validationerror.md - docs/models/wandbintegration.md - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md - docs/models/wandbintegrationtype.md - docs/sdks/agents/README.md + - docs/sdks/batch/README.md - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md - docs/sdks/embeddings/README.md - docs/sdks/files/README.md - docs/sdks/fim/README.md - docs/sdks/finetuning/README.md - docs/sdks/jobs/README.md - docs/sdks/mistral/README.md + - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - poetry.toml - py.typed - pylintrc - - pyproject.toml - - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py - src/mistralai/_hooks/sdkhooks.py - src/mistralai/_hooks/types.py + - src/mistralai/_version.py - src/mistralai/agents.py - src/mistralai/basesdk.py + - src/mistralai/batch.py - src/mistralai/chat.py + - src/mistralai/classifiers.py - src/mistralai/embeddings.py - src/mistralai/files.py - src/mistralai/fim.py - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py - src/mistralai/jobs.py + - src/mistralai/mistral_jobs.py - src/mistralai/models/__init__.py - src/mistralai/models/agentscompletionrequest.py - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py - src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - 
src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationobject.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py - src/mistralai/models/completionchunk.py - src/mistralai/models/completionevent.py - src/mistralai/models/completionresponsestreamchoice.py @@ -235,7 +276,10 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_list_filesop.py - src/mistralai/models/files_api_routes_retrieve_fileop.py - src/mistralai/models/files_api_routes_upload_fileop.py - src/mistralai/models/fileschema.py @@ -257,6 +301,9 @@ generatedFiles: - src/mistralai/models/jobin.py - src/mistralai/models/jobmetadataout.py - src/mistralai/models/jobout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -370,12 +417,12 @@ examples: multipart/form-data: {"file": {}} responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "upload"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "upload"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: responses: "200": - application/json: {"data": [], "object": ""} + application/json: {"data": [], "object": "", "total": 768578} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: parameters: @@ -383,7 +430,7 @@ examples: file_id: "" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "repository"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "repository", "deleted": false} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: @@ -403,7 +450,7 @@ examples: application/json: {"model": "codestral-latest"} responses: "200": - application/json: {"id": "7ad642c1-fc6f-4e07-a41b-cdd89dc7fa50", "auto_start": true, "model": "open-mistral-nemo", "status": "QUEUED", "job_type": "", "created_at": 519028, "modified_at": 230313, 
"training_files": []} + application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "open-mistral-7b", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -431,7 +478,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} @@ -439,7 +486,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -455,7 +502,7 @@ examples: agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: - application/json: {"messages": [{"content": ""}], "agent_id": ""} + application/json: {"messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} @@ -463,12 +510,61 @@ examples: stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"messages": [{"content": []}], "agent_id": ""} + application/json: {"messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: - application/json: {"input": "", "model": "Wrangler"} + application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "Wrangler"} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}]} "422": {} + files_api_routes_download_file: + speakeasy-default-files-api-routes-download-file: + parameters: + path: + file_id: "" + jobs_api_routes_batch_get_batch_jobs: + speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + responses: + "200": + application/json: {"total": 768578} + jobs_api_routes_batch_create_batch_job: + speakeasy-default-jobs-api-routes-batch-create-batch-job: + requestBody: + application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2"} + responses: + "200": + application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": ""}, {"message": ""}, {"message": ""}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + jobs_api_routes_batch_get_batch_job: + speakeasy-default-jobs-api-routes-batch-get-batch-job: + parameters: + path: + job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" + responses: + "200": + application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": ""}, {"message": ""}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + jobs_api_routes_batch_cancel_batch_job: + speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + parameters: + path: + job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" + responses: + "200": + application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": ""}, {"message": ""}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + moderations_v1_moderations_post: + speakeasy-default-moderations-v1-moderations-post: + requestBody: + application/json: {"input": [""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + "422": {} + moderations_chat_v1_chat_moderations_post: + speakeasy-default-moderations-chat-v1-chat-moderations-post: + requestBody: + application/json: {"input": [[{"content": ""}, {"content": []}, {"content": ""}], []], "model": "V90"} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + "422": {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 8fd69ab..5a4f1a0 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: 
oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.1.0 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,8 +24,11 @@ python: description: Python Client SDK for the Mistral AI API. enumFormat: union envVarPrefix: MISTRAL + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: @@ -34,7 +38,7 @@ python: shared: "" webhooks: "" inputModelSuffix: input - maxMethodParams: 4 + maxMethodParams: 15 methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 44dff84..46a7d2e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,58 +1,60 @@ -speakeasyVersion: 1.398.0 +speakeasyVersion: 1.434.4 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 - sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 + sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee + sourceBlobDigest: sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 - sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f - sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 + sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce + sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 tags: - latest targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 - sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 + sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee + sourceBlobDigest: sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 - sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f - sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 + sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce + 
sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 + codeSamplesNamespace: mistral-openapi-code-samples + codeSamplesRevisionDigest: sha256:e56faedc510d1c011d19e5fbbaa9d41917ffd6c22833b0795a61aa6da1cbca9b workflow: workflowVersion: 1.0.0 speakeasyVersion: latest sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python @@ -61,6 +63,10 @@ workflow: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false mistralai-gcp-sdk: target: python source: mistral-google-cloud-source @@ -68,9 +74,17 @@ workflow: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false mistralai-sdk: target: python source: mistral-openapi publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 6ef130a..164d399 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -24,6 +24,10 @@ targets: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false mistralai-gcp-sdk: target: python source: mistral-google-cloud-source @@ -31,9 +35,17 @@ targets: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false mistralai-sdk: target: python source: mistral-openapi publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/README.md b/README.md index 0c63e5e..a73c133 100644 --- a/README.md +++ b/README.md @@ -180,7 +180,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, ], agent_id="") @@ -212,6 +213,53 @@ async def main(): # handle response pass +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create an embedding request. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.embeddings.create_async(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + if res is not None: + # handle response + pass + asyncio.run(main()) ``` @@ -322,11 +370,26 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [batch](docs/sdks/batch/README.md) + + +#### [batch.jobs](docs/sdks/mistraljobs/README.md) + +* [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job +* [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job + ### [chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +### [classifiers](docs/sdks/classifiers/README.md) + +* [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations +* [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Moderations Chat + ### [embeddings](docs/sdks/embeddings/README.md) * [create](docs/sdks/embeddings/README.md#create) - Embeddings @@ -337,6 +400,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [list](docs/sdks/files/README.md#list) - List Files * [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File * [delete](docs/sdks/files/README.md#delete) - Delete File +* [download](docs/sdks/files/README.md#download) - Download File ### [fim](docs/sdks/fim/README.md) @@ -479,12 +543,23 @@ if res is not None: ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an exception. + +By default, an API error will raise a models.SDKError exception, which has the following properties: + +| Property | Type | Description | +|-----------------|------------------|-----------------------| +| `.status_code` | *int* | The HTTP status code | +| `.message` | *str* | The error message | +| `.raw_response` | *httpx.Response* | The raw HTTP response | +| `.body` | *str* | The response content | + +When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. 
For example, the `list_async` method may raise the following exceptions: -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | ### Example @@ -520,9 +595,9 @@ except models.SDKError as e: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | +| Name | Server | Variables | +| ----- | ------ | --------- | +| `eu` | `https://api.mistral.ai` | None | #### Example @@ -531,7 +606,7 @@ from mistralai import Mistral import os s = Mistral( - server="prod", + server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), ) @@ -653,9 +728,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| --------- | ---- | ----------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python diff --git a/RELEASES.md b/RELEASES.md index fee21df..b504c7f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -48,4 +48,14 @@ Based on: ### Generated - [python v1.1.0] . ### Releases -- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . \ No newline at end of file +- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . + +## 2024-11-07 19:52:56 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.3 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.0] . +### Releases +- [PyPI v1.2.0] https://pypi.org/project/mistralai/1.2.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 6a6c46b..7d9d2ce 100644 --- a/USAGE.md +++ b/USAGE.md @@ -13,7 +13,10 @@ s = Mistral( ) res = s.chat.complete(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -35,7 +38,10 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: # handle response @@ -106,7 +112,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -130,13 +137,61 @@ async def main(): ) res = await s.agents.complete_async(messages=[ { - "content": "", + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, ], agent_id="") if res is not None: # handle response pass +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create an embedding request. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.embeddings.create_async(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + if res is not None: + # handle response + pass + asyncio.run(main()) ``` \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 7f6c428..c4259f2 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -8,10 +8,12 @@ | `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/agentscompletionrequestmessages.md index 946ef46..d6a1e69 100644 --- a/docs/models/agentscompletionrequestmessages.md +++ b/docs/models/agentscompletionrequestmessages.md @@ -9,6 +9,12 @@ value: models.AssistantMessage = /* values here */ ``` +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + ### `models.ToolMessage` ```python diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index d849a95..21e19b5 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,15 +3,17 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md index d8cf99e..1bc736a 100644 --- a/docs/models/agentscompletionstreamrequestmessages.md +++ b/docs/models/agentscompletionstreamrequestmessages.md @@ -9,6 +9,12 @@ value: models.AssistantMessage = /* values here */ ``` +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + ### `models.ToolMessage` ```python diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md new file mode 100644 index 0000000..5dfa68a --- /dev/null +++ b/docs/models/apiendpoint.md @@ -0,0 +1,11 @@ +# APIEndpoint + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | +| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | +| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | +| `ROOT_V1_MODERATIONS` | /v1/moderations | \ No newline at end of file diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 0c36cde..53f1cc7 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessagecontent.md b/docs/models/assistantmessagecontent.md new file mode 100644 index 0000000..047b7cf --- /dev/null +++ b/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 1c10ae3..0bdbb65 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,4 +15,5 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batcherror.md b/docs/models/batcherror.md new file mode 100644 index 0000000..95016cd --- /dev/null +++ b/docs/models/batcherror.md @@ -0,0 +1,9 @@ +# BatchError + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | +| `count` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md new file mode 100644 index 0000000..5203a52 --- /dev/null +++ b/docs/models/batchjobin.md @@ -0,0 +1,12 @@ +# BatchJobIn + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md new file mode 100644 index 0000000..d79d9a2 --- /dev/null +++ 
b/docs/models/batchjobout.md @@ -0,0 +1,24 @@ +# BatchJobOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | :heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjoboutobject.md b/docs/models/batchjoboutobject.md new file mode 100644 index 0000000..64ae896 --- /dev/null +++ b/docs/models/batchjoboutobject.md @@ -0,0 +1,8 @@ +# BatchJobOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md new file mode 100644 index 0000000..3104118 --- /dev/null +++ b/docs/models/batchjobsout.md @@ -0,0 +1,10 @@ +# BatchJobsOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `total` | *int* | :heavy_check_mark: | N/A | +| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsoutobject.md b/docs/models/batchjobsoutobject.md new file mode 100644 index 0000000..d4bf9f6 --- /dev/null +++ b/docs/models/batchjobsoutobject.md @@ -0,0 +1,8 @@ +# BatchJobsOutObject + + +## Values + +| Name | Value | +| ------ | ------ | +| `LIST` | list | \ No newline at end of file diff --git a/docs/models/batchjobstatus.md b/docs/models/batchjobstatus.md new file mode 100644 index 0000000..64617b3 --- /dev/null +++ b/docs/models/batchjobstatus.md @@ -0,0 +1,14 @@ +# BatchJobStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `RUNNING` | RUNNING | +| `SUCCESS` | SUCCESS | +| `FAILED` | FAILED | +| `TIMEOUT_EXCEEDED` | TIMEOUT_EXCEEDED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | +| 
`CANCELLED` | CANCELLED | \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md new file mode 100644 index 0000000..990408b --- /dev/null +++ b/docs/models/chatclassificationrequest.md @@ -0,0 +1,9 @@ +# ChatClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatclassificationrequestinputs.md new file mode 100644 index 0000000..290c9ad --- /dev/null +++ b/docs/models/chatclassificationrequestinputs.md @@ -0,0 +1,19 @@ +# ChatClassificationRequestInputs + +Chat to classify + + +## Supported Types + +### `List[models.One]` + +```python +value: List[models.One] = /* values here */ +``` + +### `List[List[models.Two]]` + +```python +value: List[List[models.Two]] = /* values here */ +``` + diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 8419760..d458081 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
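
The new request fields in the table above (`temperature` now nullable with a model-dependent default, plus `presence_penalty`, `frequency_penalty` and `n`) are easiest to read in an actual call. A minimal sketch, assuming the generated client flattens these fields into keyword arguments on `chat.complete` as the chat SDK README suggests; the model name and prompt are illustrative only:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Leave `temperature` out entirely to inherit the model-specific default
# that the /models endpoint reports.
res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Name three French painters."}],
    presence_penalty=0.5,   # discourage reusing words/phrases already present
    frequency_penalty=0.5,  # penalize tokens in proportion to how often they appeared
    n=2,                    # two completions; input tokens are billed once
)

for choice in res.choices:
    print(choice.message.content)
```
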
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index fd1fc48..63865c1 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/classificationobject.md b/docs/models/classificationobject.md new file mode 100644 index 0000000..68f2e2b --- /dev/null +++ b/docs/models/classificationobject.md @@ -0,0 +1,9 @@ +# ClassificationObject + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Classifier result thresholded | +| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Classifier result | \ No newline at end of file diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md new file mode 100644 index 0000000..e155668 --- /dev/null +++ b/docs/models/classificationrequest.md @@ -0,0 +1,9 @@ +# ClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classificationrequestinputs.md b/docs/models/classificationrequestinputs.md new file mode 100644 index 0000000..69d75d1 --- /dev/null +++ b/docs/models/classificationrequestinputs.md @@ -0,0 +1,19 @@ +# ClassificationRequestInputs + +Text to classify. 
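
Both classification shapes above (raw text via `ClassificationRequest`, conversations via `ChatClassificationRequest` earlier in this diff) are served by the new classifiers SDK added in `docs/sdks/classifiers/README.md`. A minimal sketch for the text case, assuming the generated client exposes a `classifiers.moderate` method and that a moderation-style model ID is valid here; both names are assumptions, not confirmed by this diff:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# `inputs` accepts either a single string or a list of strings
# (see ClassificationRequestInputs above).
res = client.classifiers.moderate(
    model="mistral-moderation-latest",  # illustrative model name
    inputs=["First text to classify.", "Second text to classify."],
)

# Each result carries thresholded categories plus the raw scores
# (see ClassificationObject below).
for result in res.results or []:
    print(result.categories, result.category_scores)
```

Chat inputs work the same way, except that each entry is a list of messages rather than a string, as the `ChatClassificationRequestInputs` union shows.
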
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/classificationresponse.md b/docs/models/classificationresponse.md new file mode 100644 index 0000000..4765ff6 --- /dev/null +++ b/docs/models/classificationresponse.md @@ -0,0 +1,10 @@ +# ClassificationResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `results` | List[[models.ClassificationObject](../models/classificationobject.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md index 4cd3cfd..a833dc2 100644 --- a/docs/models/content.md +++ b/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index d32f8e1..61deabb 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 584a8be..4d215c7 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. 
| \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilepurpose.md b/docs/models/filepurpose.md similarity index 50% rename from docs/models/filesapiroutesuploadfilepurpose.md rename to docs/models/filepurpose.md index 164af61..5152aee 100644 --- a/docs/models/filesapiroutesuploadfilepurpose.md +++ b/docs/models/filepurpose.md @@ -1,8 +1,9 @@ -# FilesAPIRoutesUploadFilePurpose +# FilePurpose ## Values | Name | Value | | ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file +| `FINE_TUNE` | fine-tune | +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/filesapiroutesdownloadfilerequest.md b/docs/models/filesapiroutesdownloadfilerequest.md new file mode 100644 index 0000000..8b28cb0 --- /dev/null +++ b/docs/models/filesapiroutesdownloadfilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDownloadFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md new file mode 100644 index 0000000..b28ab3f --- /dev/null +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -0,0 +1,13 @@ +# FilesAPIRoutesListFilesRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md index 2472dcc..41631b2 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | -| `purpose` | [Optional[models.FilesAPIRoutesUploadFilePurpose]](../models/filesapiroutesuploadfilepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index a877bee..47fa486 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.FileSchemaPurpose](../models/fileschemapurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fileschemapurpose.md b/docs/models/fileschemapurpose.md deleted file mode 100644 index b7ba511..0000000 --- a/docs/models/fileschemapurpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# FileSchemaPurpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
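
With `purpose` now unified under `FilePurpose` (`fine-tune` or `batch`), the upload and list routes feed directly into the new batch endpoints. A minimal sketch, assuming the generated client exposes `files.upload`, `files.list` and a nested `batch.jobs.create` as suggested by the new `docs/sdks/files/README.md`, `docs/sdks/batch/README.md` and `docs/sdks/mistraljobs/README.md`; file names, the model and the endpoint value are illustrative:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Upload a JSONL file of requests for the batch API instead of fine-tuning.
batch_file = client.files.upload(
    file={
        "file_name": "batch_requests.jsonl",
        "content": open("batch_requests.jsonl", "rb"),
    },
    purpose="batch",
)

# The list route now supports paging and filtering by purpose,
# and ListFilesOut reports a `total` count.
listing = client.files.list(page=0, page_size=20, purpose="batch")
print(listing.total, [f.filename for f in listing.data])

# Hand the uploaded file to a batch job; the endpoint selects the target route.
job = client.batch.jobs.create(
    input_files=[batch_file.id],
    model="mistral-small-latest",
    endpoint="/v1/chat/completions",
)
print(job.id, job.status)
```
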
- - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index b4b024e..236d2d2 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
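
The FIM request keeps the same `prompt`/`suffix` pairing; `min_tokens` is simply reordered to the end of the table. A minimal fill-in-the-middle sketch, assuming the `fim.complete` method documented in `docs/sdks/fim/README.md`:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# The model fills in whatever belongs between `prompt` and `suffix`.
res = client.fim.complete(
    model="codestral-latest",
    prompt="def add(a, b):\n",
    suffix="    return result\n",
    max_tokens=64,
)

print(res.choices[0].message.content)
```

As with chat, leaving `temperature` unset falls back to the model's default rather than a fixed 0.7.
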
| | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index acffb53..fa63593 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index fc633c0..1efeadb 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,5 +19,6 @@ Extra fields for fine-tuned models. 
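
The `default_model_temperature` field added to the fine-tuned model card just below is the per-model default that the updated `temperature` descriptions tell you to look up. A minimal sketch of reading it, assuming the `models.retrieve` method from `docs/sdks/models/README.md` returns one of the model card variants; the model ID is purely illustrative:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

card = client.models.retrieve(model_id="mistral-small-latest")  # illustrative ID

# Fine-tuned cards (type "fine-tuned") may report the temperature used
# when a request leaves `temperature` unset; guard in case the variant lacks it.
print(card.id, getattr(card, "default_model_temperature", None))
```
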
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md new file mode 100644 index 0000000..0b38470 --- /dev/null +++ b/docs/models/ftmodelcardtype.md @@ -0,0 +1,8 @@ +# FTModelCardType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/httpvalidationerror.md b/docs/models/httpvalidationerror.md index 6389243..712a148 100644 --- a/docs/models/httpvalidationerror.md +++ b/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md new file mode 100644 index 0000000..c19d024 --- /dev/null +++ b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesBatchCancelBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md new file mode 100644 index 0000000..3930aac --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesBatchGetBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md new file mode 100644 index 0000000..93de090 --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -0,0 +1,14 @@ +# JobsAPIRoutesBatchGetBatchJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | [OptionalNullable[models.BatchJobStatus]](../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md index 3694739..ee544c1 100644 --- 
a/docs/models/listfilesout.md +++ b/docs/models/listfilesout.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | | `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | -| `object` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `object` | *str* | :heavy_check_mark: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/one.md b/docs/models/one.md new file mode 100644 index 0000000..3de496a --- /dev/null +++ b/docs/models/one.md @@ -0,0 +1,29 @@ +# One + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/purpose.md b/docs/models/purpose.md deleted file mode 100644 index 6c795b9..0000000 --- a/docs/models/purpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# Purpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 0231177..93aa502 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -3,14 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.RetrieveFileOutPurpose](../models/retrievefileoutpurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `deleted` | *bool* | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievefileoutpurpose.md b/docs/models/retrievefileoutpurpose.md deleted file mode 100644 index 8b1df1a..0000000 --- a/docs/models/retrievefileoutpurpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# RetrieveFileOutPurpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/sampletype.md b/docs/models/sampletype.md index 888fd63..34a6a01 100644 --- a/docs/models/sampletype.md +++ b/docs/models/sampletype.md @@ -3,7 +3,10 @@ ## Values -| Name | Value | -| ---------- | ---------- | -| `PRETRAIN` | pretrain | -| `INSTRUCT` | instruct | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `PRETRAIN` | pretrain | +| `INSTRUCT` | instruct | +| `BATCH_REQUEST` | batch_request | +| `BATCH_RESULT` | batch_result | +| `BATCH_ERROR` | batch_error | \ No newline at end of file diff --git a/docs/models/source.md b/docs/models/source.md index ef05562..bb1ed61 100644 --- a/docs/models/source.md +++ b/docs/models/source.md @@ -6,4 +6,5 @@ | Name | Value | | ------------ | ------------ | | `UPLOAD` | upload | -| `REPOSITORY` | repository | \ No newline at end of file +| `REPOSITORY` | repository | +| `MISTRAL` | mistral | \ No newline at end of file diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 7f82798..0dba71c 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | 
---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md new file mode 100644 index 0000000..e0d27d9 --- /dev/null +++ b/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/docs/models/trainingparameters.md b/docs/models/trainingparameters.md index 0a47b61..e56df8e 100644 --- a/docs/models/trainingparameters.md +++ b/docs/models/trainingparameters.md @@ -10,4 +10,5 @@ | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/trainingparametersin.md index 34918ce..64c31a4 100644 --- a/docs/models/trainingparametersin.md +++ b/docs/models/trainingparametersin.md @@ -12,4 +12,5 @@ The fine-tuning hyperparameter settings used in a fine-tune job. | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/two.md b/docs/models/two.md new file mode 100644 index 0000000..59dc2be --- /dev/null +++ b/docs/models/two.md @@ -0,0 +1,29 @@ +# Two + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/type.md b/docs/models/type.md index 342c8c7..239a00f 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file +| Name | Value | +| ------ | ------ | +| `BASE` | base | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index 7a695ba..c997443 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.Purpose](../models/purpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". 
| file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 3d96f1c..63b0131 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index e7616fc..b9a3a86 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. 
| +| `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationouttype.md b/docs/models/wandbintegrationouttype.md new file mode 100644 index 0000000..5a7533c --- /dev/null +++ b/docs/models/wandbintegrationouttype.md @@ -0,0 +1,8 @@ +# WandbIntegrationOutType + + +## Values + +| Name | Value | +| ------- | ------- | +| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 279a13f..3eb946a 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -26,7 +26,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -43,13 +44,15 @@ if res is not None: | `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -58,11 +61,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -80,13 +82,8 @@ s = Mistral( res = s.agents.stream(messages=[ { - "content": [ - { - "image_url": { - "url": "http://possible-veal.org", - }, - }, - ], + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -99,19 +96,21 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -119,7 +118,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/batch/README.md b/docs/sdks/batch/README.md new file mode 100644 index 0000000..55a9c13 --- /dev/null +++ b/docs/sdks/batch/README.md @@ -0,0 +1,2 @@ +# Batch +(*batch*) \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index d5e85cc..d6f4a76 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -25,7 +25,10 @@ s = Mistral( ) res = s.chat.complete(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -36,22 +39,24 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -59,11 +64,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -80,7 +84,10 @@ s = Mistral( ) res = s.chat.stream(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -92,22 +99,24 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -115,7 +124,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md new file mode 100644 index 0000000..7e48638 --- /dev/null +++ b/docs/sdks/classifiers/README.md @@ -0,0 +1,101 @@ +# Classifiers +(*classifiers*) + +## Overview + +Classifiers API. + +### Available Operations + +* [moderate](#moderate) - Moderations +* [moderate_chat](#moderate_chat) - Moderations Chat + +## moderate + +Moderations + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.classifiers.moderate(inputs=[ + "", +]) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## moderate_chat + +Moderations Chat + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.classifiers.moderate_chat(inputs=[ + [ + { + "content": "", + }, + ], +], model="V90") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index ae27086..9f47e70 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -23,7 +23,10 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) -res = s.embeddings.create(inputs="", model="Wrangler") +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") if res is not None: # handle response @@ -33,12 +36,12 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -46,7 +49,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 41ed961..fc5784a 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -11,6 +11,7 @@ Files API * [list](#list) - List Files * [retrieve](#retrieve) - Retrieve File * [delete](#delete) - Delete File +* [download](#download) - Download File ## upload @@ -46,6 +47,7 @@ if res is not None: | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name, you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `purpose` | [Optional[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -54,10 +56,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## list @@ -85,6 +86,12 @@ if res is not None: | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -93,10 +100,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -133,10 +139,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -173,6 +178,45 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## download + +Download a file + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.files.download(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[httpx.Response](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index cfb3d50..d981152 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -34,19 +34,19 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -54,11 +54,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -85,19 +84,19 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -105,7 +104,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 0929c78..6ecf6e5 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -54,10 +54,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## create @@ -101,10 +100,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## get @@ -141,10 +139,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -181,10 +178,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## start @@ -221,6 +217,6 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md new file mode 100644 index 0000000..5852c2c --- /dev/null +++ b/docs/sdks/mistraljobs/README.md @@ -0,0 +1,179 @@ +# MistralJobs +(*batch.jobs*) + +## Overview + +### Available Operations + +* [list](#list) - Get Batch Jobs +* [create](#create) - Create Batch Job +* [get](#get) - Get Batch Job +* [cancel](#cancel) - Cancel Batch Job + +## list + +Get a list of batch jobs for your organization and user. 
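The listing call also accepts the paging and filter arguments documented in the Parameters table further below (whose generated descriptions are only `N/A`). A minimal sketch of a filtered listing, in which the model ID, the status value, and the `data` attribute on the response are assumptions for illustration rather than values taken from this diff:

```python
import os

from mistralai import Mistral

s = Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
)

# List only the running batch jobs created by the current user for one model,
# 50 results per page. The filter values below are illustrative.
res = s.batch.jobs.list(
    page=0,
    page_size=50,
    model="mistral-small-latest",  # assumed model ID, not taken from this diff
    created_by_me=True,
    status="RUNNING",              # assumed value; models.BatchJobStatus holds the real members
)

if res is not None:
    for job in res.data:           # assumes models.BatchJobsOut exposes its jobs as `data`
        print(job.id, job.status)
```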
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | [OptionalNullable[models.BatchJobStatus]](../../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobsOut](../../models/batchjobsout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new batch job, it will be queued for processing. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.create(input_files=[ + "a621cf02-1cd9-4cf5-8403-315211a509a3", +], endpoint="/v1/fim/completions", model="2") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a batch job details by its UUID. 
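Because batch jobs run asynchronously, a common pattern is to poll this endpoint until the returned job reaches a terminal status. A sketch of that pattern under stated assumptions (the status strings and the 10-second interval are illustrative, not taken from this diff):

```python
import os
import time

from mistralai import Mistral

s = Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
)

job_id = "b888f774-3e7c-4135-a18c-6b985523c4bc"  # e.g. the id returned by batch.jobs.create()

job = s.batch.jobs.get(job_id=job_id)

# Poll until the job leaves its in-flight states. The status names below are
# assumptions; models.BatchJobStatus is the authoritative list.
while job.status in ("QUEUED", "RUNNING"):
    time.sleep(10)
    job = s.batch.jobs.get(job_id=job_id)

print(f"batch job {job.id} finished with status {job.status}")
```

Once the job has finished, its output can typically be retrieved through the Files API using the file ID recorded on the returned `models.BatchJobOut`.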
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a batch job. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 1a54bbb..2ad489e 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -48,11 +48,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -89,11 +88,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -130,11 +128,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## update @@ -173,10 +170,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## archive @@ -213,10 +209,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## unarchive @@ -253,6 +248,6 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/function_calling.py b/examples/function_calling.py index 76ce489..766a825 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -98,26 +98,25 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: messages.append(UserMessage(content="My transaction ID is T1001.")) response = client.chat.complete(model=model, messages=messages, tools=tools) +messages.append(response.choices[0].message) -tool_call = response.choices[0].message.tool_calls[0] -function_name = tool_call.function.name -function_params = json.loads(tool_call.function.arguments) - -print( - f"calling function_name: {function_name}, with function_params: {function_params}" -) +for tool_call in response.choices[0].message.tool_calls: -function_result = names_to_functions[function_name](**function_params) + function_name = tool_call.function.name + function_params = json.loads(tool_call.function.arguments) -messages.append(response.choices[0].message) -messages.append( - ToolMessage( - name=function_name, - content=function_result, - tool_call_id=tool_call.id, + 
print( + f"calling function_name: {function_name}, with function_params: {function_params}" ) -) + function_result =names_to_functions[function_name](**function_params) + messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, + ) + ) response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 0972d2a..bc550ff 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,42 +1,46 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: e99cb4d498ede912c81ab20b7828c0e3 + docChecksum: 0f195020b1080b5c3b1fc5834d30a929 docVersion: 0.0.2 - speakeasyVersion: 1.396.7 - generationVersion: 2.415.6 - releaseVersion: 1.2.2 - configChecksum: 36e70d966ca186be6efc57911c094dec + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 0600a305e49d44a5fcb3a5a33dc00999 published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionchoicefinishreason.md @@ -66,6 +70,7 @@ generatedFiles: - docs/models/security.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/tool.md - docs/models/toolcall.md @@ -85,13 +90,13 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py - src/mistralai_azure/_hooks/sdkhooks.py - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_version.py - src/mistralai_azure/basesdk.py - src/mistralai_azure/chat.py - src/mistralai_azure/httpclient.py @@ -148,11 +153,11 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "messages": [{"content": []}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "messages": [{"content": ""}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index edcb95b..7280691 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.2.2 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -22,8 +23,11 @@ python: clientServerStatusCodesAsErrors: true description: Python Client SDK for the Mistral AI API in Azure. enumFormat: union + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 0c36cde..53f1cc7 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagecontent.md b/packages/mistralai_azure/docs/models/assistantmessagecontent.md new file mode 100644 index 0000000..047b7cf --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 307b279..68cef4a 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 05f711f..c9c5c87 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md index 4cd3cfd..a833dc2 100644 --- a/packages/mistralai_azure/docs/models/content.md +++ b/packages/mistralai_azure/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md index d32f8e1..61deabb 100644 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/mistralai_azure/docs/models/httpvalidationerror.md index 6389243..712a148 100644 --- a/packages/mistralai_azure/docs/models/httpvalidationerror.md +++ b/packages/mistralai_azure/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/mistralai_azure/docs/models/systemmessage.md index 7f82798..0dba71c 100644 --- a/packages/mistralai_azure/docs/models/systemmessage.md +++ b/packages/mistralai_azure/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/mistralai_azure/docs/models/systemmessagecontent.md new file mode 100644 index 0000000..e0d27d9 --- /dev/null +++ b/packages/mistralai_azure/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/mistralai_azure/docs/models/usermessage.md index 3d96f1c..63b0131 100644 --- a/packages/mistralai_azure/docs/models/usermessage.md +++ b/packages/mistralai_azure/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 5080038..393d0f7 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated by a comma. If names match any regex, diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index a9f13e0..9900116 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.2.2" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API in Azure." authors = ["Mistral",] readme = "README-PYPI.md" @@ -20,7 +20,7 @@ python = "^3.8" eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.9.0" +pydantic = "~2.9.2" python-dateutil = "2.8.2" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_azure/scripts/compile.sh b/packages/mistralai_azure/scripts/compile.sh deleted file mode 100755 index fafe635..0000000 --- a/packages/mistralai_azure/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? 
> "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) - -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) - -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." - exit 0 -fi diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index 68138c4..a1b7f62 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py new file mode 100644 index 0000000..6a45a91 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai_azure" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 5f1e539..e1d3390 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -16,10 +16,9 @@ def stream( *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -46,16 +48,18 @@ def stream( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -74,7 +78,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -86,6 +89,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -135,18 +141,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -155,10 +164,9 @@ async def stream_async( *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -174,6 +182,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -185,16 +196,18 @@ async def stream_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -213,7 +226,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -225,6 +237,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -274,18 +289,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -297,10 +315,9 @@ def complete( List[models.ChatCompletionRequestMessagesTypedDict], ], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ 
@@ -321,6 +338,9 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -330,16 +350,18 @@ def complete( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -358,7 +380,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -372,6 +393,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -421,15 +445,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -441,10 +467,9 @@ async def complete_async( List[models.ChatCompletionRequestMessagesTypedDict], ], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -465,6 +490,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -474,16 +502,18 @@ async def complete_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -502,7 +532,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -516,6 +545,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -565,14 +597,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 70f0799..e662fa7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -2,6 +2,8 @@ from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) @@ -42,7 +44,7 @@ FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -57,10 +59,10 @@ from .sdkerror import SDKError from .security import Security, 
SecurityTypedDict from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkTypedDict, Type @@ -89,6 +91,8 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", @@ -143,6 +147,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 577b7e9..5d978f0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_azure.types import ( BaseModel, @@ -10,28 +11,32 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index a71cd08..a78b72d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_azure.types import BaseModel -from typing import Literal, TypedDict +from mistralai_azure.types import BaseModel, UnrecognizedStr +from mistralai_azure.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -ChatCompletionChoiceFinishReason = Literal[ - "stop", "length", "model_length", "error", "tool_calls" +ChatCompletionChoiceFinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr ] @@ -22,4 +25,6 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: ChatCompletionChoiceFinishReason + finish_reason: Annotated[ + ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) + ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index f2ba234..fd3cb7b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -18,8 +18,8 @@ ) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionRequestStopTypedDict = Union[str, List[str]] @@ -60,14 +60,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[ChatCompletionRequestStopTypedDict] @@ -77,6 +75,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -88,8 +92,8 @@ class ChatCompletionRequest(BaseModel): model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -97,9 +101,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -115,6 +116,15 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -125,16 +135,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index 0a02e46..ecd85d5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 28abddb..8f71f89 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -18,8 +18,8 @@ ) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -62,14 +62,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[Nullable[str]] r"""The ID of the model to use for this 
request.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[StopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -78,6 +76,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -89,8 +93,8 @@ class ChatCompletionStreamRequest(BaseModel): model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -98,9 +102,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[Stop] = None @@ -115,6 +116,15 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -125,16 +135,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py index d2f334d..d6cc2a8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py index b9b68db..5a2039c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class 
CompletionEventTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index c220a51..37294d9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai_azure.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -FinishReason = Literal["stop", "length", "error", "tool_calls"] +FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -21,7 +24,9 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Annotated[ + Nullable[FinishReason], PlainValidator(validate_open_enum(False)) + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 5e8011d..bb39449 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_azure.types import ( BaseModel, @@ -10,27 +11,33 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 081ce1d..488cdce 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index 0afa590..d2f136c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py index c825a5a..b55c82a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index 28f9b4e..1d22d97 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - 
r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index c692033..e4a9d7d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai_azure.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py index 1245881..c1ae831 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/security.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel from mistralai_azure.utils import FieldMetadata, SecurityMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class SecurityTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index cf1775f..3c00a82 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] +SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index 75cc949..583ce18 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict Type = Literal["text"] @@ -12,11 +14,13 @@ class TextChunkTypedDict(TypedDict): text: str + type: Type class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[Type]], 
pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index 3a02ed7..ffd9b06 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 2a768a2..69b4731 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py index 2d3d87f..cc3c2c1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 14ecf73..4362bc9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index 2a92648..b1d094f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py 
b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index e948876..eddfb85 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index 6ab66a1..aa8eaff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index eefd8df..1a31944 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.2" - gen_version: str = "2.415.6" - user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai_azure" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: 
Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 6c26aeb..26d51ae 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -27,6 +27,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -79,10 +83,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", "validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py index 0d17472..5b3bbb0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index 85d57f4..c5eb365 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): return unmarshal(data, typ) diff --git 
a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index e5d61fb..6add360 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,42 +1,46 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 823d9b94fcb9c6588d0af16b7301f4ac + docChecksum: 46baf8da7636ea1bf44557571d011045 docVersion: 0.0.2 - speakeasyVersion: 1.396.7 - generationVersion: 2.415.6 - releaseVersion: 1.2.2 - configChecksum: fa993b7253c0c8c0d114d51422ffb486 + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 6036ab871ca1cf21d35bfc75dc25089b published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionchoicefinishreason.md @@ -71,6 +75,7 @@ generatedFiles: - docs/models/security.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/tool.md - docs/models/toolcall.md @@ -90,13 +95,13 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py - src/mistralai_gcp/_hooks/sdkhooks.py - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_version.py - src/mistralai_gcp/basesdk.py - src/mistralai_gcp/chat.py - src/mistralai_gcp/fim.py @@ -157,11 +162,11 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 43da5ef..97e9faf 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.2.2 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -25,8 +26,11 @@ python: clientServerStatusCodesAsErrors: true description: Python Client SDK for the Mistral AI API in GCP. enumFormat: union + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 0c36cde..53f1cc7 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagecontent.md b/packages/mistralai_gcp/docs/models/assistantmessagecontent.md new file mode 100644 index 0000000..047b7cf --- /dev/null +++ b/packages/mistralai_gcp/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index fb3bfb4..abc8328 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -3,17 +3,19 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID 
of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index eb0d11e..863c022 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -3,17 +3,19 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md index 4cd3cfd..a833dc2 100644 --- a/packages/mistralai_gcp/docs/models/content.md +++ b/packages/mistralai_gcp/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md index d32f8e1..61deabb 100644 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index b4b024e..236d2d2 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index acffb53..fa63593 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/mistralai_gcp/docs/models/httpvalidationerror.md index 6389243..712a148 100644 --- a/packages/mistralai_gcp/docs/models/httpvalidationerror.md +++ b/packages/mistralai_gcp/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/mistralai_gcp/docs/models/systemmessage.md index 7f82798..0dba71c 100644 --- a/packages/mistralai_gcp/docs/models/systemmessage.md +++ b/packages/mistralai_gcp/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md new file mode 100644 index 0000000..e0d27d9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/mistralai_gcp/docs/models/usermessage.md index 3d96f1c..63b0131 100644 --- a/packages/mistralai_gcp/docs/models/usermessage.md +++ b/packages/mistralai_gcp/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff 
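The doc changes above introduce `SystemMessageContent` and make `UserMessage.content` nullable in the GCP package. A short sketch of the accepted shapes, assuming `mistralai_gcp` is installed and using only the model names listed in these docs:

```python
from mistralai_gcp import models

# Content unions accept either a plain string or a list of chunks.
system_plain = models.SystemMessage(content="You are a concise assistant.")
system_chunks = models.SystemMessage(
    content=[models.TextChunk(text="You are a concise assistant.")]
)

user = models.UserMessage(content="Who is the best French painter?")
user_null = models.UserMessage(content=None)  # permitted now that content is Nullable

print(user.model_dump_json())
```
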
--git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 5080038..393d0f7 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated by a comma. If names match any regex, diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 34ea7e5..c4e6488 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.2.2" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" @@ -21,7 +21,7 @@ eval-type-backport = "^0.2.0" google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.9.0" +pydantic = "~2.9.2" python-dateutil = "2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_gcp/scripts/compile.sh b/packages/mistralai_gcp/scripts/compile.sh deleted file mode 100755 index fafe635..0000000 --- a/packages/mistralai_gcp/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? > "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) - -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) - -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." 
- exit 0 -fi diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index 68138c4..a1b7f62 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py new file mode 100644 index 0000000..0472b64 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai-gcp" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 044dd19..19c7435 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -16,10 +16,9 @@ def stream( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -45,16 +47,18 @@ def stream( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
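The new `_version.py` module shown above prefers the installed distribution's metadata over the baked-in string. The same pattern, sketched standalone (this simplification looks the distribution up by name rather than via `__package__` as the generated module does):

```python
import importlib.metadata

__title__: str = "mistralai-gcp"
__version__: str = "1.2.0"  # fallback written at generation time

try:
    # Use the installed wheel's recorded version when available,
    # e.g. when running from a source checkout this lookup may fail.
    __version__ = importlib.metadata.version(__title__)
except importlib.metadata.PackageNotFoundError:
    pass

print(__title__, __version__)
```
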
:param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,7 +76,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -84,6 +87,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request( @@ -132,18 +138,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -152,10 +161,9 @@ async def stream_async( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -171,6 +179,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, 
server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -181,16 +192,18 @@ async def stream_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
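Both streaming variants now drain the HTTP body (`stream_to_text` / `stream_to_text_async`) before raising, so `HTTPValidationError` carries the parsed validation data and `SDKError` carries the response text even on streaming endpoints. A sketch of how that surfaces to an async caller, under the same client assumptions as the previous example:

```python
import asyncio

from mistralai_gcp import MistralGoogleCloud, models


async def main() -> None:
    client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup
    try:
        stream = await client.chat.stream_async(
            model="mistral-large-2407",
            messages=[{"role": "user", "content": "Hello"}],
            n=1,
        )
        async for event in stream:
            delta = event.data.choices[0].delta.content
            if isinstance(delta, str):
                print(delta, end="")
    except models.HTTPValidationError as err:
        # 422 bodies are unmarshalled into structured validation data.
        print("validation error:", err.data)
    except models.SDKError as err:
        # 4XX/5XX and unexpected responses; the drained body text is preserved.
        print("API error:", err.status_code, err.message)


asyncio.run(main())
```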
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -208,7 +221,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -220,6 +232,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request_async( @@ -268,18 +283,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -291,10 +309,9 @@ def complete( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -315,6 +332,9 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -323,16 +343,18 @@ def complete( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
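Because `temperature` on `complete()` is now `OptionalNullable` with an `UNSET` default, omitting it sends nothing and lets the server apply the model-specific default, while passing a float still overrides it. A small illustration, with the client setup assumed as above:

```python
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup

# Omit temperature: the model's own server-side default applies.
res_default = client.chat.complete(
    model="mistral-large-2407",
    messages=[{"role": "user", "content": "Give me one fun fact."}],
)

# Pass a float to override that default with more deterministic sampling.
res_focused = client.chat.complete(
    model="mistral-large-2407",
    messages=[{"role": "user", "content": "Give me one fun fact."}],
    temperature=0.2,
)
```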
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
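The new `n` parameter on `complete()` requests several candidate completions in one call, with input tokens billed once. A sketch of iterating the returned choices; the `choices -> message.content` response shape is assumed to follow the response models in this package:

```python
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup

res = client.chat.complete(
    model="mistral-large-2407",
    messages=[{"role": "user", "content": "Suggest a name for a sailing boat."}],
    temperature=0.7,
    n=3,  # three candidate completions; input tokens are only billed once
)

for choice in res.choices:
    # Each ChatCompletionChoice carries index, message, and finish_reason.
    print(choice.index, choice.finish_reason, choice.message.content)
```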
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -350,7 +372,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -364,6 +385,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request( @@ -412,15 +436,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -432,10 +458,9 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -456,6 +481,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -464,16 +492,18 @@ async def complete_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -491,7 +521,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -505,6 +534,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request_async( @@ -553,14 +585,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index cda380c..fb3bf90 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -16,10 +16,9 @@ def stream( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -29,6 +28,7 @@ def stream( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: 
OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,14 +39,14 @@ def stream( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
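In the reordered `fim.stream()` signature above, `min_tokens` is still available but now follows `suffix`; since every argument is keyword-only, call sites only need to pass it by name. A hedged fill-in-the-middle streaming sketch, with client setup and event handling assumed as in the earlier examples:

```python
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup

fim_stream = client.fim.stream(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:\n",
    suffix="\nprint(fibonacci(10))",  # the model fills in between prompt and suffix
    min_tokens=16,                    # still supported, now declared after suffix
    max_tokens=128,
)

for event in fim_stream:
    delta = event.data.choices[0].delta.content
    if isinstance(delta, str):
        print(delta, end="")
```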
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -64,12 +64,12 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -118,18 +118,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -138,10 +141,9 @@ async def stream_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -151,6 +153,7 @@ async def stream_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -161,14 +164,14 @@ async def stream_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -186,12 +189,12 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -240,18 +243,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -260,10 +266,9 @@ def complete( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -273,6 +278,7 @@ def complete( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -283,14 +289,14 @@ def complete( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
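For the non-streaming `fim.complete()` path, the full result is returned as a single response object once generation finishes; `stop` bounds the generation and `random_seed` makes repeated identical calls deterministic, as the docstring notes. A sketch, with the response assumed to mirror the chat response shape (`choices[0].message.content`) and the client assumed as before:

```python
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup

res = client.fim.complete(
    model="codestral-latest",
    prompt="# Return the squares of a list of ints\ndef squares(xs):\n",
    suffix="\nprint(squares([1, 2, 3]))",
    stop=["\n\n"],     # cut generation at the first blank line
    random_seed=1234,  # identical calls reproduce the same completion
)
print(res.choices[0].message.content)
```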
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -308,12 +314,12 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -362,15 +368,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -379,10 +387,9 @@ async def complete_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -392,6 +399,7 @@ async def complete_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -402,14 +410,14 @@ async def complete_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
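The async variant `complete_async()` takes the same arguments, including the relocated `min_tokens`, and is simply awaited. A short sketch under the same assumptions about the client object:

```python
import asyncio

from mistralai_gcp import MistralGoogleCloud


async def fill_in_the_middle() -> None:
    client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed setup
    res = await client.fim.complete_async(
        model="codestral-latest",
        prompt="def greet(name: str) -> str:\n",
        suffix="\nprint(greet('world'))",
        min_tokens=8,   # keyword-only, now declared after suffix
        max_tokens=64,
    )
    print(res.choices[0].message.content)


asyncio.run(fill_in_the_middle())
```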
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -427,12 +435,12 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -481,14 +489,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 84acf24..db408df 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -2,6 +2,8 @@ from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) @@ -42,7 +44,7 @@ FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -70,10 +72,10 @@ from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkTypedDict, Type @@ -102,6 +104,8 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", @@ -166,6 +170,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 33a4965..f93a06c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_gcp.types import ( BaseModel, @@ -10,28 +11,32 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index c585e1e..9bcf124 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_gcp.types import BaseModel -from typing import Literal, TypedDict +from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -ChatCompletionChoiceFinishReason = Literal[ - "stop", "length", "model_length", "error", "tool_calls" +ChatCompletionChoiceFinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr ] @@ -22,4 +25,6 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: ChatCompletionChoiceFinishReason + finish_reason: Annotated[ + ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) + ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index dbe6f55..e1c263b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -18,8 +18,8 @@ ) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from 
typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionRequestStopTypedDict = Union[str, List[str]] @@ -60,14 +60,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[ChatCompletionRequestStopTypedDict] @@ -77,6 +75,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class ChatCompletionRequest(BaseModel): @@ -86,8 +90,8 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -95,9 +99,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -113,21 +114,39 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 5fb1044..0404a9d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 5bb7059..5fc4085 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -18,8 +18,8 @@ ) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -62,14 +62,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[StopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -78,6 +76,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class ChatCompletionStreamRequest(BaseModel): @@ -87,8 +91,8 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -96,9 +100,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[Stop] = None @@ -113,21 +114,39 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py index f0561ef..ca002f5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py index 7086fce..33278c1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict from mistralai_gcp.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CompletionEventTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index a09f67f..8d77997 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, 
UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -FinishReason = Literal["stop", "length", "error", "tool_calls"] +FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -21,7 +24,9 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Annotated[ + Nullable[FinishReason], PlainValidator(validate_open_enum(False)) + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index 314e52a..bb540c9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_gcp.types import ( BaseModel, @@ -10,27 +11,33 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 8693e34..3a85176 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionRequestStopTypedDict = Union[str, List[str]] @@ -29,14 +29,12 @@ class FIMCompletionRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[FIMCompletionRequestStopTypedDict] @@ -45,6 +43,8 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionRequest(BaseModel): @@ -57,8 +57,8 @@ class FIMCompletionRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -66,9 +66,6 @@ class FIMCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -81,19 +78,29 @@ class FIMCompletionRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index ad28515..a4d273a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class FIMCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index d05918c..f47937b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -29,14 +29,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -44,6 +42,8 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionStreamRequest(BaseModel): @@ -56,8 +56,8 @@ class FIMCompletionStreamRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -65,9 +65,6 @@ class FIMCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[FIMCompletionStreamRequestStop] = None @@ -79,19 +76,29 @@ class FIMCompletionStreamRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. 
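# Illustrative sketch, assuming MISTRAL_API_KEY is set and that "codestral-latest" is a
# valid FIM-capable model name (an assumption): the root mistralai package exposes the
# same FIM request shape, and with `temperature` now defaulting to UNSET, omitting it
# defers to the model-specific server-side default described in the updated docstrings.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.fim.complete(
    model="codestral-latest",                # assumed model name
    prompt="def add(a: int, b: int) -> int:\n",
    suffix="\nprint(add(1, 2))",             # the model fills in between prompt and suffix
)
if res is not None:
    print(res.choices[0].message.content)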
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index 533c3de..c3168ee 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py index d8daaef..02da9bb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py index 47af74a..00ec22f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 68b1f78..11024f8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 0398e9b..fde8986 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai_gcp.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from 
typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py index c9c0e0f..3857494 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import FieldMetadata, SecurityMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class SecurityTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index 872b9e3..8779855 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] +SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index 5c3774c..48367e4 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict Type = Literal["text"] @@ -12,11 +14,13 @@ class TextChunkTypedDict(TypedDict): text: str + type: Type class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py index 24e1a9f..a1d477d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing 
import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 6374f2c..5b4b217 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py index bd6dbe7..dc213e6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index caff0ad..80e44ed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index d63486b..9de6af7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index ccc6efb..229dbaf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + 
content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index 23e9595..b8bd434 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 408d8c3..fdb296c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.2" - gen_version: str = "2.415.6" - user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 6c26aeb..26d51ae 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -27,6 +27,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -79,10 +83,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + 
"stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", "validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py index 0d17472..5b3bbb0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index 85d57f4..c5eb365 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): return unmarshal(data, typ) diff --git a/pylintrc b/pylintrc index 5080038..393d0f7 100644 --- a/pylintrc +++ b/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated by a comma. If names match any regex, diff --git a/pyproject.toml b/pyproject.toml index 4200b2b..31aea5d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.1.0" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/scripts/compile.sh b/scripts/compile.sh deleted file mode 100755 index fafe635..0000000 --- a/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? > "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) - -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) - -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." - exit 0 -fi diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py index 68138c4..a1b7f62 100644 --- a/src/mistralai/__init__.py +++ b/src/mistralai/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py new file mode 100644 index 0000000..752c9ed --- /dev/null +++ b/src/mistralai/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 05d1775..1b5c6a1 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -20,7 +20,6 @@ def complete( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -41,6 +40,9 @@ def complete( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -50,13 +52,15 @@ def complete( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
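# Illustrative sketch, assuming the client from the earlier example and a placeholder
# agent id: the new `presence_penalty`, `frequency_penalty` and `n` arguments map
# directly onto agents.complete, while `min_tokens` is no longer accepted.
res = client.agents.complete(
    agent_id="ag-your-agent-id",             # placeholder id
    messages=[{"role": "user", "content": "Suggest three names for a CLI tool."}],
    presence_penalty=0.4,                    # nudge the model toward new wording
    frequency_penalty=0.4,                   # penalize frequently repeated tokens
    n=3,                                     # three completions, input tokens billed once
)
if res is not None:
    for choice in res.choices:
        print(choice.message.content)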
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -71,7 +75,6 @@ def complete( request = models.AgentsCompletionRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -85,6 +88,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -136,15 +142,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -157,7 +165,6 @@ async def complete_async( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -178,6 +185,9 @@ async def complete_async( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -187,13 +197,15 @@ async def complete_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -208,7 +220,6 @@ async def complete_async( request = models.AgentsCompletionRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -222,6 +233,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -273,15 +287,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -294,7 +310,6 @@ def stream( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -315,6 +330,9 @@ def stream( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -326,13 +344,15 @@ def stream( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
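# Illustrative streaming sketch using the same client and placeholder agent id; the
# field names (event.data.choices[...].delta) follow the CompletionEvent,
# CompletionChunk and DeltaMessage models referenced in this diff.
events = client.agents.stream(
    agent_id="ag-your-agent-id",
    messages=[{"role": "user", "content": "Stream a short haiku."}],
    frequency_penalty=0.2,
)
if events is not None:
    for event in events:
        delta = event.data.choices[0].delta
        if isinstance(delta.content, str):   # content may also be None or a list of chunks
            print(delta.content, end="", flush=True)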
+ :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -347,7 +367,6 @@ def stream( request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -361,6 +380,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -412,18 +434,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -436,7 +461,6 @@ async def stream_async( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -457,6 +481,9 @@ async def stream_async( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -468,13 +495,15 @@ async def stream_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -489,7 +518,6 @@ async def stream_async( request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -503,6 +531,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -554,17 +585,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py new file mode 100644 index 0000000..bb59abd --- /dev/null +++ b/src/mistralai/batch.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.mistral_jobs import MistralJobs + + +class Batch(BaseSDK): + jobs: MistralJobs + + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = MistralJobs(self.sdk_configuration) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 3e770f1..dd5ca69 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -16,10 +16,9 @@ def complete( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -44,16 +46,18 @@ def complete( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
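# Illustrative sketch with the same client and an assumed model name: since
# `temperature` now defaults to UNSET, leaving it out uses the model-specific default
# mentioned in the updated docstring (retrievable via the /models endpoint), while
# `random_seed` keeps repeated calls deterministic.
res = client.chat.complete(
    model="mistral-small-latest",            # assumed model name
    messages=[{"role": "user", "content": "Summarize semantic versioning in one sentence."}],
    random_seed=42,                          # deterministic sampling across calls
    # temperature intentionally omitted -> server-side, model-dependent default
)
if res is not None:
    print(res.choices[0].message.content)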
:param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -72,7 +76,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -84,6 +87,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -135,15 +141,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -152,10 +160,9 @@ async def complete_async( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -171,6 +178,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -180,16 +190,18 @@ async def complete_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
+ :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
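# Illustrative error-handling sketch: a 422 surfaces as HTTPValidationError with a
# structured `.data` payload, while other 4XX/5XX responses are raised as SDKError
# whose body is now read through the stream_to_text helpers added in this diff.
from mistralai import models

try:
    client.chat.complete(
        model="mistral-small-latest",        # assumed model name
        messages=[{"role": "user", "content": "Hello"}],
    )
except models.HTTPValidationError as err:
    print("validation error:", err.data)
except models.SDKError as err:
    print(f"API error {err.status_code}: {err.message}")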
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -208,7 +220,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -220,6 +231,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -271,15 +285,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -291,10 +307,9 @@ def stream( List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -315,6 +330,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -326,16 +344,18 @@ def stream( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -354,7 +374,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -368,6 +387,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -419,18 +441,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -442,10 +467,9 @@ async def stream_async( List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -466,6 +490,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -477,16 +504,18 @@ async def stream_async( :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -505,7 +534,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -519,6 +547,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -570,17 +601,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py new file mode 100644 index 0000000..3a77206 --- /dev/null +++ b/src/mistralai/classifiers.py @@ -0,0 +1,396 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional, Union + + +class Classifiers(BaseSDK): + r"""Classifiers API.""" + + def moderate( + self, + *, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations + + :param inputs: Text to classify. 
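# The chat hunks above drop `min_tokens`, make the default `temperature` UNSET
# (so the server picks a model-specific default), and add `presence_penalty`,
# `frequency_penalty` and `n`. A minimal sketch of calling the updated API; the
# model id and the response field access are assumptions, not taken from this diff.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.chat.complete(
    model="mistral-small-latest",  # assumed model id
    messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    presence_penalty=0.3,   # discourage reuse of words/phrases already present
    frequency_penalty=0.3,  # penalize tokens in proportion to how often they appear
    n=2,                    # number of completions; input tokens are billed once
)
if res is not None:
    for choice in res.choices:
        print(choice.message.content)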
+ :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ClassificationRequest( + inputs=inputs, + model=model, + ) + + req = self.build_request( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="moderations_v1_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def moderate_async( + self, + *, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations + + :param inputs: Text to classify. 
+ :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ClassificationRequest( + inputs=inputs, + model=model, + ) + + req = self.build_request_async( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="moderations_v1_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def moderate_chat( + self, + *, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + model: Nullable[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations Chat + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatClassificationRequest( + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + model=model, + ) + + req = self.build_request( + 
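# A minimal sketch of the new `Classifiers.moderate` method above
# (POST /v1/moderations). The model id is an assumption; `model` is optional
# and nullable in the generated signature.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.classifiers.moderate(
    inputs=["I want to hurt someone.", "What a lovely day!"],
    model="mistral-moderation-latest",  # assumed model id
)
if res is not None:
    print(res)  # ClassificationResponse with one classification per input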
method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="moderations_chat_v1_chat_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def moderate_chat_async( + self, + *, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + model: Nullable[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations Chat + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatClassificationRequest( + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + model=model, + ) + + req = self.build_request_async( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not 
UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="moderations_chat_v1_chat_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index c19a9e3..2aa115c 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -15,7 +15,7 @@ def create( self, *, inputs: Union[models.Inputs, models.InputsTypedDict], - model: str, + model: Optional[str] = "mistral-embed", encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -94,15 +94,17 @@ def create( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -110,7 +112,7 @@ async def create_async( self, *, inputs: Union[models.Inputs, models.InputsTypedDict], - model: str, + model: Optional[str] = "mistral-embed", encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -189,14 +191,16 @@ async def create_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - 
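# A minimal sketch of `Classifiers.moderate_chat` above (POST /v1/chat/moderations),
# which classifies a conversation rather than raw strings. `model` is required
# here (Nullable[str] with no default); the id below and the exact accepted input
# shapes (see ChatClassificationRequestInputs) are assumptions.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.classifiers.moderate_chat(
    inputs=[
        {"role": "user", "content": "Tell me how to pick a lock."},
        {"role": "assistant", "content": "I can't help with that."},
    ],
    model="mistral-moderation-latest",  # assumed model id
)
if res is not None:
    print(res)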
http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 0672405..6cf0fcb 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -1,11 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK +import httpx from mistralai import models, utils from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Optional, Union +from typing import List, Optional, Union class Files(BaseSDK): @@ -15,6 +16,7 @@ def upload( self, *, file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -28,6 +30,7 @@ def upload( Please contact us if you need to increase these storage limits. :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -42,6 +45,7 @@ def upload( request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), + purpose=purpose, ) req = self.build_request( @@ -90,15 +94,17 @@ def upload( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -106,6 +112,7 @@ async def upload_async( self, *, file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -119,6 +126,7 @@ async def upload_async( Please contact us if you need to increase these storage limits. :param file: The File object (not file name) to be uploaded. 
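# With the embeddings.py change above, `model` now defaults to "mistral-embed",
# so it can be omitted. A minimal sketch; reading `res.data` assumes the
# EmbeddingResponse layout from the generated models.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.embeddings.create(inputs=["Hello, world!", "Bonjour le monde !"])
if res is not None:
    print(len(res.data), "embeddings returned")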
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -133,6 +141,7 @@ async def upload_async( request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), + purpose=purpose, ) req = self.build_request_async( @@ -181,21 +190,29 @@ async def upload_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) def list( self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -204,6 +221,12 @@ def list( Returns a list of files that belong to the user's organization. 
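# A minimal sketch of uploading a file with the new optional `purpose` argument
# (models.FilePurpose). The dict field names for `file` and the purpose value
# are assumptions based on the File/FilePurpose models, not on this diff.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
with open("batch_requests.jsonl", "rb") as fh:
    uploaded = client.files.upload(
        file={"file_name": "batch_requests.jsonl", "content": fh},
        purpose="batch",  # assumed FilePurpose value
    )
if uploaded is not None:
    print(uploaded.id)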
+ :param page: + :param page_size: + :param sample_type: + :param source: + :param search: + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -215,12 +238,22 @@ def list( if server_url is not None: base_url = server_url + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + ) + req = self.build_request( method="GET", path="/v1/files", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -254,21 +287,29 @@ def list( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) async def list_async( self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -277,6 +318,12 @@ async def list_async( Returns a list of files that belong to the user's organization. 
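# `Files.list` now takes pagination and filter parameters (page, page_size,
# sample_type, source, search, purpose). A minimal sketch; the purpose value
# and the fields read from the result are assumptions.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
listing = client.files.list(page=0, page_size=20, purpose="fine-tune", search="train")
if listing is not None:
    for f in listing.data:  # ListFilesOut.data holds the file entries
        print(f.id)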
+ :param page: + :param page_size: + :param sample_type: + :param source: + :param search: + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -288,12 +335,22 @@ async def list_async( if server_url is not None: base_url = server_url + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + ) + req = self.build_request_async( method="GET", path="/v1/files", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -327,15 +384,17 @@ async def list_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -407,15 +466,17 @@ def retrieve( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -487,15 +548,17 @@ async def retrieve_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -567,15 +630,17 @@ def delete( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = 
http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -647,14 +712,182 @@ async def delete_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def download( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[httpx.Response]: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="files_api_routes_download_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def download_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[httpx.Response]: + r"""Download File + + Download a file + + 
:param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self.build_request_async( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="files_api_routes_download_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 5239e90..8f8c852 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -16,10 +16,9 @@ def complete( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -29,6 +28,7 @@ def complete( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,14 +39,14 @@ def complete( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
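# A minimal sketch of the new `Files.download` method, which returns the raw
# httpx.Response (the request above is made with stream=True). The file id is
# a placeholder.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.files.download(file_id="00000000-0000-0000-0000-000000000000")
if res is not None:
    with open("downloaded.jsonl", "wb") as out:
        for chunk in res.iter_bytes():  # httpx streaming iterator
            out.write(chunk)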
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -64,12 +64,12 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -120,15 +120,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -137,10 +139,9 @@ async def complete_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -150,6 +151,7 @@ async def complete_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = 
None, timeout_ms: Optional[int] = None, @@ -160,14 +162,14 @@ async def complete_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -185,12 +187,12 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -241,15 +243,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -258,10 +262,9 @@ def stream( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -271,6 +274,7 @@ def stream( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -281,14 +285,14 @@ def stream( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
:param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -306,12 +310,12 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -362,18 +366,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -382,10 +389,9 @@ async def stream_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -395,6 +401,7 @@ async def stream_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -405,14 +412,14 @@ async def stream_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
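# In fim.py above, the default `temperature` also becomes UNSET and `min_tokens`
# moves to the end of the signature while keeping its meaning. A minimal sketch
# of a fill-in-the-middle completion; the response field access is an assumption.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.fim.complete(
    model="codestral-latest",  # per the docstring, only codestral models are supported
    prompt="def fibonacci(n: int) -> int:\n",
    suffix="\nprint(fibonacci(10))",
    max_tokens=128,
)
if res is not None:
    print(res.choices[0].message.content)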
:param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -430,12 +437,12 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -486,17 +493,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index b6c1b24..9f472de 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -102,15 +102,17 @@ def list( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -206,15 +208,17 @@ async def list_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API 
error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -333,15 +337,17 @@ def create( Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -460,15 +466,17 @@ async def create_async( Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -540,15 +548,17 @@ def get( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -620,15 +630,17 @@ async def get_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -700,15 +712,17 @@ def cancel( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error 
occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -780,15 +794,17 @@ async def cancel_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -860,15 +876,17 @@ def start( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -940,14 +958,16 @@ async def start_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py new file mode 100644 index 0000000..59ea13f --- /dev/null +++ b/src/mistralai/mistral_jobs.py @@ -0,0 +1,733 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Dict, List, Optional + + +class MistralJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.BatchJobStatus] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobsOut]: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self.build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: 
Optional[bool] = False, + status: OptionalNullable[models.BatchJobStatus] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobsOut]: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self.build_request_async( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def create( + self, + *, + input_files: List[str], + endpoint: models.APIEndpoint, + model: str, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. 
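
Editor's note, not part of the generated diff: a minimal usage sketch of the new list operation. It assumes the Batch SDK exposes MistralJobs as client.batch.jobs (as the new docs/sdks/mistraljobs/README.md layout suggests) and the usual Mistral(api_key=...) constructor; the filter values are illustrative only.

# Sketch: list batch jobs with the filters defined on list()/list_async() above.
# The client.batch.jobs access path is an assumption; parameter names come from the diff.
import os
from datetime import datetime, timezone
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

jobs = client.batch.jobs.list(
    page=0,
    page_size=50,
    status="RUNNING",  # one of models.BatchJobStatus
    created_after=datetime(2024, 11, 1, tzinfo=timezone.utc),
    created_by_me=True,
)
if jobs is not None:
    print("total:", jobs.total)
    for job in jobs.data or []:
        print(job.id, job.status, job.model)
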
+ + :param input_files: + :param endpoint: + :param model: + :param metadata: + :param timeout_hours: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.BatchJobIn( + input_files=input_files, + endpoint=endpoint, + model=model, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self.build_request( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def create_async( + self, + *, + input_files: List[str], + endpoint: models.APIEndpoint, + model: str, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. 
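
Editor's note: a hedged sketch of create(), using the same assumed client.batch.jobs path. The input file id is a placeholder for a file previously uploaded for batch use; field names and defaults come from BatchJobIn above.

# Sketch: submit a batch of chat-completion requests (placeholder file id, assumed client path).
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

created = client.batch.jobs.create(
    input_files=["<uploaded-batch-file-id>"],   # placeholder id, not a real value
    endpoint="/v1/chat/completions",            # one of models.APIEndpoint
    model="mistral-small-latest",
    metadata={"job_type": "testing"},
    timeout_hours=24,
)
if created is not None:
    print(created.id, created.status, created.total_requests)
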
+ + :param input_files: + :param endpoint: + :param model: + :param metadata: + :param timeout_hours: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.BatchJobIn( + input_files=input_files, + endpoint=endpoint, + model=model, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self.build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Get Batch Job + + Get a batch job details by its UUID. 
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Get Batch Job + + Get a batch job details by its UUID. 
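
Editor's note: get() returns a BatchJobOut, so a caller can poll until the job leaves the active statuses. The job id is a placeholder and the client.batch.jobs path is the same assumption as above; status names come from models.BatchJobStatus.

# Sketch: poll a batch job until it reaches a terminal status.
import os
import time
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

job = client.batch.jobs.get(job_id="<batch-job-uuid>")  # placeholder UUID
while job is not None and job.status in ("QUEUED", "RUNNING", "CANCELLATION_REQUESTED"):
    time.sleep(10)
    job = client.batch.jobs.get(job_id=job.id)

if job is not None:
    print(job.status, job.succeeded_requests, job.failed_requests, job.output_file)
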
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request_async( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Cancel Batch Job + + Request the cancellation of a batch job. 
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Cancel Batch Job + + Request the cancellation of a batch job. 
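
Editor's note: the async variants mirror the sync ones, so cancellation via cancel_async() could look like the sketch below; the job id is a placeholder and the client path is assumed as before.

# Sketch: request cancellation of a batch job with the async client methods.
import asyncio
import os
from mistralai import Mistral

async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    job = await client.batch.jobs.cancel_async(job_id="<batch-job-uuid>")  # placeholder UUID
    if job is not None:
        # Expect CANCELLATION_REQUESTED / CANCELLED, both defined in models.BatchJobStatus.
        print(job.status)

asyncio.run(main())
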
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 8b7f1a2..42d2c66 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -20,6 +20,7 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) +from .apiendpoint import APIEndpoint from .archiveftmodelout import ( ArchiveFTModelOut, ArchiveFTModelOutObject, @@ -27,10 +28,27 @@ ) from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobin import BatchJobIn, BatchJobInTypedDict +from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict +from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict +from .batchjobstatus import BatchJobStatus +from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestInputs, + ChatClassificationRequestInputsTypedDict, + ChatClassificationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, +) from .chatcompletionchoice import ( ChatCompletionChoice, 
ChatCompletionChoiceTypedDict, @@ -61,6 +79,17 @@ ChatCompletionStreamRequestTypedDict, ) from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, +) +from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( @@ -75,7 +104,7 @@ ) from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from .detailedjobout import ( DetailedJobOut, DetailedJobOutIntegrations, @@ -95,10 +124,19 @@ from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .eventout import EventOut, EventOutTypedDict +from .filepurpose import FilePurpose from .files_api_routes_delete_fileop import ( FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict, ) +from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, +) +from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, +) from .files_api_routes_retrieve_fileop import ( FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict, @@ -108,9 +146,8 @@ FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, - FilesAPIRoutesUploadFilePurpose, ) -from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict +from .fileschema import FileSchema, FileSchemaTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -129,7 +166,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) -from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -177,6 +214,18 @@ RepositoriesTypedDict, Status, ) +from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, +) +from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, +) +from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, +) from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, @@ -228,20 +277,16 @@ RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, ) -from .retrievefileout import ( - RetrieveFileOut, - 
RetrieveFileOutPurpose, - RetrieveFileOutTypedDict, -) +from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict @@ -260,7 +305,7 @@ UnarchiveFTModelOutTypedDict, ) from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict +from .uploadfileout import UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( UserMessage, @@ -280,9 +325,14 @@ WandbIntegrationType, WandbIntegrationTypedDict, ) -from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict +from .wandbintegrationout import ( + WandbIntegrationOut, + WandbIntegrationOutType, + WandbIntegrationOutTypedDict, +) __all__ = [ + "APIEndpoint", "AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", @@ -305,10 +355,27 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "BaseModelCard", "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", + "BatchJobIn", + "BatchJobInTypedDict", + "BatchJobOut", + "BatchJobOutObject", + "BatchJobOutTypedDict", + "BatchJobStatus", + "BatchJobsOut", + "BatchJobsOutObject", + "BatchJobsOutTypedDict", + "ChatClassificationRequest", + "ChatClassificationRequestInputs", + "ChatClassificationRequestInputsTypedDict", + "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -327,6 +394,14 @@ "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", + "ClassificationObject", + "ClassificationObjectTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", @@ -377,22 +452,26 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", + "FTModelCardType", "FTModelCardTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", + "FilePurpose", "FileSchema", - "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", @@ -430,6 +509,12 @@ "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + 
"JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", @@ -465,7 +550,8 @@ "ModelList", "ModelListTypedDict", "Object", - "Purpose", + "One", + "OneTypedDict", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", @@ -473,7 +559,6 @@ "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", - "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", @@ -489,6 +574,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkType", @@ -510,6 +597,8 @@ "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", + "Two", + "TwoTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", @@ -529,6 +618,7 @@ "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", + "WandbIntegrationOutType", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 1f0523a..99d074d 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -3,6 +3,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum @@ -11,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict AgentsCompletionRequestStopTypedDict = Union[str, List[str]] @@ -24,13 +25,17 @@ AgentsCompletionRequestMessagesTypedDict = Union[ - UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, ] AgentsCompletionRequestMessages = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")], ], @@ -51,8 +56,6 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""The ID of the agent to use for this completion.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[AgentsCompletionRequestStopTypedDict] @@ -62,6 +65,12 @@ class AgentsCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class AgentsCompletionRequest(BaseModel): @@ -74,9 +83,6 @@ class AgentsCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -92,19 +98,30 @@ class AgentsCompletionRequest(BaseModel): tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", ] - nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 57d1177..4e1757a 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -3,6 +3,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum @@ -11,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -24,13 +25,17 @@ AgentsCompletionStreamRequestMessagesTypedDict = Union[ - UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, ] AgentsCompletionStreamRequestMessages = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")], ], @@ -53,8 +58,6 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""The ID of the agent to use for this completion.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -63,6 +66,12 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
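
Editor's note: to illustrate the reshaped agents request (system messages now accepted in the union, min_tokens dropped, presence_penalty / frequency_penalty / n added), a hedged sketch against client.agents.complete; the agent id is a placeholder and the flattened-keyword call style is assumed from the generated SDK.

# Sketch: agents completion exercising the fields added above (placeholder agent id).
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.agents.complete(
    agent_id="<agent-id>",  # placeholder
    messages=[
        {"role": "system", "content": "Answer in one short sentence."},  # system role now allowed
        {"role": "user", "content": "What is a batch job?"},
    ],
    presence_penalty=0.2,
    frequency_penalty=0.2,
    n=1,  # completions per request; input tokens are only billed once
)
if res is not None and res.choices:
    print(res.choices[0].message.content)
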
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class AgentsCompletionStreamRequest(BaseModel): @@ -75,9 +84,6 @@ class AgentsCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[AgentsCompletionStreamRequestStop] = None @@ -92,19 +98,30 @@ class AgentsCompletionStreamRequest(BaseModel): tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", ] - nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py new file mode 100644 index 0000000..00621eb --- /dev/null +++ b/src/mistralai/models/apiendpoint.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +APIEndpoint = Literal[ + "/v1/chat/completions", "/v1/embeddings", "/v1/fim/completions", "/v1/moderations" +] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index eeffa5d..e78e98c 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict ArchiveFTModelOutObject = Literal["model"] @@ -12,14 +14,18 @@ class ArchiveFTModelOutTypedDict(TypedDict): id: str + object: ArchiveFTModelOutObject archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - # fmt: off - OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[ArchiveFTModelOutObject], AfterValidator(validate_const("model")) + ], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index 92af66a..d7b929b 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -1,31 +1,36 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
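
Editor's note: AssistantMessage.content now also accepts a list of content chunks, so a prefix message can be built from TextChunk as sketched below. This only demonstrates model construction with the widened union; whether a given endpoint accepts chunked assistant content is not asserted here.

# Sketch: construct an assistant prefix message using the new AssistantMessageContent union.
from mistralai.models import AssistantMessage, TextChunk

prefix_msg = AssistantMessage(
    content=[TextChunk(text="Here is a three-step plan:")],  # List[ContentChunk] now allowed
    prefix=True,  # condition the model to start its answer with this content
)
print(prefix_msg.model_dump(exclude_unset=True))
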
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 85af1f1..edb8174 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -4,10 +4,15 @@ from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +Type = Literal["base"] class BaseModelCardTypedDict(TypedDict): @@ -21,6 +26,8 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + default_model_temperature: NotRequired[Nullable[float]] + type: Type class BaseModelCard(BaseModel): @@ -44,9 +51,12 @@ class BaseModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET - # fmt: off - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "base" # type: ignore - # fmt: on + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -59,9 +69,15 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "default_model_temperature", "type", ] - nullable_fields = ["name", "description", "deprecation"] + nullable_fields = [ + "name", + "description", + "deprecation", + "default_model_temperature", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/batcherror.py b/src/mistralai/models/batcherror.py new file mode 100644 index 0000000..4f82344 --- /dev/null +++ b/src/mistralai/models/batcherror.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py new file mode 100644 index 0000000..20f054b --- /dev/null +++ b/src/mistralai/models/batchjobin.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchJobInTypedDict(TypedDict): + input_files: List[str] + endpoint: APIEndpoint + model: str + metadata: NotRequired[Nullable[Dict[str, str]]] + timeout_hours: NotRequired[int] + + +class BatchJobIn(BaseModel): + input_files: List[str] + + endpoint: APIEndpoint + + model: str + + metadata: OptionalNullable[Dict[str, str]] = UNSET + + timeout_hours: Optional[int] = 24 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["metadata", "timeout_hours"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py new file mode 100644 index 0000000..677284f --- /dev/null +++ b/src/mistralai/models/batchjobout.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BatchJobOutObject = Literal["batch"] + + +class BatchJobOutTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + model: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: BatchJobOutObject + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJobOut(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + model: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + OBJECT: Annotated[ + Annotated[Optional[BatchJobOutObject], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + started_at: OptionalNullable[int] = UNSET + + completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ 
+ "object", + "metadata", + "output_file", + "error_file", + "started_at", + "completed_at", + ] + nullable_fields = [ + "metadata", + "output_file", + "error_file", + "started_at", + "completed_at", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py new file mode 100644 index 0000000..f8c63a3 --- /dev/null +++ b/src/mistralai/models/batchjobsout.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobout import BatchJobOut, BatchJobOutTypedDict +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BatchJobsOutObject = Literal["list"] + + +class BatchJobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobOutTypedDict]] + object: BatchJobsOutObject + + +class BatchJobsOut(BaseModel): + total: int + + data: Optional[List[BatchJobOut]] = None + + OBJECT: Annotated[ + Annotated[Optional[BatchJobsOutObject], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/models/batchjobstatus.py b/src/mistralai/models/batchjobstatus.py new file mode 100644 index 0000000..4b28059 --- /dev/null +++ b/src/mistralai/models/batchjobstatus.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BatchJobStatus = Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", +] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py new file mode 100644 index 0000000..6b4cc13 --- /dev/null +++ b/src/mistralai/models/chatclassificationrequest.py @@ -0,0 +1,104 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Union +from typing_extensions import Annotated, TypedDict + + +TwoTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatClassificationRequestInputsTypedDict = Union[ + List[OneTypedDict], List[List[TwoTypedDict]] +] +r"""Chat to classify""" + + +ChatClassificationRequestInputs = Union[List[One], List[List[Two]]] +r"""Chat to classify""" + + +class ChatClassificationRequestTypedDict(TypedDict): + inputs: ChatClassificationRequestInputsTypedDict + r"""Chat to classify""" + model: Nullable[str] + + +class ChatClassificationRequest(BaseModel): + inputs: Annotated[ChatClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: Nullable[str] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index 20d674b..f4f37fb 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -2,11 +2,16 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai.types import BaseModel -from typing import Literal, TypedDict +from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +FinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr +] class 
ChatCompletionChoiceTypedDict(TypedDict): @@ -20,4 +25,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: FinishReason + finish_reason: Annotated[FinishReason, PlainValidator(validate_open_enum(False))] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 7872216..6cdf97b 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -12,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -54,14 +54,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[StopTypedDict] @@ -71,6 +69,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
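With FinishReason now an open enum (the Literal plus UnrecognizedStr, validated via validate_open_enum), a finish reason the SDK does not yet know about no longer fails validation. A sketch with an illustrative payload, assuming ChatCompletionChoice keeps its usual index/message/finish_reason fields:

    from mistralai.models import ChatCompletionChoice

    choice = ChatCompletionChoice.model_validate({
        "index": 0,
        "message": {"role": "assistant", "content": "Hello."},
        "finish_reason": "some_future_reason",  # not in the Literal list
    })
    print(choice.finish_reason)  # kept as the raw string instead of raising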
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -82,8 +86,8 @@ class ChatCompletionRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -91,9 +95,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -109,6 +110,15 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
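A minimal sketch of the new sampling controls on ChatCompletionRequest (presence_penalty, frequency_penalty, n); the model id and prompt are placeholders:

    from mistralai.models import ChatCompletionRequest

    req = ChatCompletionRequest(
        model="mistral-small-latest",  # assumed model id
        messages=[{"role": "user", "content": "Give me three taglines."}],
        presence_penalty=0.5,   # discourage reusing words already present
        frequency_penalty=0.5,  # discourage frequent repetition
        n=3,                    # three completions; input tokens billed once
    )
    print(req.model_dump(by_alias=True)["n"])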
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -118,16 +128,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py index 20c9010..67f1965 100644 --- a/src/mistralai/models/chatcompletionresponse.py +++ b/src/mistralai/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index ccba04a..c56f523 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -12,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -56,14 +56,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
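The switch of temperature to OptionalNullable[float] = UNSET interacts with the serializer shown here: a field left at UNSET is dropped from the payload (so the server applies the model's own default), while an explicit None is sent as null. A sketch, with a placeholder model id:

    from mistralai.models import ChatCompletionRequest

    base = dict(model="mistral-small-latest",
                messages=[{"role": "user", "content": "hi"}])

    omitted = ChatCompletionRequest(**base)                         # temperature stays UNSET
    explicit_null = ChatCompletionRequest(**base, temperature=None)

    print("temperature" in omitted.model_dump(by_alias=True))       # False: key omitted
    print(explicit_null.model_dump(by_alias=True)["temperature"])   # None: sent as null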
Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -72,6 +70,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -83,8 +87,8 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[ChatCompletionStreamRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -92,9 +96,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[ChatCompletionStreamRequestStop] = None @@ -109,6 +110,15 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -118,16 +128,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py index f818dae..aefb773 100644 --- a/src/mistralai/models/checkpointout.py +++ b/src/mistralai/models/checkpointout.py @@ -3,7 +3,7 @@ from __future__ import annotations from .metricout import MetricOut, MetricOutTypedDict from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CheckpointOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classificationobject.py b/src/mistralai/models/classificationobject.py new file mode 100644 index 0000000..e4ee362 --- /dev/null +++ b/src/mistralai/models/classificationobject.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassificationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Classifier result thresholded""" + category_scores: NotRequired[Dict[str, float]] + r"""Classifier result""" + + +class ClassificationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Classifier result thresholded""" + + category_scores: Optional[Dict[str, float]] = None + r"""Classifier result""" diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py new file mode 100644 index 0000000..d2426c4 --- /dev/null +++ b/src/mistralai/models/classificationrequest.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassificationRequestInputsTypedDict = Union[str, List[str]] +r"""Text to classify.""" + + +ClassificationRequestInputs = Union[str, List[str]] +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + model: NotRequired[Nullable[str]] + + +class ClassificationRequest(BaseModel): + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model"] + nullable_fields = ["model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py new file mode 100644 index 0000000..5716db4 --- /dev/null +++ b/src/mistralai/models/classificationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
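ClassificationRequest above is the plain-text counterpart: inputs takes a single string or a list and serializes under the alias "input", while model may be left unset. A minimal sketch using the module path from this diff:

    from mistralai.models.classificationrequest import ClassificationRequest

    req = ClassificationRequest(inputs=["first text", "second text"])
    # model stays UNSET and is omitted; "inputs" appears as "input".
    print(req.model_dump(by_alias=True))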
DO NOT EDIT.""" + +from __future__ import annotations +from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from mistralai.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassificationResponseTypedDict(TypedDict): + id: NotRequired[str] + model: NotRequired[str] + results: NotRequired[List[ClassificationObjectTypedDict]] + + +class ClassificationResponse(BaseModel): + id: Optional[str] = None + + model: Optional[str] = None + + results: Optional[List[ClassificationObject]] = None diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py index 8859d22..4d1fcfb 100644 --- a/src/mistralai/models/completionchunk.py +++ b/src/mistralai/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py index b0286fd..cc85991 100644 --- a/src/mistralai/models/completionevent.py +++ b/src/mistralai/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CompletionEventTypedDict(TypedDict): diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index 227a2f7..b3b2a28 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -2,13 +2,16 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -CompletionResponseStreamChoiceFinishReason = Literal[ - "stop", "length", "error", "tool_calls" +CompletionResponseStreamChoiceFinishReason = Union[ + Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr ] @@ -23,7 +26,10 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + finish_reason: Annotated[ + Nullable[CompletionResponseStreamChoiceFinishReason], + PlainValidator(validate_open_enum(False)), + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py index 2093245..4acb8d5 100644 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import 
Annotated +from typing_extensions import Annotated, TypedDict class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py index dc1a87f..2b346ec 100644 --- a/src/mistralai/models/deletefileout.py +++ b/src/mistralai/models/deletefileout.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class DeleteFileOutTypedDict(TypedDict): diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py index 96dbeb1..c1b1eff 100644 --- a/src/mistralai/models/deletemodelout.py +++ b/src/mistralai/models/deletemodelout.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class DeleteModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 7b7fe79..7a966e0 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -1,30 +1,37 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index 336190c..a4be707 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -9,10 +9,12 @@ from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict DetailedJobOutStatus = 
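Since DeltaMessage.content can now be either a plain string or a list of content chunks, streaming consumers should handle both shapes when accumulating text. A sketch, assuming the package re-exports DeltaMessage and TextChunk:

    from mistralai.models import DeltaMessage, TextChunk

    def delta_text(delta: DeltaMessage) -> str:
        """Best-effort text extraction from a streaming delta."""
        if isinstance(delta.content, str):
            return delta.content
        if isinstance(delta.content, list):
            return "".join(c.text for c in delta.content if isinstance(c, TextChunk))
        return ""  # None, UNSET, or chunk types without text

    print(delta_text(DeltaMessage(content="Hel")))
    print(delta_text(DeltaMessage(content=[TextChunk(text="lo")])))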
Literal[ @@ -54,6 +56,7 @@ class DetailedJobOutTypedDict(TypedDict): modified_at: int training_files: List[str] validation_files: NotRequired[Nullable[List[str]]] + object: DetailedJobOutObject fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]] @@ -87,9 +90,12 @@ class DetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - # fmt: off - OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[DetailedJobOutObject], AfterValidator(validate_const("job")) + ], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 5655472..61e181c 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -4,8 +4,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import List, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict InputsTypedDict = Union[str, List[str]] @@ -19,7 +19,7 @@ class EmbeddingRequestTypedDict(TypedDict): inputs: InputsTypedDict r"""Text to embed.""" - model: str + model: NotRequired[str] r"""ID of the model to use.""" encoding_format: NotRequired[Nullable[str]] r"""The format to return the embeddings in.""" @@ -29,7 +29,7 @@ class EmbeddingRequest(BaseModel): inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" - model: str + model: Optional[str] = "mistral-embed" r"""ID of the model to use.""" encoding_format: OptionalNullable[str] = UNSET @@ -37,7 +37,7 @@ class EmbeddingRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["encoding_format"] + optional_fields = ["model", "encoding_format"] nullable_fields = ["encoding_format"] null_default_fields = [] diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py index d85ceec..aae6fa6 100644 --- a/src/mistralai/models/embeddingresponse.py +++ b/src/mistralai/models/embeddingresponse.py @@ -4,7 +4,8 @@ from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, TypedDict +from typing import List +from typing_extensions import TypedDict class EmbeddingResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py index f37995e..01e2765 100644 --- a/src/mistralai/models/embeddingresponsedata.py +++ b/src/mistralai/models/embeddingresponsedata.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class EmbeddingResponseDataTypedDict(TypedDict): diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py index fa427f1..a9f2287 100644 --- a/src/mistralai/models/eventout.py +++ b/src/mistralai/models/eventout.py @@ -3,8 
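With model now optional on EmbeddingRequest, only the inputs are required and the default model id is filled in. A minimal sketch:

    from mistralai.models import EmbeddingRequest

    req = EmbeddingRequest(inputs=["hello world", "bonjour"])
    print(req.model_dump(by_alias=True)["model"])  # "mistral-embed"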
+3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict class EventOutTypedDict(TypedDict): diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py new file mode 100644 index 0000000..8628b30 --- /dev/null +++ b/src/mistralai/models/filepurpose.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py index def6791..a84a7a8 100644 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/files_api_routes_download_fileop.py b/src/mistralai/models/files_api_routes_download_fileop.py new file mode 100644 index 0000000..168a7fa --- /dev/null +++ b/src/mistralai/models/files_api_routes_download_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py new file mode 100644 index 0000000..03a33af --- /dev/null +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata, validate_open_enum +from pydantic import model_serializer +from pydantic.functional_validators import PlainValidator +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + sample_type: Annotated[ + OptionalNullable[ + List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[ + List[Annotated[Source, PlainValidator(validate_open_enum(False))]] + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + Annotated[ + OptionalNullable[FilePurpose], PlainValidator(validate_open_enum(False)) + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "sample_type", + "source", + "search", + "purpose", + ] + nullable_fields = ["sample_type", "source", "search", "purpose"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py index bfbad27..0c2a95e 100644 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 8eae7af..4f2bb0c 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ 
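A minimal sketch of the new file-listing request model above; the values are illustrative, and because FilePurpose is an open enum both "fine-tune" and "batch" (and future values) validate:

    from mistralai.models.files_api_routes_list_filesop import (
        FilesAPIRoutesListFilesRequest,
    )

    req = FilesAPIRoutesListFilesRequest(page=0, page_size=20, purpose="batch")
    # Unset nullable filters (sample_type, source, search) are omitted.
    print(req.model_dump(by_alias=True))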
b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -1,16 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose import io -from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum import pydantic from pydantic.functional_validators import PlainValidator -from typing import Final, IO, Literal, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -FilesAPIRoutesUploadFilePurpose = Union[Literal["fine-tune"], UnrecognizedStr] +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict class FileTypedDict(TypedDict): @@ -49,6 +47,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file=@path/to/your/file.jsonl ``` """ + purpose: NotRequired[FilePurpose] class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): @@ -68,6 +67,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): ``` """ - # fmt: off - PURPOSE: Annotated[Final[Annotated[Optional[FilesAPIRoutesUploadFilePurpose], PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore - # fmt: on + purpose: Annotated[ + Annotated[Optional[FilePurpose], PlainValidator(validate_open_enum(False))], + FieldMetadata(multipart=True), + ] = None diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index 1ace0fa..952d23a 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -FileSchemaPurpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class FileSchemaTypedDict(TypedDict): @@ -34,6 +22,7 @@ class FileSchemaTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] @@ -55,14 +44,11 @@ class FileSchema(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[FileSchemaPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. 
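On the upload side, purpose becomes an ordinary optional multipart field instead of a pinned "fine-tune" constant, so batch input files can be uploaded as well. A sketch, assuming the generated File body keeps its usual file_name/content fields; the file name and bytes are placeholders:

    from mistralai.models.files_api_routes_upload_fileop import (
        FilesAPIRoutesUploadFileMultiPartBodyParams,
    )

    body = FilesAPIRoutesUploadFileMultiPartBodyParams(
        file={"file_name": "batch_input.jsonl", "content": b'{"custom_id": "1"}\n'},
        purpose="batch",
    )
    print(body.purpose)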
Only accepts fine-tuning (`fine-tune`) for now.""" + source: Annotated[Source, PlainValidator(validate_open_enum(False))] num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 4f00d3d..409aa25 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionRequestStopTypedDict = Union[str, List[str]] @@ -23,14 +23,12 @@ class FIMCompletionRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[FIMCompletionRequestStopTypedDict] @@ -39,6 +37,8 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionRequest(BaseModel): @@ -51,8 +51,8 @@ class FIMCompletionRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -60,9 +60,6 @@ class FIMCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -75,19 +72,29 @@ class FIMCompletionRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py index d9e11df..9fe0582 100644 --- a/src/mistralai/models/fimcompletionresponse.py +++ b/src/mistralai/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class FIMCompletionResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 708542d..8f9c1da 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -23,14 +23,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. 
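A minimal sketch of a fill-in-the-middle request with the fields as rearranged above (min_tokens kept, temperature now UNSET by default); the model id is an assumption:

    from mistralai.models import FIMCompletionRequest

    req = FIMCompletionRequest(
        model="codestral-latest",  # assumed FIM-capable model id
        prompt="def add(a, b):\n    ",
        suffix="\n\nprint(add(2, 3))",
        max_tokens=64,
    )
    print(req.model_dump(by_alias=True)["prompt"])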
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -38,6 +36,8 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionStreamRequest(BaseModel): @@ -50,8 +50,8 @@ class FIMCompletionStreamRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -59,9 +59,6 @@ class FIMCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[FIMCompletionStreamRequestStop] = None @@ -73,19 +70,29 @@ class FIMCompletionStreamRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index fe66d30..b5e1e52 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class FTModelCapabilitiesOutTypedDict(TypedDict): diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index b282a09..9a640a2 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -4,10 +4,15 @@ from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +FTModelCardType = Literal["fine-tuned"] class FTModelCardTypedDict(TypedDict): @@ -25,6 +30,8 @@ class FTModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + default_model_temperature: NotRequired[Nullable[float]] + type: FTModelCardType archived: NotRequired[bool] @@ -55,9 +62,14 @@ class FTModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET - # fmt: off - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "fine-tuned" # type: ignore - # fmt: on + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[ + Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + ], + pydantic.Field(alias="type"), + ] = "fine-tuned" archived: Optional[bool] = False @@ -72,10 +84,16 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "default_model_temperature", "type", "archived", ] - nullable_fields = ["name", "description", "deprecation"] + nullable_fields = [ + "name", + "description", + "deprecation", + "default_model_temperature", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py index 664dd5d..e8d6864 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/ftmodelout.py @@ -6,10 +6,12 @@ FTModelCapabilitiesOutTypedDict, ) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, 
UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict FTModelOutObject = Literal["model"] @@ -23,6 +25,7 @@ class FTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str + object: FTModelOutObject name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -44,9 +47,10 @@ class FTModelOut(BaseModel): job: str - # fmt: off - OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[FTModelOutObject], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index a872eea..942b042 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index 941cc5e..a57d235 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py index 20fc9be..0a6c0b1 100644 --- a/src/mistralai/models/functionname.py +++ b/src/mistralai/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index cb8bad6..715db6b 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict GithubRepositoryInType = Literal["github"] @@ -15,6 +17,7 @@ class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str + type: GithubRepositoryInType ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -26,9 +29,12 @@ class GithubRepositoryIn(BaseModel): token: str - # fmt: off - TYPE: 
Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[GithubRepositoryInType], AfterValidator(validate_const("github")) + ], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 7f023c7..5a0ce31 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict GithubRepositoryOutType = Literal["github"] @@ -15,6 +17,7 @@ class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str + type: GithubRepositoryOutType ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -26,9 +29,12 @@ class GithubRepositoryOut(BaseModel): commit_id: str - # fmt: off - TYPE: Annotated[Final[Optional[GithubRepositoryOutType]], pydantic.Field(alias="type")] = "github" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[GithubRepositoryOutType], AfterValidator(validate_const("github")) + ], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index 991b8bd..37f2dd7 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py index af24a1a..1e8276a 100644 --- a/src/mistralai/models/imageurl.py +++ b/src/mistralai/models/imageurl.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class ImageURLTypedDict(TypedDict): diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 4440902..1c37fe3 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -3,9 +3,11 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict, Union -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypedDict ImageURLChunkType = Literal["image_url"] @@ -20,6 +22,7 @@ class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" image_url: 
ImageURLChunkImageURLTypedDict + type: ImageURLChunkType class ImageURLChunk(BaseModel): @@ -27,6 +30,9 @@ class ImageURLChunk(BaseModel): image_url: ImageURLChunkImageURL - # fmt: off - TYPE: Annotated[Final[Optional[ImageURLChunkType]], pydantic.Field(alias="type")] = "image_url" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[ImageURLChunkType], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index db875c1..a294d29 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -8,8 +8,8 @@ from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict JobInIntegrationsTypedDict = WandbIntegrationTypedDict diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py index 690540d..d1eeb4f 100644 --- a/src/mistralai/models/jobmetadataout.py +++ b/src/mistralai/models/jobmetadataout.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class JobMetadataOutTypedDict(TypedDict): diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index a716cb7..71edce0 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -7,10 +7,12 @@ from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict Status = Literal[ @@ -61,6 +63,8 @@ class JobOutTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain training data.""" validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Object + r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] @@ -102,9 +106,10 @@ class JobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - # fmt: off - OBJECT: Annotated[Final[Optional[Object]], pydantic.Field(alias="object")] = "job" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[Object], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 0000000..5b83d53 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 0000000..d9c7b39 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 0000000..8f0c66c --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[BatchJobStatus]] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + OptionalNullable[BatchJobStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "metadata", + "created_after", + "created_by_me", + "status", + ] + nullable_fields = ["model", "metadata", "created_after", "status"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index da52142..d728efd 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index e84b082..b72ff42 100644 --- 
a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index 0570612..896d34f 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 3320b10..b51b195 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -5,8 +5,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict QueryParamStatus = Literal[ diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index bc1b6d4..3e7989a 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index acc6bf4..a84274f 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py 
b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 50298ce..11e23f8 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -4,8 +4,7 @@ from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index bd5edf6..316bf89 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -3,9 +3,11 @@ from __future__ import annotations from .jobout import JobOut, JobOutTypedDict from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict JobsOutObject = Literal["list"] @@ -14,6 +16,7 @@ class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobOutTypedDict]] + object: JobsOutObject class JobsOut(BaseModel): @@ -21,6 +24,7 @@ class JobsOut(BaseModel): data: Optional[List[JobOut]] = None - # fmt: off - OBJECT: Annotated[Final[Optional[JobsOutObject]], pydantic.Field(alias="object")] = "list" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[JobsOutObject], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index 677cad8..df6b3d3 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict LegacyJobMetadataOutObject = Literal["job.metadata"] @@ -31,6 +33,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + object: LegacyJobMetadataOutObject class LegacyJobMetadataOut(BaseModel): @@ -64,9 +67,13 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - # fmt: off - OBJECT: Annotated[Final[Optional[LegacyJobMetadataOutObject]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[LegacyJobMetadataOutObject], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index 928a7be..b032f63 100644 --- a/src/mistralai/models/listfilesout.py +++ b/src/mistralai/models/listfilesout.py @@ -3,15 +3,19 @@ from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict from mistralai.types import BaseModel -from typing import List, TypedDict +from typing import List +from typing_extensions import TypedDict class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str + total: int class ListFilesOut(BaseModel): data: List[FileSchema] object: str + + total: int diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py index 99fe9fb..7583d92 100644 --- a/src/mistralai/models/metricout.py +++ b/src/mistralai/models/metricout.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class MetricOutTypedDict(TypedDict): diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index af981cc..961f866 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ModelCapabilitiesTypedDict(TypedDict): diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py index 759b931..97ae4c3 100644 --- a/src/mistralai/models/modellist.py +++ b/src/mistralai/models/modellist.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict DataTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index bf53869..aa60ba5 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py index 37c52c9..dd4bccc 100644 --- 
a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator from pydantic import Discriminator, Tag -from typing import TypedDict, Union -from typing_extensions import Annotated +from typing import Union +from typing_extensions import Annotated, TypedDict class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 9cc9bb2..6bf4a5b 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -RetrieveFileOutPurpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class RetrieveFileOutTypedDict(TypedDict): @@ -34,8 +22,10 @@ class RetrieveFileOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source + deleted: bool num_lines: NotRequired[Nullable[int]] @@ -55,14 +45,13 @@ class RetrieveFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] + + source: Annotated[Source, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[RetrieveFileOutPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + deleted: bool num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py index 83424f3..adc90ec 100644 --- a/src/mistralai/models/sampletype.py +++ b/src/mistralai/models/sampletype.py @@ -1,7 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.types import UnrecognizedStr +from typing import Literal, Union -SampleType = Literal["pretrain", "instruct"] +SampleType = Union[ + Literal["pretrain", "instruct", "batch_request", "batch_result", "batch_error"], + UnrecognizedStr, +] diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py index 5bd4c7e..cf05ba8 100644 --- a/src/mistralai/models/security.py +++ b/src/mistralai/models/security.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, SecurityMetadata -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class SecurityTypedDict(TypedDict): diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py index 66d09ae..c21550f 100644 --- a/src/mistralai/models/source.py +++ b/src/mistralai/models/source.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.types import UnrecognizedStr +from typing import Literal, Union -Source = Literal["upload", "repository"] +Source = Union[Literal["upload", "repository", "mistral"], UnrecognizedStr] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 47dc778..f6f3074 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] +SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index 9c1f9d7..130a91c 100644 --- a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict TextChunkType = Literal["text"] @@ -12,11 +14,13 @@ class TextChunkTypedDict(TypedDict): text: str + type: TextChunkType class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[TextChunkType]], pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[TextChunkType], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/src/mistralai/models/tool.py 
b/src/mistralai/models/tool.py index 51295f3..6e746df 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 66d570e..827fd00 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py index fc36512..3b7d60e 100644 --- a/src/mistralai/models/toolchoice.py +++ b/src/mistralai/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 3c4be63..2d469d0 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py index 1917d37..99bd49d 100644 --- a/src/mistralai/models/trainingfile.py +++ b/src/mistralai/models/trainingfile.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingFileTypedDict(TypedDict): diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py index 885f3ff..cc2b037 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/trainingparameters.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingParametersTypedDict(TypedDict): @@ -14,6 +14,7 @@ class TrainingParametersTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] fim_ratio: 
NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] class TrainingParameters(BaseModel): @@ -29,6 +30,8 @@ class TrainingParameters(BaseModel): fim_ratio: OptionalNullable[float] = UNSET + seq_len: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -38,6 +41,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] nullable_fields = [ "training_steps", @@ -45,6 +49,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] null_default_fields = [] diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py index 8ecb027..7d2e414 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/trainingparametersin.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingParametersInTypedDict(TypedDict): @@ -20,6 +20,7 @@ class TrainingParametersInTypedDict(TypedDict): r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] class TrainingParametersIn(BaseModel): @@ -41,6 +42,8 @@ class TrainingParametersIn(BaseModel): fim_ratio: OptionalNullable[float] = UNSET + seq_len: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -50,6 +53,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] nullable_fields = [ "training_steps", @@ -57,6 +61,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] null_default_fields = [] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6eac820..6540df1 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict UnarchiveFTModelOutObject = Literal["model"] @@ -12,14 +14,18 @@ class UnarchiveFTModelOutTypedDict(TypedDict): id: str + object: UnarchiveFTModelOutObject archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - # fmt: off - OBJECT: Annotated[Final[Optional[UnarchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[UnarchiveFTModelOutObject], AfterValidator(validate_const("model")) + ], + 
pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = False diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py index c22c511..603f031 100644 --- a/src/mistralai/models/updateftmodelin.py +++ b/src/mistralai/models/updateftmodelin.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class UpdateFTModelInTypedDict(TypedDict): diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 7754ae3..23e25d5 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -Purpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class UploadFileOutTypedDict(TypedDict): @@ -34,6 +22,7 @@ class UploadFileOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] @@ -55,14 +44,11 @@ class UploadFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[Purpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + source: Annotated[Source, PlainValidator(validate_open_enum(False))] num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py index e8113e3..f7a6e99 100644 --- a/src/mistralai/models/usageinfo.py +++ b/src/mistralai/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index db4176a..af69895 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index ed394a6..03ce984 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 7659e27..d82f921 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict WandbIntegrationType = Literal["wandb"] @@ -16,6 
+18,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" + type: WandbIntegrationType name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -28,9 +31,12 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - # fmt: off - TYPE: Annotated[Final[Optional[WandbIntegrationType]], pydantic.Field(alias="type")] = "wandb" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[WandbIntegrationType], AfterValidator(validate_const("wandb")) + ], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index 5635af7..5514b59 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -2,18 +2,21 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["wandb"] +WandbIntegrationOutType = Literal["wandb"] class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" + type: WandbIntegrationOutType name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -23,9 +26,12 @@ class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - # fmt: off - TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "wandb" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[WandbIntegrationOutType], AfterValidator(validate_const("wandb")) + ], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 32fdcbc..44e95ce 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -76,15 +76,17 @@ def list( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -153,15 +155,17 @@ async def list_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -244,15 +248,17 @@ def retrieve( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -335,15 +341,17 @@ async def retrieve_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -419,15 +427,17 @@ def delete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") 
+ http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -503,15 +513,17 @@ async def delete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -594,15 +606,17 @@ def update( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -685,15 +699,17 @@ async def update_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -767,15 +783,17 @@ def archive( http_res.text, Optional[models.ArchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -849,15 +867,17 @@ async def archive_async( http_res.text, Optional[models.ArchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await 
utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -931,15 +951,17 @@ def unarchive( http_res.text, Optional[models.UnarchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -1013,14 +1035,16 @@ async def unarchive_async( http_res.text, Optional[models.UnarchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 05029ab..71c60fc 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -9,7 +9,9 @@ from mistralai import models, utils from mistralai._hooks import SDKHooks from mistralai.agents import Agents +from mistralai.batch import Batch from mistralai.chat import Chat +from mistralai.classifiers import Classifiers from mistralai.embeddings import Embeddings from mistralai.files import Files from mistralai.fim import Fim @@ -27,6 +29,7 @@ class Mistral(BaseSDK): files: Files r"""Files API""" fine_tuning: FineTuning + batch: Batch chat: Chat r"""Chat Completion API.""" fim: Fim @@ -35,6 +38,8 @@ class Mistral(BaseSDK): r"""Agents API.""" embeddings: Embeddings r"""Embeddings API.""" + classifiers: Classifiers + r"""Classifiers API.""" def __init__( self, @@ -118,7 +123,9 @@ def _init_sdks(self): self.models = Models(self.sdk_configuration) self.files = Files(self.sdk_configuration) self.fine_tuning = FineTuning(self.sdk_configuration) + self.batch = Batch(self.sdk_configuration) self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) + self.classifiers = Classifiers(self.sdk_configuration) diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 0a7c332..101757f 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.0" - gen_version: str = "2.415.8" - user_agent: str = 
"speakeasy-sdk/python 1.1.0 2.415.8 0.0.2 mistralai" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 74109c1..151c87d 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -28,6 +28,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -81,10 +85,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", "validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py index 0d17472..5b3bbb0 100644 --- a/src/mistralai/utils/annotations.py +++ b/src/mistralai/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index 85d57f4..c5eb365 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): return unmarshal(data, typ)