From 559de90b55e95ffcb5ac86cf363d164bf8e76f4f Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 26 Jun 2025 14:40:05 +0000 Subject: [PATCH] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 --- .speakeasy/gen.lock | 14 +- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- RELEASES.md | 12 +- .../agentsapiv1conversationsgetrequest.md | 6 +- .../agentsapiv1conversationshistoryrequest.md | 6 +- ...agentsapiv1conversationsmessagesrequest.md | 6 +- .../agentsapiv1conversationsrestartrequest.md | 2 +- ...sapiv1conversationsrestartstreamrequest.md | 2 +- docs/models/agentscompletionrequest.md | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- docs/models/basemodelcard.md | 3 +- docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- docs/models/contentchunk.md | 6 + docs/models/conversationrestartrequest.md | 22 +-- .../models/conversationrestartrequesttools.md | 41 ++++++ .../conversationrestartstreamrequest.md | 8 +- .../conversationrestartstreamrequesttools.md | 41 ++++++ docs/models/document.md | 6 + docs/models/filechunk.md | 9 ++ docs/models/ftmodelcard.md | 3 +- docs/models/ftmodelcardtype.md | 8 -- docs/models/inputentries.md | 24 ++++ docs/models/messageinputentry.md | 18 +-- docs/models/messageinputentrytype.md | 8 -- docs/models/modelcapabilities.md | 3 +- docs/models/type.md | 6 +- docs/sdks/agents/README.md | 4 +- docs/sdks/chat/README.md | 4 +- docs/sdks/conversations/README.md | 22 ++- docs/sdks/models/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/agents.py | 8 +- src/mistralai/chat.py | 8 +- src/mistralai/conversations.py | 128 ++++++++++++++++-- src/mistralai/models/__init__.py | 19 ++- .../agents_api_v1_conversations_getop.py | 2 + .../agents_api_v1_conversations_historyop.py | 2 + .../agents_api_v1_conversations_messagesop.py | 2 + ...s_api_v1_conversations_restart_streamop.py | 2 + .../agents_api_v1_conversations_restartop.py | 2 + .../models/agentscompletionrequest.py | 2 + .../models/agentscompletionstreamrequest.py | 2 + src/mistralai/models/basemodelcard.py | 12 +- src/mistralai/models/chatcompletionrequest.py | 2 + .../models/chatcompletionstreamrequest.py | 2 + src/mistralai/models/contentchunk.py | 3 + .../models/conversationrestartrequest.py | 110 ++++++++++++++- .../conversationrestartstreamrequest.py | 110 ++++++++++++++- src/mistralai/models/filechunk.py | 23 ++++ src/mistralai/models/ftmodelcard.py | 13 +- src/mistralai/models/inputentries.py | 23 +++- src/mistralai/models/messageinputentry.py | 6 +- src/mistralai/models/modelcapabilities.py | 3 + src/mistralai/models/ocrrequest.py | 6 +- src/mistralai/models_.py | 4 +- 58 files changed, 674 insertions(+), 134 deletions(-) create mode 100644 docs/models/conversationrestartrequesttools.md create mode 100644 docs/models/conversationrestartstreamrequesttools.md create mode 100644 docs/models/filechunk.md delete mode 100644 docs/models/ftmodelcardtype.md delete mode 100644 docs/models/messageinputentrytype.md create mode 100644 src/mistralai/models/filechunk.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index c568d4f3..ae75fd30 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 9c8bd4d6bf675b159a80173b97c1265c + docChecksum: f475c45d9daca1cfdbec568160b56773 docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.8.2 - configChecksum: 
5024c28578f991eabb85310ad8df96b7 + releaseVersion: 1.8.3 + configChecksum: 81d0549cb4d8bf7fd453dea94ea12376 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -169,8 +169,10 @@ generatedFiles: - docs/models/conversationresponseobject.md - docs/models/conversationrestartrequest.md - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartrequesttools.md - docs/models/conversationrestartstreamrequest.md - docs/models/conversationrestartstreamrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequesttools.md - docs/models/conversationstreamrequest.md - docs/models/conversationstreamrequesthandoffexecution.md - docs/models/conversationstreamrequesttools.md @@ -193,6 +195,7 @@ generatedFiles: - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filechunk.md - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md - docs/models/filesapiroutesdownloadfilerequest.md @@ -212,7 +215,6 @@ generatedFiles: - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functioncallentry.md @@ -280,7 +282,6 @@ generatedFiles: - docs/models/messageinputentry.md - docs/models/messageinputentrycontent.md - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - docs/models/messageoutputcontentchunks.md - docs/models/messageoutputentry.md - docs/models/messageoutputentrycontent.md @@ -506,6 +507,7 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filechunk.py - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py - src/mistralai/models/files_api_routes_download_fileop.py @@ -651,7 +653,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768} "422": {} delete_model_v1_models__model_id__delete: "": diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 77710816..51ac392f 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.8.2 + version: 1.8.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c618ac1d..19edc2c2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:357548241f9013494796db31e055452b4378a5d2a5a9f3ea08739e75676f1ac7 + sourceBlobDigest: 
sha256:6f9dea860bc4f914e3145db945f02936537245927082a018611e8d51ca1ff3a8 tags: - latest - - speakeasy-sdk-regen-1749573609 + - speakeasy-sdk-regen-1750948743 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:357548241f9013494796db31e055452b4378a5d2a5a9f3ea08739e75676f1ac7 + sourceBlobDigest: sha256:6f9dea860bc4f914e3145db945f02936537245927082a018611e8d51ca1ff3a8 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:dc4396ba994048a9f31c008dced1a46a9e54d89973e9608039a7bc37b1052957 + codeSamplesRevisionDigest: sha256:23aa878d903a7ef63dfbe3196f3b4d23836239ebd9481f4e1e094f53e21af410 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 265eda73..f098248c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -238,4 +238,14 @@ Based on: ### Generated - [python v1.8.2] . ### Releases -- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . + +## 2025-06-26 14:38:46 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.3] . +### Releases +- [PyPI v1.8.3] https://pypi.org/project/mistralai/1.8.3 - . \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md index 0d2d7827..67d450c8 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md index f0d4f049..7e5d39e9 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md index b3189925..a91ab046 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md index 11a2fe2e..a18a41f5 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md index 4cbb9d6c..7548286a 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 8ace69d9..398e5f5c 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -19,4 +19,4 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 0bab012c..4e924cf0 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -19,4 +19,4 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 0bdbb65f..84ad75c7 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,5 +15,6 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Optional[Literal["base"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a850b5b8..9c239961 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index cf286cda..d1e31a18 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 8cf7fad1..a65cd054 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -27,3 +27,9 @@ value: models.TextChunk = /* values here */ value: models.ReferenceChunk = /* values here */ ``` +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 15a6ead4..d093755a 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -5,11 +5,17 @@ Request to restart a new conversation from a given entry in the conversation. ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
|
-| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A |
-| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- |
+| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A |
+| `from_entry_id` | *str* | :heavy_check_mark: | N/A |
+| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A |
+| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. |
+| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.ConversationRestartRequestTools](../models/conversationrestartrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent. |
\ No newline at end of file
diff --git a/docs/models/conversationrestartrequesttools.md b/docs/models/conversationrestartrequesttools.md
new file mode 100644
index 00000000..8a769b17
--- /dev/null
+++ b/docs/models/conversationrestartrequesttools.md
@@ -0,0 +1,41 @@
+# ConversationRestartRequestTools
+
+
+## Supported Types
+
+### `models.CodeInterpreterTool`
+
+```python
+value: models.CodeInterpreterTool = /* values here */
+```
+
+### `models.DocumentLibraryTool`
+
+```python
+value: models.DocumentLibraryTool = /* values here */
+```
+
+### `models.FunctionTool`
+
+```python
+value: models.FunctionTool = /* values here */
+```
+
+### `models.ImageGenerationTool`
+
+```python
+value: models.ImageGenerationTool = /* values here */
+```
+
+### `models.WebSearchTool`
+
+```python
+value: models.WebSearchTool = /* values here */
+```
+
+### `models.WebSearchPremiumTool`
+
+```python
+value: models.WebSearchPremiumTool = /* values here */
+```
+
diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md
index 30f3767c..f356096a 100644
--- a/docs/models/conversationrestartstreamrequest.md
+++ b/docs/models/conversationrestartstreamrequest.md
@@ -12,4 +12,10 @@ Request to restart a new conversation from a given entry in the conversation.
 | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A |
 | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. |
 | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A |
-| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
\ No newline at end of file
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.ConversationRestartStreamRequestTools](../models/conversationrestartstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent.
| \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequesttools.md b/docs/models/conversationrestartstreamrequesttools.md new file mode 100644 index 00000000..212c5893 --- /dev/null +++ b/docs/models/conversationrestartstreamrequesttools.md @@ -0,0 +1,41 @@ +# ConversationRestartStreamRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/document.md b/docs/models/document.md index e2940355..509d43b7 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -5,6 +5,12 @@ Document to run OCR on ## Supported Types +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + ### `models.DocumentURLChunk` ```python diff --git a/docs/models/filechunk.md b/docs/models/filechunk.md new file mode 100644 index 00000000..46c838b0 --- /dev/null +++ b/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 1efeadb2..9ecab416 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,6 +19,7 @@ Extra fields for fine-tuned models. 
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["fine-tuned"]]* | :heavy_minus_sign: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md index e1e48279..b44a467d 100644 --- a/docs/models/inputentries.md +++ b/docs/models/inputentries.md @@ -9,9 +9,33 @@ value: models.MessageInputEntry = /* values here */ ``` +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + ### `models.FunctionResultEntry` ```python value: models.FunctionResultEntry = /* values here */ ``` +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index a1573ed5..8fa55e47 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,12 +5,12 @@ Representation of an input message inside the conversation. 
## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 2e399ab6..36b27938 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -9,4 +9,5 @@ | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f5..357acf0b 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input 
| \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index c7fdb687..96353ddd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,7 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -116,7 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 3a8d57fa..d9a85e63 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,7 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -121,7 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 6d6aaa2c..7f7f9cb8 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -135,7 +135,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -220,7 +220,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -260,7 +260,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
|

### Response

@@ -300,13 +300,19 @@ with Mistral(

 | Parameter | Type | Required | Description |
 | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
-| `conversation_id` | *str* | :heavy_check_mark: | N/A |
+| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. |
 | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A |
 | `from_entry_id` | *str* | :heavy_check_mark: | N/A |
 | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A |
 | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. |
 | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.ConversationRestartRequestTools](../../models/conversationrestartrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
 | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent. |
 | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |

### Response

@@ -447,13 +453,19 @@ with Mistral(

 | Parameter | Type | Required | Description |
 | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
-| `conversation_id` | *str* | :heavy_check_mark: | N/A |
+| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. |
 | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A |
 | `from_entry_id` | *str* | :heavy_check_mark: | N/A |
 | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A |
 | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. |
 | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.ConversationRestartStreamRequestTools](../../models/conversationrestartstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
 | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent. |
 | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |

### Response

diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md
index d7a5ed85..7dd5d1de 100644
--- a/docs/sdks/models/README.md
+++ b/docs/sdks/models/README.md
@@ -55,7 +55,7 @@ with Mistral(

 ## retrieve

-Retrieve a model information.
+Retrieve information about a model.

 ### Example Usage

diff --git a/pyproject.toml b/pyproject.toml
index c7cb9095..975b1435 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mistralai"
-version = "1.8.2"
+version = "1.8.3"
 description = "Python Client SDK for the Mistral AI API."
 authors = [{ name = "Mistral" },]
 readme = "README-PYPI.md"
diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py
index fc416fd3..3a6da97c 100644
--- a/src/mistralai/_version.py
+++ b/src/mistralai/_version.py
@@ -3,10 +3,10 @@ import importlib.metadata

 __title__: str = "mistralai"
-__version__: str = "1.8.2"
+__version__: str = "1.8.3"
 __openapi_doc_version__: str = "1.0.0"
 __gen_version__: str = "2.548.6"
-__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai"
+__user_agent__: str = "speakeasy-sdk/python 1.8.3 2.548.6 1.0.0 mistralai"

 try:
     if __package__ is not None:
diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py
index 4fbb25dd..0e7c7ae3 100644
--- a/src/mistralai/agents.py
+++ b/src/mistralai/agents.py
@@ -69,7 +69,7 @@ def complete(
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
         :param parallel_tool_calls:
-        :param prompt_mode:
+        :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -238,7 +238,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -409,7 +409,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -586,7 +586,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 96fcf65d..41d4a9f2 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -148,7 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -316,7 +316,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -494,7 +494,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -680,7 +680,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 6ef02edd..3fe19c0e 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -708,7 +708,7 @@ def get( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -810,7 +810,7 @@ async def get_async( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1170,7 +1170,7 @@ def get_history( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1269,7 +1269,7 @@ async def get_history_async( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1368,7 +1368,7 @@ def get_messages( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. 
- :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1467,7 +1467,7 @@ async def get_messages_async( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1564,9 +1564,20 @@ def restart( handoff_execution: Optional[ models.ConversationRestartRequestHandoffExecution ] = "server", + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationRestartRequestTools], + List[models.ConversationRestartRequestToolsTypedDict], + ] + ] = None, completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1576,13 +1587,19 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. :param completion_args: White-listed arguments from the completion API + :param name: Name given to the conversation. + :param description: Description of the what the conversation is about. + :param model: Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. + :param agent_id: Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1605,10 +1622,18 @@ def restart( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRestartRequestTools]] + ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + name=name, + description=description, + from_entry_id=from_entry_id, + model=model, + agent_id=agent_id, ), ) @@ -1696,9 +1721,20 @@ async def restart_async( handoff_execution: Optional[ models.ConversationRestartRequestHandoffExecution ] = "server", + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.ConversationRestartRequestTools], + List[models.ConversationRestartRequestToolsTypedDict], + ] + ] = None, completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1708,13 +1744,19 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. :param completion_args: White-listed arguments from the completion API + :param name: Name given to the conversation. + :param description: Description of the what the conversation is about. + :param model: Model which is used as assistant of the conversation. If not provided, will use the original conversation's model. + :param agent_id: Agent which will be used as assistant to the conversation. If not provided, will use the original conversation's agent. 
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -1737,10 +1779,18 @@ async def restart_async(
                 stream=stream,
                 store=store,
                 handoff_execution=handoff_execution,
-                from_entry_id=from_entry_id,
+                instructions=instructions,
+                tools=utils.get_pydantic_model(
+                    tools, Optional[List[models.ConversationRestartRequestTools]]
+                ),
                 completion_args=utils.get_pydantic_model(
                     completion_args, Optional[models.CompletionArgs]
                 ),
+                name=name,
+                description=description,
+                from_entry_id=from_entry_id,
+                model=model,
+                agent_id=agent_id,
             ),
         )

@@ -2396,9 +2446,20 @@ def restart_stream(
         handoff_execution: Optional[
             models.ConversationRestartStreamRequestHandoffExecution
         ] = "server",
+        instructions: OptionalNullable[str] = UNSET,
+        tools: Optional[
+            Union[
+                List[models.ConversationRestartStreamRequestTools],
+                List[models.ConversationRestartStreamRequestToolsTypedDict],
+            ]
+        ] = None,
         completion_args: Optional[
             Union[models.CompletionArgs, models.CompletionArgsTypedDict]
         ] = None,
+        name: OptionalNullable[str] = UNSET,
+        description: OptionalNullable[str] = UNSET,
+        model: OptionalNullable[str] = UNSET,
+        agent_id: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -2408,13 +2469,19 @@ def restart_stream(

         Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

-        :param conversation_id:
+        :param conversation_id: ID of the original conversation which is being restarted.
         :param inputs:
         :param from_entry_id:
         :param stream:
         :param store: Whether to store the results into our servers or not.
         :param handoff_execution:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools available to the model during the conversation.
         :param completion_args: White-listed arguments from the completion API
+        :param name: Name given to the conversation.
+        :param description: Description of what the conversation is about.
+        :param model: Model used as the assistant for the conversation. If not provided, the original conversation's model is used.
+        :param agent_id: Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -2437,10 +2504,18 @@ def restart_stream(
                 stream=stream,
                 store=store,
                 handoff_execution=handoff_execution,
-                from_entry_id=from_entry_id,
+                instructions=instructions,
+                tools=utils.get_pydantic_model(
+                    tools, Optional[List[models.ConversationRestartStreamRequestTools]]
+                ),
                 completion_args=utils.get_pydantic_model(
                     completion_args, Optional[models.CompletionArgs]
                 ),
+                name=name,
+                description=description,
+                from_entry_id=from_entry_id,
+                model=model,
+                agent_id=agent_id,
             ),
         )

@@ -2533,9 +2608,20 @@ async def restart_stream_async(
         handoff_execution: Optional[
             models.ConversationRestartStreamRequestHandoffExecution
         ] = "server",
+        instructions: OptionalNullable[str] = UNSET,
+        tools: Optional[
+            Union[
+                List[models.ConversationRestartStreamRequestTools],
+                List[models.ConversationRestartStreamRequestToolsTypedDict],
+            ]
+        ] = None,
         completion_args: Optional[
             Union[models.CompletionArgs, models.CompletionArgsTypedDict]
         ] = None,
+        name: OptionalNullable[str] = UNSET,
+        description: OptionalNullable[str] = UNSET,
+        model: OptionalNullable[str] = UNSET,
+        agent_id: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -2545,13 +2631,19 @@ async def restart_stream_async(

         Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

-        :param conversation_id:
+        :param conversation_id: ID of the original conversation which is being restarted.
         :param inputs:
         :param from_entry_id:
         :param stream:
         :param store: Whether to store the results into our servers or not.
         :param handoff_execution:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools available to the model during the conversation.
         :param completion_args: White-listed arguments from the completion API
+        :param name: Name given to the conversation.
+        :param description: Description of what the conversation is about.
+        :param model: Model used as the assistant for the conversation. If not provided, the original conversation's model is used.
+        :param agent_id: Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -2574,10 +2666,18 @@ async def restart_stream_async(
                 stream=stream,
                 store=store,
                 handoff_execution=handoff_execution,
-                from_entry_id=from_entry_id,
+                instructions=instructions,
+                tools=utils.get_pydantic_model(
+                    tools, Optional[List[models.ConversationRestartStreamRequestTools]]
+                ),
                 completion_args=utils.get_pydantic_model(
                     completion_args, Optional[models.CompletionArgs]
                 ),
+                name=name,
+                description=description,
+                from_entry_id=from_entry_id,
+                model=model,
+                agent_id=agent_id,
             ),
         )

diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py
index e6493e90..b727ab7c 100644
--- a/src/mistralai/models/__init__.py
+++ b/src/mistralai/models/__init__.py
@@ -119,7 +119,7 @@
     AssistantMessageRole,
     AssistantMessageTypedDict,
 )
-from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type
+from .basemodelcard import BaseModelCard, BaseModelCardTypedDict
 from .batcherror import BatchError, BatchErrorTypedDict
 from .batchjobin import BatchJobIn, BatchJobInTypedDict
 from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict
@@ -315,11 +315,15 @@
 from .conversationrestartrequest import (
     ConversationRestartRequest,
     ConversationRestartRequestHandoffExecution,
+    ConversationRestartRequestTools,
+    ConversationRestartRequestToolsTypedDict,
     ConversationRestartRequestTypedDict,
 )
 from .conversationrestartstreamrequest import (
     ConversationRestartStreamRequest,
     ConversationRestartStreamRequestHandoffExecution,
+    ConversationRestartStreamRequestTools,
+    ConversationRestartStreamRequestToolsTypedDict,
     ConversationRestartStreamRequestTypedDict,
 )
 from .conversationstreamrequest import (
@@ -357,6 +361,7 @@
 from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict
 from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict
 from .eventout import EventOut, EventOutTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .filepurpose import FilePurpose
 from .files_api_routes_delete_fileop import (
     FilesAPIRoutesDeleteFileRequest,
@@ -405,7 +410,7 @@
     FTModelCapabilitiesOut,
     FTModelCapabilitiesOutTypedDict,
 )
-from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict
+from .ftmodelcard import FTModelCard, FTModelCardTypedDict
 from .function import Function, FunctionTypedDict
 from .functioncall import (
     Arguments,
@@ -565,9 +570,9 @@
     MessageInputEntryContent,
     MessageInputEntryContentTypedDict,
     MessageInputEntryRole,
-    MessageInputEntryType,
     MessageInputEntryTypedDict,
     Object,
+    Type,
 )
 from .messageoutputcontentchunks import (
     MessageOutputContentChunks,
@@ -937,9 +942,13 @@
     "ConversationResponseTypedDict",
     "ConversationRestartRequest",
     "ConversationRestartRequestHandoffExecution",
+    "ConversationRestartRequestTools",
+    "ConversationRestartRequestToolsTypedDict",
     "ConversationRestartRequestTypedDict",
     "ConversationRestartStreamRequest",
     "ConversationRestartStreamRequestHandoffExecution",
+    "ConversationRestartStreamRequestTools",
+    "ConversationRestartStreamRequestToolsTypedDict",
     "ConversationRestartStreamRequestTypedDict",
     "ConversationStreamRequest",
     "ConversationStreamRequestHandoffExecution",
@@ -993,9 +1002,10 @@
     "FTModelCapabilitiesOut",
     "FTModelCapabilitiesOutTypedDict",
     "FTModelCard",
-    "FTModelCardType",
     "FTModelCardTypedDict",
     "File",
+    "FileChunk",
+    "FileChunkTypedDict",
     "FilePurpose",
     "FileSchema",
     "FileSchemaTypedDict",
@@ -1134,7 +1144,6 @@
     "MessageInputEntryContent",
     "MessageInputEntryContentTypedDict",
     "MessageInputEntryRole",
-    "MessageInputEntryType",
     "MessageInputEntryTypedDict",
     "MessageOutputContentChunks",
     "MessageOutputContentChunksTypedDict",
diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py
index 4a800ad6..a37a61ba 100644
--- a/src/mistralai/models/agents_api_v1_conversations_getop.py
+++ b/src/mistralai/models/agents_api_v1_conversations_getop.py
@@ -11,12 +11,14 @@

 class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching metadata."""


 class AgentsAPIV1ConversationsGetRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching metadata."""


 AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType(
diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py
index 09fb6081..b8c33d1b 100644
--- a/src/mistralai/models/agents_api_v1_conversations_historyop.py
+++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py
@@ -8,9 +8,11 @@

 class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching entries."""


 class AgentsAPIV1ConversationsHistoryRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching entries."""
diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py
index ade66e5e..f0dac8bf 100644
--- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py
+++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py
@@ -8,9 +8,11 @@

 class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the conversation from which we are fetching messages."""


 class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the conversation from which we are fetching messages."""
diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py
index c8fd8475..f39b74eb 100644
--- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py
+++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py
@@ -12,6 +12,7 @@

 class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict


@@ -19,6 +20,7 @@
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_stream_request: Annotated[
         ConversationRestartStreamRequest,
diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py
index aa867aff..f706c066 100644
--- a/src/mistralai/models/agents_api_v1_conversations_restartop.py
+++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py
@@ -12,6 +12,7 @@

 class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict):
     conversation_id: str
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_request: ConversationRestartRequestTypedDict


@@ -19,6 +20,7 @@
     conversation_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
+    r"""ID of the original conversation which is being restarted."""
     conversation_restart_request: Annotated[
         ConversationRestartRequest,
diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py
index e99dcfc2..99b33ca4 100644
--- a/src/mistralai/models/agentscompletionrequest.py
+++ b/src/mistralai/models/agentscompletionrequest.py
@@ -89,6 +89,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionRequest(BaseModel):
@@ -132,6 +133,7 @@ class AgentsCompletionRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py
index b4b423f5..8a8cc81c 100644
--- a/src/mistralai/models/agentscompletionstreamrequest.py
+++ b/src/mistralai/models/agentscompletionstreamrequest.py
@@ -88,6 +88,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""


 class AgentsCompletionStreamRequest(BaseModel):
@@ -130,6 +131,7 @@ class AgentsCompletionStreamRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py
index edb81741..ff4d2838 100644
--- a/src/mistralai/models/basemodelcard.py
+++ b/src/mistralai/models/basemodelcard.py
@@ -12,9 +12,6 @@
 from typing_extensions import Annotated, NotRequired, TypedDict


-Type = Literal["base"]
-
-
 class BaseModelCardTypedDict(TypedDict):
     id: str
     capabilities: ModelCapabilitiesTypedDict
@@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: Type
+    type: Literal["base"]


 class BaseModelCard(BaseModel):
@@ -51,10 +49,12 @@ class BaseModelCard(BaseModel):

     deprecation: OptionalNullable[datetime] = UNSET

+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET

     TYPE: Annotated[
-        Annotated[Optional[Type], AfterValidator(validate_const("base"))],
+        Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))],
         pydantic.Field(alias="type"),
     ] = "base"

@@ -69,6 +69,7 @@ def serialize_model(self, handler):
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
         ]
@@ -76,6 +77,7 @@
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py
index 004cc011..286bd988 100644
--- a/src/mistralai/models/chatcompletionrequest.py
+++ b/src/mistralai/models/chatcompletionrequest.py
@@ -89,6 +89,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py
index 78a85bef..6516e4bf 100644
--- a/src/mistralai/models/chatcompletionstreamrequest.py
+++ b/src/mistralai/models/chatcompletionstreamrequest.py
@@ -92,6 +92,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -142,6 +143,7 @@ class ChatCompletionStreamRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py
index ff7d9fcf..4cb8ab6d 100644
--- a/src/mistralai/models/contentchunk.py
+++ b/src/mistralai/models/contentchunk.py
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
@@ -17,6 +18,7 @@
         TextChunkTypedDict,
         ImageURLChunkTypedDict,
         ReferenceChunkTypedDict,
+        FileChunkTypedDict,
         DocumentURLChunkTypedDict,
     ],
 )
@@ -28,6 +30,7 @@
         Annotated[DocumentURLChunk, Tag("document_url")],
         Annotated[TextChunk, Tag("text")],
         Annotated[ReferenceChunk, Tag("reference")],
+        Annotated[FileChunk, Tag("file")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py
index 58376140..be9fdb90 100644
--- a/src/mistralai/models/conversationrestartrequest.py
+++ b/src/mistralai/models/conversationrestartrequest.py
@@ -1,15 +1,48 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
 from .completionargs import CompletionArgs, CompletionArgsTypedDict
 from .conversationinputs import ConversationInputs, ConversationInputsTypedDict
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
+from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
+from .functiontool import FunctionTool, FunctionToolTypedDict
+from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
+from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict
+from .websearchtool import WebSearchTool, WebSearchToolTypedDict
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


 ConversationRestartRequestHandoffExecution = Literal["client", "server"]


+ConversationRestartRequestToolsTypedDict = TypeAliasType(
+    "ConversationRestartRequestToolsTypedDict",
+    Union[
+        WebSearchToolTypedDict,
+        WebSearchPremiumToolTypedDict,
+        CodeInterpreterToolTypedDict,
+        ImageGenerationToolTypedDict,
+        FunctionToolTypedDict,
+        DocumentLibraryToolTypedDict,
+    ],
+)
+
+
+ConversationRestartRequestTools = Annotated[
+    Union[
+        Annotated[CodeInterpreterTool, Tag("code_interpreter")],
+        Annotated[DocumentLibraryTool, Tag("document_library")],
+        Annotated[FunctionTool, Tag("function")],
+        Annotated[ImageGenerationTool, Tag("image_generation")],
+        Annotated[WebSearchTool, Tag("web_search")],
+        Annotated[WebSearchPremiumTool, Tag("web_search_premium")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+

 class ConversationRestartRequestTypedDict(TypedDict):
     r"""Request to restart a new conversation from a given entry in the conversation."""
@@ -20,8 +53,20 @@ class ConversationRestartRequestTypedDict(TypedDict):
     store: NotRequired[bool]
     r"""Whether to store the results into our servers or not."""
     handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution]
+    instructions: NotRequired[Nullable[str]]
+    r"""Instruction prompt the model will follow during the conversation."""
+    tools: NotRequired[List[ConversationRestartRequestToolsTypedDict]]
+    r"""List of tools available to the model during the conversation."""
     completion_args: NotRequired[CompletionArgsTypedDict]
     r"""White-listed arguments from the completion API"""
+    name: NotRequired[Nullable[str]]
+    r"""Name given to the conversation."""
+    description: NotRequired[Nullable[str]]
+    r"""Description of what the conversation is about."""
+    model: NotRequired[Nullable[str]]
+    r"""Model used as the assistant for the conversation. If not provided, the original conversation's model is used."""
+    agent_id: NotRequired[Nullable[str]]
+    r"""Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used."""


 class ConversationRestartRequest(BaseModel):
@@ -38,5 +83,64 @@ class ConversationRestartRequest(BaseModel):

     handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server"

+    instructions: OptionalNullable[str] = UNSET
+    r"""Instruction prompt the model will follow during the conversation."""
+
+    tools: Optional[List[ConversationRestartRequestTools]] = None
+    r"""List of tools available to the model during the conversation."""
+
     completion_args: Optional[CompletionArgs] = None
     r"""White-listed arguments from the completion API"""
+
+    name: OptionalNullable[str] = UNSET
+    r"""Name given to the conversation."""
+
+    description: OptionalNullable[str] = UNSET
+    r"""Description of what the conversation is about."""
+
+    model: OptionalNullable[str] = UNSET
+    r"""Model used as the assistant for the conversation. If not provided, the original conversation's model is used."""
+
+    agent_id: OptionalNullable[str] = UNSET
+    r"""Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "stream",
+            "store",
+            "handoff_execution",
+            "instructions",
+            "tools",
+            "completion_args",
+            "name",
+            "description",
+            "model",
+            "agent_id",
+        ]
+        nullable_fields = ["instructions", "name", "description", "model", "agent_id"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                k not in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py
index f213aea3..a42618f7 100644
--- a/src/mistralai/models/conversationrestartstreamrequest.py
+++ b/src/mistralai/models/conversationrestartstreamrequest.py
@@ -1,15 +1,48 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
 from .completionargs import CompletionArgs, CompletionArgsTypedDict
 from .conversationinputs import ConversationInputs, ConversationInputsTypedDict
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
+from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
+from .functiontool import FunctionTool, FunctionToolTypedDict
+from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
+from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict
+from .websearchtool import WebSearchTool, WebSearchToolTypedDict
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import get_discriminator
+from pydantic import Discriminator, Tag, model_serializer
+from typing import List, Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


 ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"]


+ConversationRestartStreamRequestToolsTypedDict = TypeAliasType(
+    "ConversationRestartStreamRequestToolsTypedDict",
+    Union[
+        WebSearchToolTypedDict,
+        WebSearchPremiumToolTypedDict,
+        CodeInterpreterToolTypedDict,
+        ImageGenerationToolTypedDict,
+        FunctionToolTypedDict,
+        DocumentLibraryToolTypedDict,
+    ],
+)
+
+
+ConversationRestartStreamRequestTools = Annotated[
+    Union[
+        Annotated[CodeInterpreterTool, Tag("code_interpreter")],
+        Annotated[DocumentLibraryTool, Tag("document_library")],
+        Annotated[FunctionTool, Tag("function")],
+        Annotated[ImageGenerationTool, Tag("image_generation")],
+        Annotated[WebSearchTool, Tag("web_search")],
+        Annotated[WebSearchPremiumTool, Tag("web_search_premium")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+

 class ConversationRestartStreamRequestTypedDict(TypedDict):
     r"""Request to restart a new conversation from a given entry in the conversation."""
@@ -20,8 +53,20 @@ class ConversationRestartStreamRequestTypedDict(TypedDict):
     store: NotRequired[bool]
     r"""Whether to store the results into our servers or not."""
     handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution]
+    instructions: NotRequired[Nullable[str]]
+    r"""Instruction prompt the model will follow during the conversation."""
+    tools: NotRequired[List[ConversationRestartStreamRequestToolsTypedDict]]
+    r"""List of tools available to the model during the conversation."""
     completion_args: NotRequired[CompletionArgsTypedDict]
     r"""White-listed arguments from the completion API"""
+    name: NotRequired[Nullable[str]]
+    r"""Name given to the conversation."""
+    description: NotRequired[Nullable[str]]
+    r"""Description of what the conversation is about."""
+    model: NotRequired[Nullable[str]]
+    r"""Model used as the assistant for the conversation. If not provided, the original conversation's model is used."""
+    agent_id: NotRequired[Nullable[str]]
+    r"""Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used."""


 class ConversationRestartStreamRequest(BaseModel):
@@ -40,5 +85,64 @@ class ConversationRestartStreamRequest(BaseModel):

     handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = (
         "server"
     )

+    instructions: OptionalNullable[str] = UNSET
+    r"""Instruction prompt the model will follow during the conversation."""
+
+    tools: Optional[List[ConversationRestartStreamRequestTools]] = None
+    r"""List of tools available to the model during the conversation."""
+
     completion_args: Optional[CompletionArgs] = None
     r"""White-listed arguments from the completion API"""
+
+    name: OptionalNullable[str] = UNSET
+    r"""Name given to the conversation."""
+
+    description: OptionalNullable[str] = UNSET
+    r"""Description of what the conversation is about."""
+
+    model: OptionalNullable[str] = UNSET
+    r"""Model used as the assistant for the conversation. If not provided, the original conversation's model is used."""
+
+    agent_id: OptionalNullable[str] = UNSET
+    r"""Agent used as the assistant for the conversation. If not provided, the original conversation's agent is used."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "stream",
+            "store",
+            "handoff_execution",
+            "instructions",
+            "tools",
+            "completion_args",
+            "name",
+            "description",
+            "model",
+            "agent_id",
+        ]
+        nullable_fields = ["instructions", "name", "description", "model", "agent_id"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                k not in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py
new file mode 100644
index 00000000..83e60cef
--- /dev/null
+++ b/src/mistralai/models/filechunk.py
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class FileChunkTypedDict(TypedDict):
+    file_id: str
+    type: Literal["file"]
+
+
+class FileChunk(BaseModel):
+    file_id: str
+
+    TYPE: Annotated[
+        Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))],
+        pydantic.Field(alias="type"),
+    ] = "file"
diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py
index 9a640a28..73cc418c 100644
--- a/src/mistralai/models/ftmodelcard.py
+++ b/src/mistralai/models/ftmodelcard.py
@@ -12,9 +12,6 @@
 from typing_extensions import Annotated, NotRequired, TypedDict


-FTModelCardType = Literal["fine-tuned"]
-
-
 class FTModelCardTypedDict(TypedDict):
     r"""Extra fields for fine-tuned models."""

@@ -30,8 +27,9 @@ class FTModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: FTModelCardType
+    type: Literal["fine-tuned"]
     archived: NotRequired[bool]

@@ -62,11 +60,14 @@ class FTModelCard(BaseModel):

     deprecation: OptionalNullable[datetime] = UNSET

+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET

     TYPE: Annotated[
         Annotated[
-            Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned"))
+            Optional[Literal["fine-tuned"]],
+            AfterValidator(validate_const("fine-tuned")),
         ],
         pydantic.Field(alias="type"),
     ] = "fine-tuned"
@@ -84,6 +85,7 @@ def serialize_model(self, handler):
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
             "archived",
         ]
@@ -92,6 +94,7 @@
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py
index 9c0fea6e..0221f968 100644
--- a/src/mistralai/models/inputentries.py
+++ b/src/mistralai/models/inputentries.py
@@ -1,18 +1,37 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict
+from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict
 from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict
 from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict
+from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict
+from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict
 from typing import Union
 from typing_extensions import TypeAliasType


 InputEntriesTypedDict = TypeAliasType(
     "InputEntriesTypedDict",
-    Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict],
+    Union[
+        MessageInputEntryTypedDict,
+        FunctionResultEntryTypedDict,
+        ToolExecutionEntryTypedDict,
+        FunctionCallEntryTypedDict,
+        MessageOutputEntryTypedDict,
+        AgentHandoffEntryTypedDict,
+    ],
 )


 InputEntries = TypeAliasType(
-    "InputEntries", Union[MessageInputEntry, FunctionResultEntry]
+    "InputEntries",
+    Union[
+        MessageInputEntry,
+        FunctionResultEntry,
+        ToolExecutionEntry,
+        FunctionCallEntry,
+        MessageOutputEntry,
+        AgentHandoffEntry,
+    ],
 )
diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py
index 3d642cdf..486fe733 100644
--- a/src/mistralai/models/messageinputentry.py
+++ b/src/mistralai/models/messageinputentry.py
@@ -14,7 +14,7 @@
 Object = Literal["entry"]


-MessageInputEntryType = Literal["message.input"]
+Type = Literal["message.input"]


 MessageInputEntryRole = Literal["assistant", "user"]
@@ -35,7 +35,7 @@ class MessageInputEntryTypedDict(TypedDict):
     role: MessageInputEntryRole
     content: MessageInputEntryContentTypedDict
     object: NotRequired[Object]
-    type: NotRequired[MessageInputEntryType]
+    type: NotRequired[Type]
     created_at: NotRequired[datetime]
     completed_at: NotRequired[Nullable[datetime]]
     id: NotRequired[str]
@@ -50,7 +50,7 @@ class MessageInputEntry(BaseModel):

     object: Optional[Object] = "entry"

-    type: Optional[MessageInputEntryType] = "message.input"
+    type: Optional[Type] = "message.input"

     created_at: Optional[datetime] = None
diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py
index 961f8664..54c5f2a2 100644
--- a/src/mistralai/models/modelcapabilities.py
+++ b/src/mistralai/models/modelcapabilities.py
@@ -12,6 +12,7 @@ class ModelCapabilitiesTypedDict(TypedDict):
     function_calling: NotRequired[bool]
     fine_tuning: NotRequired[bool]
     vision: NotRequired[bool]
+    classification: NotRequired[bool]


 class ModelCapabilities(BaseModel):
@@ -24,3 +25,5 @@
     fine_tuning: Optional[bool] = False

     vision: Optional[bool] = False
+
+    classification: Optional[bool] = False
diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py
index 4f9dfd47..40e67a1f 100644
--- a/src/mistralai/models/ocrrequest.py
+++ b/src/mistralai/models/ocrrequest.py
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
@@ -11,12 +12,13 @@


 DocumentTypedDict = TypeAliasType(
-    "DocumentTypedDict", Union[ImageURLChunkTypedDict, DocumentURLChunkTypedDict]
+    "DocumentTypedDict",
+    Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict],
 )
 r"""Document to run OCR on"""


-Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk])
+Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk])
 r"""Document to run OCR on"""
diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py
index 96aab468..9790cb30 100644
--- a/src/mistralai/models_.py
+++ b/src/mistralai/models_.py
@@ -206,7 +206,7 @@ def retrieve(
     ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet:
         r"""Retrieve Model

-        Retrieve a model information.
+        Retrieve information about a model.

         :param model_id: The ID of the model to retrieve.
         :param retries: Override the default retry configuration for this method
@@ -308,7 +308,7 @@ async def retrieve_async(
     ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet:
         r"""Retrieve Model

-        Retrieve a model information.
+        Retrieve information about a model.

         :param model_id: The ID of the model to retrieve.
         :param retries: Override the default retry configuration for this method
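For orientation, this is roughly what the expanded restart surface above looks like from the caller's side. A minimal sketch, not part of the generated patch: the `client.beta.conversations` accessor path, the IDs, and the model name are assumptions based on the SDK's usual conventions.

import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Branch an existing conversation at a given entry and override the
# assistant configuration for the new branch. Every keyword below
# (instructions, tools, name, description, model) is one of the
# parameters this patch adds to restart()/restart_stream().
res = client.beta.conversations.restart(
    conversation_id="conv_0123456789",      # hypothetical ID
    inputs="Continue, but answer in French.",
    from_entry_id="entry_0123456789",       # hypothetical ID
    instructions="You are a concise assistant.",
    tools=[{"type": "web_search"}],         # discriminated union, tagged by "type"
    name="french-branch",
    description="Support thread restarted in French.",
    model="mistral-medium-latest",          # omitted -> original conversation's model
)
print(res)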
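The new FileChunk model is likewise what widens the OCR Document union near the end of the patch: an OCR request can now reference a previously uploaded file by ID instead of a URL. Another minimal sketch, not part of the patch; the upload purpose string and the OCR model name are assumptions.

import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Upload the document once, then point the OCR request at it with the
# new "file" chunk type instead of a document_url chunk.
with open("report.pdf", "rb") as fh:
    uploaded = client.files.upload(
        file={"file_name": "report.pdf", "content": fh},
        purpose="ocr",                      # assumed purpose value
    )

ocr_response = client.ocr.process(
    model="mistral-ocr-latest",             # assumed OCR model name
    document={"type": "file", "file_id": uploaded.id},  # FileChunk, tagged "file"
)
print(ocr_response)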