diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6cf27f7..73686ad 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,18 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 82cf79b2dee6811d91e2912113c21d3a + docChecksum: c33c788946fa446bfcf90b60f68abde9 docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.2 - configChecksum: 1ca921f44508650d65ccf46783910ff3 + releaseVersion: 1.9.3 + configChecksum: 0f65a9bdd8df5ae03eaaaea3ab055bf1 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 core: 5.19.3 customCodeRegions: 0.1.1 @@ -89,7 +90,12 @@ generatedFiles: - docs/models/assistantmessage.md - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md + - docs/models/audiochunk.md + - docs/models/audiochunktype.md + - docs/models/audiotranscriptionrequest.md + - docs/models/audiotranscriptionrequeststream.md - docs/models/basemodelcard.md + - docs/models/basemodelcardtype.md - docs/models/batcherror.md - docs/models/batchjobin.md - docs/models/batchjobout.md @@ -320,6 +326,7 @@ generatedFiles: - docs/models/messageoutputeventrole.md - docs/models/messageoutputeventtype.md - docs/models/messages.md + - docs/models/metadata.md - docs/models/metricout.md - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md @@ -375,6 +382,10 @@ generatedFiles: - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md - docs/models/tool.md - docs/models/toolcall.md - docs/models/toolchoice.md @@ -398,6 +409,19 @@ generatedFiles: - docs/models/tools.md - docs/models/tooltypes.md - 
docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - docs/models/transcriptionstreamsegmentdeltatype.md + - docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md - docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md @@ -420,6 +444,7 @@ generatedFiles: - docs/models/websearchtooltype.md - docs/sdks/accesses/README.md - docs/sdks/agents/README.md + - docs/sdks/audio/README.md - docs/sdks/batch/README.md - docs/sdks/beta/README.md - docs/sdks/chat/README.md @@ -437,6 +462,7 @@ generatedFiles: - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md - poetry.toml - py.typed - scripts/prepare_readme.py @@ -448,6 +474,7 @@ generatedFiles: - src/mistralai/_version.py - src/mistralai/accesses.py - src/mistralai/agents.py + - src/mistralai/audio.py - src/mistralai/basesdk.py - src/mistralai/batch.py - src/mistralai/beta.py @@ -489,6 +516,9 @@ generatedFiles: - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py - src/mistralai/models/basemodelcard.py - src/mistralai/models/batcherror.py - src/mistralai/models/batchjobin.py @@ -668,6 +698,8 @@ generatedFiles: - src/mistralai/models/ssetypes.py - src/mistralai/models/systemmessage.py - src/mistralai/models/textchunk.py + - 
src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py - src/mistralai/models/tool.py - src/mistralai/models/toolcall.py - src/mistralai/models/toolchoice.py @@ -681,6 +713,14 @@ generatedFiles: - src/mistralai/models/toolreferencechunk.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py - src/mistralai/models/unarchiveftmodelout.py - src/mistralai/models/updateftmodelin.py - src/mistralai/models/uploadfileout.py @@ -696,6 +736,7 @@ generatedFiles: - src/mistralai/py.typed - src/mistralai/sdk.py - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py - src/mistralai/types/__init__.py - src/mistralai/types/basemodel.py - src/mistralai/utils/__init__.py @@ -751,7 +792,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "root": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} + application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": 
"completion"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: parameters: @@ -1027,10 +1068,10 @@ examples: jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "Altima", "timeout_hours": 24} + application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "model": "Taurus", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} + application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -1038,7 +1079,7 @@ examples: job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "model": "Accord", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} + application/json: {"id": "", "object": "batch", "input_files": 
["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -1046,14 +1087,14 @@ examples: job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "model": "PT Cruiser", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} + application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_chat: @@ -1069,7 +1110,7 @@ examples: application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: @@ -1085,7 +1126,7 @@ examples: application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_agents: @@ -1101,7 +1142,7 @@ examples: application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": application/json: {} moderations_v1_moderations_post: @@ -1342,5 +1383,16 @@ examples: application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} "422": application/json: {} + audio_api_v1_transcriptions_post: + speakeasy-default-audio-api-v1-transcriptions-post: + requestBody: + multipart/form-data: {"model": "Model X", "stream": false} + responses: + "200": + 
application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + audio_api_v1_transcriptions_post_stream: + speakeasy-default-audio-api-v1-transcriptions-post-stream: + requestBody: + multipart/form-data: {"model": "Camry", "stream": true} examplesVersion: 1.0.2 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index d3df5c3..ffc6c82 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.2 + version: 1.9.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 75541fb..4cb89b9 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 - sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a + sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad + sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 tags: - latest - - speakeasy-sdk-regen-1751557705 + - speakeasy-sdk-regen-1753262939 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 - sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a + sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad + sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: 
sha256:b45de481b3d77689a76a406421d4625dc37cc17bf90bab2f7d6e78f3eec77a9c + codeSamplesRevisionDigest: sha256:1fd9897fdd851557c592b8fd46232518359401d15a6574933c43be63ec2edb53 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/README.md b/README.md index 503c512..f71ccfc 100644 --- a/README.md +++ b/README.md @@ -434,6 +434,14 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [audio](docs/sdks/audio/README.md) + + +#### [audio.transcriptions](docs/sdks/transcriptions/README.md) + +* [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription +* [stream](docs/sdks/transcriptions/README.md#stream) - Create streaming transcription (SSE) + ### [batch](docs/sdks/batch/README.md) diff --git a/RELEASES.md b/RELEASES.md index b66777e..c0e96ad 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -258,4 +258,14 @@ Based on: ### Generated - [python v1.9.2] . ### Releases -- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . \ No newline at end of file +- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . + +## 2025-07-23 17:03:42 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.3] . +### Releases +- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . 
\ No newline at end of file diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md new file mode 100644 index 0000000..c443e7a --- /dev/null +++ b/docs/models/audiochunk.md @@ -0,0 +1,9 @@ +# AudioChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `input_audio` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.AudioChunkType]](../models/audiochunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/audiochunktype.md b/docs/models/audiochunktype.md new file mode 100644 index 0000000..46ebf37 --- /dev/null +++ b/docs/models/audiochunktype.md @@ -0,0 +1,8 @@ +# AudioChunkType + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `INPUT_AUDIO` | input_audio | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md new file mode 100644 index 0000000..e876de1 --- /dev/null +++ b/docs/models/audiotranscriptionrequest.md @@ -0,0 +1,15 @@ +# AudioTranscriptionRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded 
to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md new file mode 100644 index 0000000..975e437 --- /dev/null +++ b/docs/models/audiotranscriptionrequeststream.md @@ -0,0 +1,15 @@ +# AudioTranscriptionRequestStream + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index f5ce8c5..58ad5e2 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.BaseModelCardType]](../models/basemodelcardtype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/basemodelcardtype.md b/docs/models/basemodelcardtype.md new file mode 100644 index 0000000..4a40ce7 --- /dev/null +++ b/docs/models/basemodelcardtype.md @@ -0,0 +1,8 @@ +# BaseModelCardType + + +## Values + +| Name | Value | +| ------ | ------ | +| `BASE` | base | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md index 5203a52..b5b1378 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/batchjobin.md @@ -7,6 +7,7 @@ | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobout.md 
b/docs/models/batchjobout.md index 1637446..b66fff0 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -10,7 +10,8 @@ | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `endpoint` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index 506af14..dd9e8bf 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -9,7 +9,9 @@ | `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 5f07b67..0d10822 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -10,7 +10,7 @@ White-listed arguments from the completion API | `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `frequency_penalty` | 
*OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index f1e22b8..cd08582 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -9,7 +9,9 @@ | `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index a65cd05..cb7e51d 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -33,3 +33,15 @@ value: models.ReferenceChunk = /* values here */ value: models.FileChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + +### `models.AudioChunk` + +```python +value: models.AudioChunk = /* values here */ +``` + diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index 9746a99..4f3e72d 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -13,4 +13,6 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | 
:heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index f2a3bb7..b062b87 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -8,6 +8,7 @@ | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/metadata.md b/docs/models/metadata.md new file mode 100644 index 0000000..e655f58 --- /dev/null +++ b/docs/models/metadata.md @@ -0,0 +1,7 @@ +# Metadata + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 10f738b..28f97dd 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -13,5 +13,7 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | 
:heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | | `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md new file mode 100644 index 0000000..66b2e0c --- /dev/null +++ b/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| +| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/thinkchunktype.md b/docs/models/thinkchunktype.md new file mode 100644 index 0000000..baf6f75 --- /dev/null +++ b/docs/models/thinkchunktype.md @@ -0,0 +1,8 @@ +# ThinkChunkType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `THINKING` | thinking | \ No newline at end of file diff --git a/docs/models/thinking.md b/docs/models/thinking.md new file mode 100644 index 0000000..c7a0d5c --- /dev/null +++ b/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md new file mode 100644 index 0000000..0d2a805 --- /dev/null +++ b/docs/models/timestampgranularity.md @@ -0,0 +1,8 @@ +# TimestampGranularity + + +## Values + +| Name | Value | +| --------- | --------- | +| `SEGMENT` | segment | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 3819236..43e0905 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,9 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | 
Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionresponse.md b/docs/models/transcriptionresponse.md new file mode 100644 index 0000000..1bc0189 --- /dev/null +++ b/docs/models/transcriptionresponse.md @@ -0,0 +1,13 @@ +# TranscriptionResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md new file mode 100644 index 0000000..bebc9f7 --- /dev/null +++ b/docs/models/transcriptionsegmentchunk.md @@ -0,0 
+1,12 @@ +# TranscriptionSegmentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md new file mode 100644 index 0000000..9ecf7d9 --- /dev/null +++ b/docs/models/transcriptionstreamdone.md @@ -0,0 +1,14 @@ +# TranscriptionStreamDone + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamDoneType]](../models/transcriptionstreamdonetype.md) | :heavy_minus_sign: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdonetype.md b/docs/models/transcriptionstreamdonetype.md new file mode 100644 
index 0000000..db092c4 --- /dev/null +++ b/docs/models/transcriptionstreamdonetype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamDoneType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamevents.md b/docs/models/transcriptionstreamevents.md new file mode 100644 index 0000000..f760385 --- /dev/null +++ b/docs/models/transcriptionstreamevents.md @@ -0,0 +1,9 @@ +# TranscriptionStreamEvents + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `event` | [models.TranscriptionStreamEventTypes](../models/transcriptionstreameventtypes.md) | :heavy_check_mark: | N/A | +| `data` | [models.TranscriptionStreamEventsData](../models/transcriptionstreameventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreameventsdata.md b/docs/models/transcriptionstreameventsdata.md new file mode 100644 index 0000000..eea8e92 --- /dev/null +++ b/docs/models/transcriptionstreameventsdata.md @@ -0,0 +1,29 @@ +# TranscriptionStreamEventsData + + +## Supported Types + +### `models.TranscriptionStreamDone` + +```python +value: models.TranscriptionStreamDone = /* values here */ +``` + +### `models.TranscriptionStreamLanguage` + +```python +value: models.TranscriptionStreamLanguage = /* values here */ +``` + +### `models.TranscriptionStreamSegmentDelta` + +```python +value: models.TranscriptionStreamSegmentDelta = /* values here */ +``` + +### `models.TranscriptionStreamTextDelta` + +```python +value: models.TranscriptionStreamTextDelta = /* values 
here */ +``` + diff --git a/docs/models/transcriptionstreameventtypes.md b/docs/models/transcriptionstreameventtypes.md new file mode 100644 index 0000000..e4eb25a --- /dev/null +++ b/docs/models/transcriptionstreameventtypes.md @@ -0,0 +1,11 @@ +# TranscriptionStreamEventTypes + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `TRANSCRIPTION_LANGUAGE` | transcription.language | +| `TRANSCRIPTION_SEGMENT` | transcription.segment | +| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | +| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md new file mode 100644 index 0000000..e16c8fd --- /dev/null +++ b/docs/models/transcriptionstreamlanguage.md @@ -0,0 +1,10 @@ +# TranscriptionStreamLanguage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `type` | [Optional[models.TranscriptionStreamLanguageType]](../models/transcriptionstreamlanguagetype.md) | :heavy_minus_sign: | N/A | +| `audio_language` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguagetype.md b/docs/models/transcriptionstreamlanguagetype.md new file mode 100644 index 0000000..e93521e --- /dev/null +++ b/docs/models/transcriptionstreamlanguagetype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamLanguageType + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| 
`TRANSCRIPTION_LANGUAGE` | transcription.language | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md new file mode 100644 index 0000000..3deeedf --- /dev/null +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -0,0 +1,12 @@ +# TranscriptionStreamSegmentDelta + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdeltatype.md b/docs/models/transcriptionstreamsegmentdeltatype.md new file mode 100644 index 0000000..03ff3e8 --- /dev/null +++ b/docs/models/transcriptionstreamsegmentdeltatype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamSegmentDeltaType + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `TRANSCRIPTION_SEGMENT` | transcription.segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md new file mode 100644 index 0000000..adddfe1 --- /dev/null +++ b/docs/models/transcriptionstreamtextdelta.md @@ -0,0 +1,10 @@ +# TranscriptionStreamTextDelta + + +## Fields + +| Field | Type | Required | 
Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamTextDeltaType]](../models/transcriptionstreamtextdeltatype.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdeltatype.md b/docs/models/transcriptionstreamtextdeltatype.md new file mode 100644 index 0000000..b7c9d67 --- /dev/null +++ b/docs/models/transcriptionstreamtextdeltatype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamTextDeltaType + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f..d05ead7 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| ----------------------- | ----------------------- | +| `TRANSCRIPTION_SEGMENT` | transcription_segment | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index ef2ad21..6f09c9a 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -13,4 +13,6 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | 
:heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usageinfo.md b/docs/models/usageinfo.md index 9f56a3a..f5204ac 100644 --- a/docs/models/usageinfo.md +++ b/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md new file mode 100644 index 0000000..2101c26 --- /dev/null +++ b/docs/sdks/audio/README.md @@ -0,0 +1,6 @@ +# Audio +(*audio*) + +## Overview + +### Available Operations diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 0c0b599..ef1e154 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -39,6 +39,7 @@ with Mistral( | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | 
`page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | @@ -72,7 +73,7 @@ with Mistral( res = mistral.batch.jobs.create(input_files=[ "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", model="Altima", timeout_hours=24) + ], endpoint="/v1/moderations", timeout_hours=24) # Handle response print(res) @@ -85,7 +86,8 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md new file mode 100644 index 0000000..fcac246 --- /dev/null +++ b/docs/sdks/transcriptions/README.md @@ -0,0 +1,103 @@ +# Transcriptions +(*audio.transcriptions*) + +## Overview + +API for audio transcription. 
+ +### Available Operations + +* [complete](#complete) - Create Transcription +* [stream](#stream) - Create streaming transcription (SSE) + +## complete + +Create Transcription + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.complete(model="Model X") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.TranscriptionResponse](../../models/transcriptionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Create streaming transcription (SSE) + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.stream(model="Camry") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[Union[eventstreaming.EventStream[models.TranscriptionStreamEvents], eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 846b6ff..123c0fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.2" +version = "1.9.3" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 8d003f9..5937a74 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.2" +__version__: str = "1.9.3" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.2 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.3 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py new file mode 100644 index 0000000..66934a8 --- /dev/null +++ b/src/mistralai/audio.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class Audio(BaseSDK):
    transcriptions: Transcriptions
    r"""API for audio transcription."""

    def __init__(self, sdk_config: SDKConfiguration) -> None:
        """Wire up the base SDK plumbing and the nested audio sub-SDKs.

        :param sdk_config: Shared SDK configuration, reused by every sub-SDK
            so that auth, server URL, and client settings stay consistent.
        """
        super().__init__(sdk_config)
        self.sdk_configuration = sdk_config
        self._init_sdks()

    def _init_sdks(self):
        # Each nested namespace is built against the same configuration object.
        self.transcriptions = Transcriptions(self.sdk_configuration)
create( *, input_files: List[str], endpoint: models.APIEndpoint, - model: str, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, timeout_hours: Optional[int] = 24, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -254,6 +261,7 @@ def create( :param input_files: :param endpoint: :param model: + :param agent_id: :param metadata: :param timeout_hours: :param retries: Override the default retry configuration for this method @@ -275,6 +283,7 @@ def create( input_files=input_files, endpoint=endpoint, model=model, + agent_id=agent_id, metadata=metadata, timeout_hours=timeout_hours, ) @@ -348,7 +357,8 @@ async def create_async( *, input_files: List[str], endpoint: models.APIEndpoint, - model: str, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, timeout_hours: Optional[int] = 24, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -363,6 +373,7 @@ async def create_async( :param input_files: :param endpoint: :param model: + :param agent_id: :param metadata: :param timeout_hours: :param retries: Override the default retry configuration for this method @@ -384,6 +395,7 @@ async def create_async( input_files=input_files, endpoint=endpoint, model=model, + agent_id=agent_id, metadata=metadata, timeout_hours=timeout_hours, ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 03965fd..2039c2b 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -129,7 +129,16 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type + from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) + from .audiotranscriptionrequeststream import ( + 
AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -759,8 +768,16 @@ SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict - from .toolcall import ToolCall, ToolCallTypedDict + from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( @@ -799,6 +816,42 @@ ) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + Type, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneType, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageType, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaType, + 
TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaType, + TranscriptionStreamTextDeltaTypedDict, + ) from .unarchiveftmodelout import ( UnarchiveFTModelOut, UnarchiveFTModelOutObject, @@ -917,7 +970,15 @@ "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", + "AudioChunk", + "AudioChunkType", + "AudioChunkTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", "BaseModelCard", + "BaseModelCardType", "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", @@ -1322,6 +1383,8 @@ "MessageOutputEventTypedDict", "Messages", "MessagesTypedDict", + "Metadata", + "MetadataTypedDict", "MetricOut", "MetricOutTypedDict", "MistralPromptMode", @@ -1416,6 +1479,12 @@ "TextChunk", "TextChunkType", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "TimestampGranularity", "Tool", "ToolCall", "ToolCallTypedDict", @@ -1452,6 +1521,27 @@ "ToolsTypedDict", "TrainingFile", "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneType", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageType", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaType", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaType", + "TranscriptionStreamTextDeltaTypedDict", "Two", 
"TwoTypedDict", "Type", @@ -1565,9 +1655,16 @@ "AssistantMessageContentTypedDict": ".assistantmessage", "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkType": ".audiochunk", + "AudioChunkTypedDict": ".audiochunk", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", "BaseModelCard": ".basemodelcard", + "BaseModelCardType": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", - "Type": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJobIn": ".batchjobin", @@ -2067,8 +2164,16 @@ "TextChunk": ".textchunk", "TextChunkType": ".textchunk", "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", + "Metadata": ".toolcall", + "MetadataTypedDict": ".toolcall", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", "ToolChoice": ".toolchoice", @@ -2101,6 +2206,28 @@ "ToolTypes": ".tooltypes", "TrainingFile": ".trainingfile", "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "Type": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneType": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": 
".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", "UnarchiveFTModelOutObject": ".unarchiveftmodelout", "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py new file mode 100644 index 0000000..2780570 --- /dev/null +++ b/src/mistralai/models/audiochunk.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
# Discriminator value marking a chunk as raw input audio.
AudioChunkType = Literal["input_audio"]


class AudioChunkTypedDict(TypedDict):
    input_audio: str
    type: NotRequired[AudioChunkType]


class AudioChunk(BaseModel):
    # Audio payload for the chunk; exact encoding (e.g. base64) is not
    # visible from this model — TODO confirm against the API reference.
    input_audio: str

    type: Optional[AudioChunkType] = "input_audio"


class AudioTranscriptionRequestTypedDict(TypedDict):
    model: str
    file: NotRequired[FileTypedDict]
    file_url: NotRequired[Nullable[str]]
    r"""Url of a file to be transcribed"""
    file_id: NotRequired[Nullable[str]]
    r"""ID of a file uploaded to /v1/files"""
    language: NotRequired[Nullable[str]]
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""
    temperature: NotRequired[Nullable[float]]
    stream: Literal[False]
    timestamp_granularities: NotRequired[List[TimestampGranularity]]
    r"""Granularities of timestamps to include in the response."""


class AudioTranscriptionRequest(BaseModel):
    model: Annotated[str, FieldMetadata(multipart=True)]

    file: Annotated[
        Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True))
    ] = None

    file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Url of a file to be transcribed"""

    file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""ID of a file uploaded to /v1/files"""

    language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET
    r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy."""

    temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = (
        UNSET
    )

    STREAM: Annotated[
        Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))],
        pydantic.Field(alias="stream"),
        FieldMetadata(multipart=True),
    ] = False

    timestamp_granularities: Annotated[
        Optional[List[TimestampGranularity]], FieldMetadata(multipart=True)
    ] = None
    r"""Granularities of timestamps to include in the response."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize the model, dropping unset optional fields.

        Distinguishes three states per field: a real value (always emitted),
        the UNSET sentinel (never emitted), and None — which is emitted only
        for required fields, or for nullable optional fields the caller set
        explicitly (so an intentional null survives serialization).
        """
        # Fields that may be omitted from the wire form entirely.
        optional_fields = [
            "file",
            "file_url",
            "file_id",
            "language",
            "temperature",
            "stream",
            "timestamp_granularities",
        ]
        # Fields whose serialized value may legitimately be null.
        nullable_fields = ["file_url", "file_id", "language", "temperature"]
        null_default_fields = []

        serialized = handler(self)
        result = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            # pop-with-default both reads and removes the entry in one step.
            value = serialized.pop(key, None)

            optional_nullable = key in optional_fields and key in nullable_fields
            explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value != UNSET_SENTINEL and (
                value is not None
                or key not in optional_fields
                or (optional_nullable and explicitly_set)
            ):
                result[key] = value

        return result
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[True] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequestStream(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = True + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: 
disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 7423a71..a4a061f 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["base"] +BaseModelCardType = Literal["base"] class BaseModelCardTypedDict(TypedDict): @@ -28,7 +28,7 @@ class BaseModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Type + type: BaseModelCardType class BaseModelCard(BaseModel): @@ -57,7 +57,7 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("base"))], + Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index 6fcce0f..aa0bb5b 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -13,7 +13,8 @@ class BatchJobInTypedDict(TypedDict): input_files: List[str] endpoint: APIEndpoint - model: str + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, str]]] timeout_hours: NotRequired[int] @@ -23,7 +24,9 @@ class BatchJobIn(BaseModel): endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] - model: str + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET metadata: OptionalNullable[Dict[str, str]] = UNSET @@ -31,8 +34,8 @@ class BatchJobIn(BaseModel): 
@model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["metadata", "timeout_hours"] - nullable_fields = ["metadata"] + optional_fields = ["model", "agent_id", "metadata", "timeout_hours"] + nullable_fields = ["model", "agent_id", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 2b49057..8830431 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -16,7 +16,6 @@ class BatchJobOutTypedDict(TypedDict): id: str input_files: List[str] endpoint: str - model: str errors: List[BatchErrorTypedDict] status: BatchJobStatus created_at: int @@ -26,6 +25,8 @@ class BatchJobOutTypedDict(TypedDict): failed_requests: int object: NotRequired[BatchJobOutObject] metadata: NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] output_file: NotRequired[Nullable[str]] error_file: NotRequired[Nullable[str]] started_at: NotRequired[Nullable[int]] @@ -39,8 +40,6 @@ class BatchJobOut(BaseModel): endpoint: str - model: str - errors: List[BatchError] status: BatchJobStatus @@ -59,6 +58,10 @@ class BatchJobOut(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + output_file: OptionalNullable[str] = UNSET error_file: OptionalNullable[str] = UNSET @@ -72,6 +75,8 @@ def serialize_model(self, handler): optional_fields = [ "object", "metadata", + "model", + "agent_id", "output_file", "error_file", "started_at", @@ -79,6 +84,8 @@ def serialize_model(self, handler): ] nullable_fields = [ "metadata", + "model", + "agent_id", "output_file", "error_file", "started_at", diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py index 4143d0e..56ffe96 100644 --- a/src/mistralai/models/classifierftmodelout.py +++ 
b/src/mistralai/models/classifierftmodelout.py @@ -21,7 +21,9 @@ class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str + workspace_id: str root: str + root_version: str archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str @@ -41,8 +43,12 @@ class ClassifierFTModelOut(BaseModel): owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool capabilities: FTModelCapabilitiesOut diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py index 04e032a..40aa031 100644 --- a/src/mistralai/models/completionargs.py +++ b/src/mistralai/models/completionargs.py @@ -17,7 +17,7 @@ class CompletionArgsTypedDict(TypedDict): stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] presence_penalty: NotRequired[Nullable[float]] frequency_penalty: NotRequired[Nullable[float]] - temperature: NotRequired[float] + temperature: NotRequired[Nullable[float]] top_p: NotRequired[Nullable[float]] max_tokens: NotRequired[Nullable[int]] random_seed: NotRequired[Nullable[int]] @@ -35,7 +35,7 @@ class CompletionArgs(BaseModel): frequency_penalty: OptionalNullable[float] = UNSET - temperature: Optional[float] = 0.3 + temperature: OptionalNullable[float] = UNSET top_p: OptionalNullable[float] = UNSET @@ -67,6 +67,7 @@ def serialize_model(self, handler): "stop", "presence_penalty", "frequency_penalty", + "temperature", "top_p", "max_tokens", "random_seed", diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py index ad04d73..ab71168 100644 --- a/src/mistralai/models/completionftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -20,7 +20,9 @@ class CompletionFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str + workspace_id: str root: str + root_version: str archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str @@ -39,8 +41,12 @@ class CompletionFTModelOut(BaseModel): owned_by: str + workspace_id: str + 
root: str + root_version: str + archived: bool capabilities: FTModelCapabilitiesOut diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 4cb8ab6..47170ee 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -1,11 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union @@ -19,7 +21,9 @@ ImageURLChunkTypedDict, ReferenceChunkTypedDict, FileChunkTypedDict, + AudioChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ], ) @@ -31,6 +35,8 @@ Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], Annotated[FileChunk, Tag("file")], + Annotated[ThinkChunk, Tag("thinking")], + Annotated[AudioChunk, Tag("input_audio")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index d687f22..7c7b60c 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -27,6 +27,8 @@ class FileSchemaTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class FileSchema(BaseModel): @@ -53,10 +55,14 @@ class FileSchema(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = UNSET + + signature: 
OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py index 3926122..c48246d 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -14,6 +14,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] @@ -36,6 +37,11 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + metadata: Annotated[ OptionalNullable[Dict[str, Any]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -62,12 +68,13 @@ def serialize_model(self, handler): "page", "page_size", "model", + "agent_id", "metadata", "created_after", "created_by_me", "status", ] - nullable_fields = ["model", "metadata", "created_after", "status"] + nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index e5f9144..7d734b0 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -28,6 +28,8 @@ class 
RetrieveFileOutTypedDict(TypedDict): source: Source deleted: bool num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class RetrieveFileOut(BaseModel): @@ -56,10 +58,14 @@ class RetrieveFileOut(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py new file mode 100644 index 0000000..24b466f --- /dev/null +++ b/src/mistralai/models/thinkchunk.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +ThinkChunkType = Literal["thinking"] + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + type: NotRequired[ThinkChunkType] + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + type: Optional[ThinkChunkType] = "thinking" diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py new file mode 100644 index 0000000..dd1b644 --- /dev/null +++ b/src/mistralai/models/timestampgranularity.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +TimestampGranularity = Literal["segment"] diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 92dbb4a..7d3a3c6 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -3,18 +3,28 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict +class MetadataTypedDict(TypedDict): + pass + + +class Metadata(BaseModel): + pass + + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] + metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -27,3 +37,35 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 + + metadata: OptionalNullable[Metadata] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["id", "type", "index", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + 
optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/transcriptionresponse.py b/src/mistralai/models/transcriptionresponse.py new file mode 100644 index 0000000..54a98a5 --- /dev/null +++ b/src/mistralai/models/transcriptionresponse.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class TranscriptionResponseTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + + +class TranscriptionResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + 
optional_fields = ["segments"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py new file mode 100644 index 0000000..53f1b39 --- /dev/null +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +Type = Literal["transcription_segment"] + + +class TranscriptionSegmentChunkTypedDict(TypedDict): + text: str + start: float + end: float + type: NotRequired[Type] + + +class TranscriptionSegmentChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Optional[Type] = "transcription_segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git 
a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py new file mode 100644 index 0000000..ffd0e08 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamdone.py @@ -0,0 +1,85 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamDoneType = Literal["transcription.done"] + + +class TranscriptionStreamDoneTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + type: NotRequired[TranscriptionStreamDoneType] + + +class TranscriptionStreamDone(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + type: Optional[TranscriptionStreamDoneType] = "transcription.done" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["segments", "type"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionstreamevents.py b/src/mistralai/models/transcriptionstreamevents.py new file mode 100644 index 0000000..8207c03 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamevents.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, +) +from .transcriptionstreameventtypes import TranscriptionStreamEventTypes +from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, +) +from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaTypedDict, +) +from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TranscriptionStreamEventsDataTypedDict = TypeAliasType( + "TranscriptionStreamEventsDataTypedDict", + Union[ + TranscriptionStreamTextDeltaTypedDict, + TranscriptionStreamLanguageTypedDict, + TranscriptionStreamSegmentDeltaTypedDict, + TranscriptionStreamDoneTypedDict, + ], +) + + +TranscriptionStreamEventsData = Annotated[ + 
Union[ + Annotated[TranscriptionStreamDone, Tag("transcription.done")], + Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], + Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], + Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class TranscriptionStreamEventsTypedDict(TypedDict): + event: TranscriptionStreamEventTypes + data: TranscriptionStreamEventsDataTypedDict + + +class TranscriptionStreamEvents(BaseModel): + event: TranscriptionStreamEventTypes + + data: TranscriptionStreamEventsData diff --git a/src/mistralai/models/transcriptionstreameventtypes.py b/src/mistralai/models/transcriptionstreameventtypes.py new file mode 100644 index 0000000..4a910f0 --- /dev/null +++ b/src/mistralai/models/transcriptionstreameventtypes.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +TranscriptionStreamEventTypes = Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", +] diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py new file mode 100644 index 0000000..8fc2aa6 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamlanguage.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamLanguageType = Literal["transcription.language"] + + +class TranscriptionStreamLanguageTypedDict(TypedDict): + audio_language: str + type: NotRequired[TranscriptionStreamLanguageType] + + +class TranscriptionStreamLanguage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + audio_language: str + + type: Optional[TranscriptionStreamLanguageType] = "transcription.language" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py new file mode 100644 index 0000000..61b396b --- /dev/null +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment"] + + +class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): + text: str + start: float + end: float + type: NotRequired[TranscriptionStreamSegmentDeltaType] + + +class TranscriptionStreamSegmentDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py new file mode 100644 index 0000000..8f0b0e5 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamtextdelta.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta"] + + +class TranscriptionStreamTextDeltaTypedDict(TypedDict): + text: str + type: NotRequired[TranscriptionStreamTextDeltaType] + + +class TranscriptionStreamTextDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 3a8b733..8f9f106 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -27,6 +27,8 @@ class UploadFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class UploadFileOut(BaseModel): @@ -53,10 +55,14 @@ class UploadFileOut(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git 
class UsageInfoTypedDict(TypedDict):
    """TypedDict mirror of ``UsageInfo`` for dict-style input."""

    prompt_tokens: NotRequired[int]
    completion_tokens: NotRequired[int]
    total_tokens: NotRequired[int]
    # Nullable: the server may return an explicit null for non-audio requests.
    prompt_audio_seconds: NotRequired[Nullable[int]]


class UsageInfo(BaseModel):
    """Token (and audio-second) usage accounting attached to API responses.

    Counter fields default to 0; ``prompt_audio_seconds`` distinguishes
    "not sent" (UNSET) from an explicit JSON null. ``extra="allow"`` keeps
    unknown fields the server may add, exposed via ``additional_properties``.
    """

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    # Backing store for undeclared fields (populated because extra="allow").
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    # Tokens consumed by the prompt.
    prompt_tokens: Optional[int] = 0

    # Tokens produced by the completion.
    completion_tokens: Optional[int] = 0

    # prompt_tokens + completion_tokens (as reported by the server).
    total_tokens: Optional[int] = 0

    # Seconds of audio billed for the prompt, when an audio input was used.
    prompt_audio_seconds: OptionalNullable[int] = UNSET

    @property
    def additional_properties(self):
        # Read-only view of any extra (undeclared) fields the server returned.
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        """Serialize, dropping unset optional fields but keeping explicit nulls.

        Optional+nullable fields are emitted as null only when the caller
        actually set them; UNSET sentinels are stripped from the output.
        """
        optional_fields = [
            "prompt_tokens",
            "completion_tokens",
            "total_tokens",
            "prompt_audio_seconds",
        ]
        nullable_fields = ["prompt_audio_seconds"]
        null_default_fields = []

        serialized = handler(self)

        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            serialized.pop(k, None)

            optional_nullable = k in optional_fields and k in nullable_fields
            is_set = (
                self.__pydantic_fields_set__.intersection({n})
                or k in null_default_fields
            )  # pylint: disable=no-member

            if val is not None and val != UNSET_SENTINEL:
                m[k] = val
            elif val != UNSET_SENTINEL and (
                # idiom fix: `k not in` instead of `not k in` (flake8 E713);
                # semantics unchanged.
                k not in optional_fields or (optional_nullable and is_set)
            ):
                m[k] = val

        # Re-attach any extra (undeclared) fields untouched.
        for k, v in serialized.items():
            m[k] = v

        return m
class Transcriptions(BaseSDK):
    r"""API for audio transcription."""

    def complete(
        self,
        *,
        model: str,
        file: Optional[Union[models.File, models.FileTypedDict]] = None,
        file_url: OptionalNullable[str] = UNSET,
        file_id: OptionalNullable[str] = UNSET,
        language: OptionalNullable[str] = UNSET,
        temperature: OptionalNullable[float] = UNSET,
        timestamp_granularities: Optional[List[models.TimestampGranularity]] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.TranscriptionResponse:
        r"""Create Transcription

        Synchronously POSTs a multipart request to ``/v1/audio/transcriptions``
        and returns the parsed :class:`models.TranscriptionResponse`.
        Exactly one of ``file``, ``file_url`` or ``file_id`` is expected to
        carry the audio (not enforced client-side — the server validates).

        :param model:
        :param file:
        :param file_url: Url of a file to be transcribed
        :param file_id: ID of a file uploaded to /v1/files
        :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy.
        :param temperature:
        :param timestamp_granularities: Granularities of timestamps to include in the response.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout unless the caller overrides it.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # Per-call server override takes precedence over the configured URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AudioTranscriptionRequest(
            model=model,
            # Coerce a TypedDict input into the pydantic File model if needed.
            file=utils.get_pydantic_model(file, Optional[models.File]),
            file_url=file_url,
            file_id=file_id,
            language=language,
            temperature=temperature,
            timestamp_granularities=timestamp_granularities,
        )

        req = self._build_request(
            method="POST",
            path="/v1/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazy multipart serialization — only evaluated when the body is sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "multipart", models.AudioTranscriptionRequest
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "use the SDK-level retry config", if one exists.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="audio_api_v1_transcriptions_post",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.TranscriptionResponse)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Any other status/content-type combination is unexpected.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def complete_async(
        self,
        *,
        model: str,
        file: Optional[Union[models.File, models.FileTypedDict]] = None,
        file_url: OptionalNullable[str] = UNSET,
        file_id: OptionalNullable[str] = UNSET,
        language: OptionalNullable[str] = UNSET,
        temperature: OptionalNullable[float] = UNSET,
        timestamp_granularities: Optional[List[models.TimestampGranularity]] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.TranscriptionResponse:
        r"""Create Transcription

        Async variant of :meth:`complete`; identical request/response
        handling using the async request builder and dispatcher.

        :param model:
        :param file:
        :param file_url: Url of a file to be transcribed
        :param file_id: ID of a file uploaded to /v1/files
        :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy.
        :param temperature:
        :param timestamp_granularities: Granularities of timestamps to include in the response.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout unless the caller overrides it.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # Per-call server override takes precedence over the configured URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AudioTranscriptionRequest(
            model=model,
            # Coerce a TypedDict input into the pydantic File model if needed.
            file=utils.get_pydantic_model(file, Optional[models.File]),
            file_url=file_url,
            file_id=file_id,
            language=language,
            temperature=temperature,
            timestamp_granularities=timestamp_granularities,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/audio/transcriptions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazy multipart serialization — only evaluated when the body is sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "multipart", models.AudioTranscriptionRequest
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "use the SDK-level retry config", if one exists.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="audio_api_v1_transcriptions_post",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.TranscriptionResponse)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Any other status/content-type combination is unexpected.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    def stream(
        self,
        *,
        model: str,
        file: Optional[Union[models.File, models.FileTypedDict]] = None,
        file_url: OptionalNullable[str] = UNSET,
        file_id: OptionalNullable[str] = UNSET,
        language: OptionalNullable[str] = UNSET,
        temperature: OptionalNullable[float] = UNSET,
        timestamp_granularities: Optional[List[models.TimestampGranularity]] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]:
        r"""Create streaming transcription (SSE)

        Like :meth:`complete` but requests ``text/event-stream`` and returns
        an :class:`eventstreaming.EventStream` yielding parsed
        :class:`models.TranscriptionStreamEvents`. The ``#stream`` fragment in
        the path only disambiguates the operation; it is not sent on the wire.

        :param model:
        :param file:
        :param file_url: Url of a file to be transcribed
        :param file_id: ID of a file uploaded to /v1/files
        :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy.
        :param temperature:
        :param timestamp_granularities: Granularities of timestamps to include in the response.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout unless the caller overrides it.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # Per-call server override takes precedence over the configured URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AudioTranscriptionRequestStream(
            model=model,
            # Coerce a TypedDict input into the pydantic File model if needed.
            file=utils.get_pydantic_model(file, Optional[models.File]),
            file_url=file_url,
            file_id=file_id,
            language=language,
            temperature=temperature,
            timestamp_granularities=timestamp_granularities,
        )

        req = self._build_request(
            method="POST",
            path="/v1/audio/transcriptions#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazy multipart serialization — only evaluated when the body is sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.AudioTranscriptionRequestStream,
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "use the SDK-level retry config", if one exists.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="audio_api_v1_transcriptions_post_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            # Keep the response open so SSE events can be consumed lazily.
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStream(
                http_res,
                # Each raw SSE payload is unmarshalled into a typed event.
                lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents),
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Any other status/content-type combination is unexpected.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )

    async def stream_async(
        self,
        *,
        model: str,
        file: Optional[Union[models.File, models.FileTypedDict]] = None,
        file_url: OptionalNullable[str] = UNSET,
        file_id: OptionalNullable[str] = UNSET,
        language: OptionalNullable[str] = UNSET,
        temperature: OptionalNullable[float] = UNSET,
        timestamp_granularities: Optional[List[models.TimestampGranularity]] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]:
        r"""Create streaming transcription (SSE)

        Async variant of :meth:`stream`; returns an
        :class:`eventstreaming.EventStreamAsync` of typed events.

        :param model:
        :param file:
        :param file_url: Url of a file to be transcribed
        :param file_id: ID of a file uploaded to /v1/files
        :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy.
        :param temperature:
        :param timestamp_granularities: Granularities of timestamps to include in the response.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Fall back to the SDK-wide timeout unless the caller overrides it.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # Per-call server override takes precedence over the configured URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AudioTranscriptionRequestStream(
            model=model,
            # Coerce a TypedDict input into the pydantic File model if needed.
            file=utils.get_pydantic_model(file, Optional[models.File]),
            file_url=file_url,
            file_id=file_id,
            language=language,
            temperature=temperature,
            timestamp_granularities=timestamp_granularities,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/audio/transcriptions#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazy multipart serialization — only evaluated when the body is sent.
            get_serialized_body=lambda: utils.serialize_request_body(
                request,
                False,
                False,
                "multipart",
                models.AudioTranscriptionRequestStream,
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "use the SDK-level retry config", if one exists.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limiting and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="audio_api_v1_transcriptions_post_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            # Keep the response open so SSE events can be consumed lazily.
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.EventStreamAsync(
                http_res,
                # Each raw SSE payload is unmarshalled into a typed event.
                lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents),
            )
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Any other status/content-type combination is unexpected.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )