From 811f17a5a4f8dfdf716fbdd1bef9ecf0a97e5ada Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 6 Mar 2025 16:40:11 +0000 Subject: [PATCH 1/5] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.477.0 --- .speakeasy/gen.lock | 45 +++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 13 +- README.md | 4 + RELEASES.md | 12 +- docs/models/assistantmessage.md | 12 +- docs/models/chatclassificationrequest.md | 9 - docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- docs/models/chatmoderationrequest.md | 10 + ...puts.md => chatmoderationrequestinputs.md} | 2 +- docs/models/classificationrequest.md | 4 +- docs/models/contentchunk.md | 6 + docs/models/document.md | 19 ++ docs/models/documenturlchunk.md | 10 + docs/models/embeddingrequest.md | 3 +- docs/models/fimcompletionrequest.md | 2 +- docs/models/fimcompletionstreamrequest.md | 2 +- docs/models/ocrimageobject.md | 13 + docs/models/ocrpagedimensions.md | 10 + docs/models/ocrpageobject.md | 11 + docs/models/ocrrequest.md | 14 ++ docs/models/ocrresponse.md | 10 + docs/models/ocrusageinfo.md | 9 + docs/models/prediction.md | 8 +- docs/models/predictiontype.md | 8 - docs/sdks/chat/README.md | 4 +- docs/sdks/classifiers/README.md | 19 +- docs/sdks/embeddings/README.md | 1 - docs/sdks/fim/README.md | 4 +- docs/sdks/ocr/README.md | 58 +++++ poetry.lock | 12 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/chat.py | 10 +- src/mistralai/classifiers.py | 52 ++-- src/mistralai/embeddings.py | 10 +- src/mistralai/fim.py | 10 +- src/mistralai/models/__init__.py | 54 ++-- src/mistralai/models/assistantmessage.py | 2 + .../models/chatclassificationrequest.py | 113 --------- src/mistralai/models/chatcompletionrequest.py | 13 +- .../models/chatcompletionstreamrequest.py | 13 +- src/mistralai/models/chatmoderationrequest.py | 86 +++++++ src/mistralai/models/classificationrequest.py | 43 +--- src/mistralai/models/contentchunk.py | 9 +- src/mistralai/models/documenturlchunk.py | 62 +++++ src/mistralai/models/embeddingrequest.py | 38 +-- src/mistralai/models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- src/mistralai/models/ocrimageobject.py | 77 ++++++ src/mistralai/models/ocrpagedimensions.py | 25 ++ src/mistralai/models/ocrpageobject.py | 64 +++++ src/mistralai/models/ocrrequest.py | 97 +++++++ src/mistralai/models/ocrresponse.py | 26 ++ src/mistralai/models/ocrusageinfo.py | 51 ++++ src/mistralai/models/prediction.py | 9 +- src/mistralai/ocr.py | 238 ++++++++++++++++++ src/mistralai/sdk.py | 4 + 59 files changed, 1108 insertions(+), 354 deletions(-) delete mode 100644 docs/models/chatclassificationrequest.md create mode 100644 docs/models/chatmoderationrequest.md rename docs/models/{chatclassificationrequestinputs.md => chatmoderationrequestinputs.md} (86%) create mode 100644 docs/models/document.md create mode 100644 docs/models/documenturlchunk.md create mode 100644 docs/models/ocrimageobject.md create mode 100644 docs/models/ocrpagedimensions.md create mode 100644 docs/models/ocrpageobject.md create mode 100644 docs/models/ocrrequest.md create mode 100644 docs/models/ocrresponse.md create mode 100644 docs/models/ocrusageinfo.md delete mode 100644 docs/models/predictiontype.md create mode 100644 docs/sdks/ocr/README.md delete mode 100644 src/mistralai/models/chatclassificationrequest.py create mode 100644 src/mistralai/models/chatmoderationrequest.py create mode 100644 src/mistralai/models/documenturlchunk.py create mode 100644 
src/mistralai/models/ocrimageobject.py create mode 100644 src/mistralai/models/ocrpagedimensions.py create mode 100644 src/mistralai/models/ocrpageobject.py create mode 100644 src/mistralai/models/ocrrequest.py create mode 100644 src/mistralai/models/ocrresponse.py create mode 100644 src/mistralai/models/ocrusageinfo.py create mode 100644 src/mistralai/ocr.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index ac3e0111..6eb1248e 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 553c31591e8dc33a58cb75f348c3aa72 + docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa docVersion: 0.0.2 speakeasyVersion: 1.477.0 generationVersion: 2.497.0 - releaseVersion: 1.5.0 - configChecksum: 9a5649c5c372dc5fd2fde38a0faee40e + releaseVersion: 1.5.1 + configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -68,8 +68,6 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md - - docs/models/chatclassificationrequest.md - - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -78,6 +76,8 @@ generatedFiles: - docs/models/chatcompletionstreamrequestmessages.md - docs/models/chatcompletionstreamrequeststop.md - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatmoderationrequest.md + - docs/models/chatmoderationrequestinputs.md - docs/models/checkpointout.md - docs/models/classificationobject.md - docs/models/classificationrequest.md @@ -99,6 +99,8 @@ generatedFiles: - docs/models/detailedjoboutobject.md - docs/models/detailedjoboutrepositories.md - docs/models/detailedjoboutstatus.md + - docs/models/document.md + - docs/models/documenturlchunk.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -166,9 +168,14 @@ generatedFiles: - docs/models/modelcapabilities.md - docs/models/modellist.md - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrusageinfo.md - docs/models/one.md - docs/models/prediction.md - - docs/models/predictiontype.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -227,6 +234,7 @@ generatedFiles: - docs/sdks/mistral/README.md - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md + - docs/sdks/ocr/README.md - poetry.toml - py.typed - pylintrc @@ -261,11 +269,11 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py - - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py - src/mistralai/models/checkpointout.py - src/mistralai/models/classificationobject.py - src/mistralai/models/classificationrequest.py @@ -279,6 +287,7 @@ generatedFiles: - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py - 
src/mistralai/models/detailedjobout.py + - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py @@ -327,6 +336,12 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrusageinfo.py - src/mistralai/models/prediction.py - src/mistralai/models/referencechunk.py - src/mistralai/models/responseformat.py @@ -357,6 +372,7 @@ generatedFiles: - src/mistralai/models/wandbintegration.py - src/mistralai/models/wandbintegrationout.py - src/mistralai/models_.py + - src/mistralai/ocr.py - src/mistralai/py.typed - src/mistralai/sdk.py - src/mistralai/sdkconfiguration.py @@ -607,7 +623,7 @@ examples: embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: - application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "mistral-embed"} + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} @@ -616,7 +632,7 @@ examples: moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: - application/json: {"input": [""]} + application/json: {"model": "V90", "input": [""]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} @@ -625,11 +641,20 @@ examples: moderations_chat_v1_chat_moderations_post: speakeasy-default-moderations-chat-v1-chat-moderations-post: requestBody: - application/json: {"input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "model": "Roadster"} + application/json: {"model": "Roadster", "input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "truncate_for_context_length": false} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} "422": application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} + responses: + "200": + application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} + "422": + application/json: {} examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 069ce07d..f020895b 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.0 + version: 1.5.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock 
b/.speakeasy/workflow.lock index ea74f7d9..21228dc5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d - sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b tags: - latest + - speakeasy-sdk-regen-1741279153 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,13 +37,13 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d - sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:cbf9b277d16c47816fc5d63b4c69cf0fbd1fe99d424c34ab465d2b61fcc6e5e8 + codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 workflow: workflowVersion: 1.0.0 - speakeasyVersion: latest + speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index 617c6071..fd31bcd8 100644 --- a/README.md +++ b/README.md @@ -442,6 +442,10 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model * [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model +### [ocr](docs/sdks/ocr/README.md) + +* [process](docs/sdks/ocr/README.md#process) - OCR + diff --git a/RELEASES.md b/RELEASES.md index cc8c6c29..d7b657bb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -158,4 +158,14 @@ Based on: ### Generated - [python v1.5.0] . ### Releases -- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . \ No newline at end of file +- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . + +## 2025-03-06 16:38:57 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.477.0 (2.497.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.1] . +### Releases +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . 
\ No newline at end of file diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md deleted file mode 100644 index 990408b1..00000000 --- a/docs/models/chatclassificationrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# ChatClassificationRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index ac743583..714f4f5a 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 8ca0f21f..378ccd41 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md new file mode 100644 index 00000000..2b8f46cb --- /dev/null +++ b/docs/models/chatmoderationrequest.md @@ -0,0 +1,10 @@ +# ChatModerationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatmoderationrequestinputs.md similarity index 86% rename from docs/models/chatclassificationrequestinputs.md rename to docs/models/chatmoderationrequestinputs.md index 290c9ad2..cf775d60 100644 --- a/docs/models/chatclassificationrequestinputs.md +++ b/docs/models/chatmoderationrequestinputs.md @@ -1,4 +1,4 @@ -# ChatClassificationRequestInputs +# ChatModerationRequestInputs Chat to classify diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index e1556684..b9befc89 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 22023e8b..8cf7fad1 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -9,6 +9,12 @@ value: models.ImageURLChunk = /* values here */ ``` +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + ### `models.TextChunk` ```python diff --git a/docs/models/document.md b/docs/models/document.md new file mode 100644 index 00000000..e2940355 --- /dev/null +++ b/docs/models/document.md @@ -0,0 +1,19 @@ +# Document + +Document to run OCR on + + +## Supported Types + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md new file mode 100644 index 00000000..33785c34 --- /dev/null +++ b/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 4d215c7b..07ab903a 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -6,5 +6,4 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | | `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | \ No newline at end of file +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 236d2d21..7507b90c 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index fa635932..6cc439c7 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md new file mode 100644 index 00000000..273cfa9a --- /dev/null +++ b/docs/models/ocrimageobject.md @@ -0,0 +1,13 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | \ No newline at end of file diff --git a/docs/models/ocrpagedimensions.md b/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `dpi` | *int* | :heavy_check_mark: | Dots per inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No newline at end of file diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md new file mode 100644 index 00000000..9db3bb77 --- /dev/null +++ b/docs/models/ocrpageobject.md @@ -0,0 +1,11 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | 
List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md new file mode 100644 index 00000000..dbc4dc80 --- /dev/null +++ b/docs/models/ocrrequest.md @@ -0,0 +1,14 @@ +# OCRRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md new file mode 100644 index 00000000..690d992d --- /dev/null +++ b/docs/models/ocrresponse.md @@ -0,0 +1,10 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ocrusageinfo.md b/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md index 578cdcee..86e9c396 100644 --- a/docs/models/prediction.md +++ b/docs/models/prediction.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `type` | [Optional[models.PredictionType]](../models/predictiontype.md) | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/predictiontype.md b/docs/models/predictiontype.md deleted file mode 100644 index 67ff99e2..00000000 --- a/docs/models/predictiontype.md +++ /dev/null @@ -1,8 +0,0 @@ -# PredictionType - - -## Values - -| Name | Value | -| --------- | --------- | -| `CONTENT` | content | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index dbdfba27..38e16adc 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -40,7 +40,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -101,7 +101,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | | `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 185711a7..6bcc68a9 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -24,7 +24,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(inputs=[ + res = mistral.classifiers.moderate(model="V90", inputs=[ "", ]) @@ -37,8 +37,8 @@ with Mistral( | Parameter | Type | Required | Description | | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | | `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -66,7 +66,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate_chat(inputs=[ + res = mistral.classifiers.moderate_chat(model="Roadster", inputs=[ [ { "content": "", @@ -95,7 +95,7 @@ with Mistral( "role": "assistant", }, ], - ], model="Roadster") + ], truncate_for_context_length=False) # Handle response print(res) @@ -104,11 +104,12 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | -| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 899c626f..44fae4ac 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -39,7 +39,6 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 0339c213..28de6c02 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -35,7 +35,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -85,7 +85,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md new file mode 100644 index 00000000..54f8af96 --- /dev/null +++ b/docs/sdks/ocr/README.md @@ -0,0 +1,58 @@ +# Ocr +(*ocr*) + +## Overview + +OCR API + +### Available Operations + +* [process](#process) - OCR + +## process + +OCR + +### Example Usage + +```python +from mistralai import Mistral +import os + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.ocr.process(model="Focus", document={ + "document_url": "https://dutiful-horst.org", + "type": "document_url", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.OCRResponse](../../models/ocrresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 154485d2..78003ff1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
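For readers skimming this patch: the new `ocr.process` operation documented in the generated README above maps directly onto the request fields added in `src/mistralai/models/ocrrequest.py` further down. A minimal sketch of a call under the new surface follows; note that `"Focus"` and `https://dutiful-horst.org` are the generator's placeholder values, not real model IDs or documents, and the optional `pages` / `include_image_base64` arguments simply mirror the request model.

```python
# Sketch of the new OCR endpoint added in this patch. The model name and
# document URL below are the generator's placeholders; substitute real values.
import os

from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    res = mistral.ocr.process(
        model="Focus",  # placeholder model ID from the generated example
        document={
            "type": "document_url",
            "document_url": "https://dutiful-horst.org",  # placeholder URL
        },
        pages=[0, 1],               # optional: 0-indexed pages to process
        include_image_base64=True,  # optional: return extracted images as base64
    )
    # OCRResponse carries one OCRPageObject per processed page.
    for page in res.pages:
        print(page.index, page.markdown[:80])
```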
[[package]] name = "annotated-types" @@ -35,7 +35,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.26.1)"] [[package]] @@ -333,7 +333,7 @@ idna = "*" sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -564,7 +564,7 @@ typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -697,7 +697,7 @@ colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, ] isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" mccabe = ">=0.6,<0.8" @@ -938,7 +938,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] diff --git a/pyproject.toml b/pyproject.toml index 42f36f14..8eec1a78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.5.0" +version = "1.5.1" description = "Python Client SDK for the Mistral AI API." 
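The version bump above (1.5.0 to 1.5.1) propagates into `src/mistralai/_version.py` in the hunk that follows. A quick, hedged way to confirm which build is actually installed, using only the standard library:

```python
# Sketch: verify the installed SDK version matches this release's bump.
import importlib.metadata

print(importlib.metadata.version("mistralai"))  # expected "1.5.1" after this patch
```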
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 7f36cf10..700c880e 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.0" +__version__: str = "1.5.1" __openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.0 2.497.0 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 55ad60a9..67646ffe 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, List, Mapping, Optional, Union @@ -96,7 +96,7 @@ async def parse_stream_async( def complete( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -253,7 +253,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -410,7 +410,7 @@ async def complete_async( def stream( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], @@ -585,7 +585,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index af54e96e..6ff1d6a8 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from typing import Any, Mapping, Optional, Union @@ -14,11 +14,11 @@ class Classifiers(BaseSDK): def moderate( self, *, + model: str, inputs: Union[ models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], - model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -26,8 +26,8 @@ def moderate( ) -> models.ClassificationResponse: r"""Moderations + :param model: ID of the model to use. :param inputs: Text to classify. 
- :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -42,8 +42,8 @@ def moderate( base_url = server_url request = models.ClassificationRequest( - inputs=inputs, model=model, + inputs=inputs, ) req = self._build_request( @@ -115,11 +115,11 @@ def moderate( async def moderate_async( self, *, + model: str, inputs: Union[ models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], - model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -127,8 +127,8 @@ async def moderate_async( ) -> models.ClassificationResponse: r"""Moderations + :param model: ID of the model to use. :param inputs: Text to classify. - :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -143,8 +143,8 @@ async def moderate_async( base_url = server_url request = models.ClassificationRequest( - inputs=inputs, model=model, + inputs=inputs, ) req = self._build_request_async( @@ -216,11 +216,12 @@ async def moderate_async( def moderate_chat( self, *, + model: str, inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, + models.ChatModerationRequestInputs, + models.ChatModerationRequestInputsTypedDict, ], - model: Nullable[str], + truncate_for_context_length: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,8 +229,9 @@ def moderate_chat( ) -> models.ClassificationResponse: r"""Moderations Chat - :param inputs: Chat to classify :param model: + :param inputs: Chat to classify + :param truncate_for_context_length: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -243,11 +245,10 @@ def moderate_chat( if server_url is not None: base_url = server_url - request = models.ChatClassificationRequest( - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + request = models.ChatModerationRequest( model=model, + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + truncate_for_context_length=truncate_for_context_length, ) req = self._build_request( @@ -264,7 +265,7 @@ def moderate_chat( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest + request, False, False, "json", models.ChatModerationRequest ), timeout_ms=timeout_ms, ) @@ -319,11 +320,12 @@ def moderate_chat( async def moderate_chat_async( self, *, + model: str, inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, + models.ChatModerationRequestInputs, + models.ChatModerationRequestInputsTypedDict, ], - model: Nullable[str], + truncate_for_context_length: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: 
Optional[str] = None, timeout_ms: Optional[int] = None, @@ -331,8 +333,9 @@ async def moderate_chat_async( ) -> models.ClassificationResponse: r"""Moderations Chat - :param inputs: Chat to classify :param model: + :param inputs: Chat to classify + :param truncate_for_context_length: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -346,11 +349,10 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url - request = models.ChatClassificationRequest( - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + request = models.ChatModerationRequest( model=model, + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + truncate_for_context_length=truncate_for_context_length, ) req = self._build_request_async( @@ -367,7 +369,7 @@ async def moderate_chat_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest + request, False, False, "json", models.ChatModerationRequest ), timeout_ms=timeout_ms, ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 524f09c7..f6f558b8 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -16,7 +16,6 @@ def create( *, inputs: Union[models.Inputs, models.InputsTypedDict], model: Optional[str] = "mistral-embed", - encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -28,7 +27,6 @@ def create( :param inputs: Text to embed. :param model: ID of the model to use. - :param encoding_format: The format to return the embeddings in. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -43,9 +41,8 @@ def create( base_url = server_url request = models.EmbeddingRequest( - inputs=inputs, model=model, - encoding_format=encoding_format, + inputs=inputs, ) req = self._build_request( @@ -119,7 +116,6 @@ async def create_async( *, inputs: Union[models.Inputs, models.InputsTypedDict], model: Optional[str] = "mistral-embed", - encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -131,7 +127,6 @@ async def create_async( :param inputs: Text to embed. :param model: ID of the model to use. - :param encoding_format: The format to return the embeddings in. 
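With `encoding_format` removed here (and from `EmbeddingRequest` further down), an embeddings call now takes only `inputs` and an optional `model`. A minimal sketch of the surviving surface, reusing the example strings from the generated docs:

```python
# Sketch: embeddings.create after the removal of encoding_format.
import os

from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    res = mistral.embeddings.create(
        model="mistral-embed",  # the default model per the generated docs
        inputs=["Embed this sentence.", "As well as this one."],
    )
    print(len(res.data))  # one embedding entry per input string
```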
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -146,9 +141,8 @@ async def create_async( base_url = server_url request = models.EmbeddingRequest( - inputs=inputs, model=model, - encoding_format=encoding_format, + inputs=inputs, ) req = self._build_request_async( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 40e596be..c11f6c99 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def complete( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -143,7 +143,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -272,7 +272,7 @@ async def complete_async( def stream( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -407,7 +407,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index ee083f3a..197f6e1f 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -39,16 +39,6 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus -from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestInputs, - ChatClassificationRequestInputsTypedDict, - ChatClassificationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, -) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -78,6 +68,16 @@ ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, ) +from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs, + ChatModerationRequestInputsTypedDict, + ChatModerationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, +) from .checkpointout import CheckpointOut, CheckpointOutTypedDict from .classificationobject import ClassificationObject, ClassificationObjectTypedDict from .classificationrequest import ( @@ -115,6 +115,7 @@ DetailedJobOutStatus, DetailedJobOutTypedDict, ) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestTypedDict, @@ -274,7 +275,13 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict -from .prediction import Prediction, PredictionType, PredictionTypedDict +from .ocrimageobject import OCRImageObject, 
OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict +from .ocrresponse import OCRResponse, OCRResponseTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -386,10 +393,6 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", - "ChatClassificationRequest", - "ChatClassificationRequestInputs", - "ChatClassificationRequestInputsTypedDict", - "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -406,6 +409,10 @@ "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs", + "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "ClassificationObject", @@ -445,6 +452,10 @@ "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", + "Document", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", @@ -568,11 +579,22 @@ "ModelCapabilitiesTypedDict", "ModelList", "ModelListTypedDict", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", "Object", "One", "OneTypedDict", "Prediction", - "PredictionType", "PredictionTypedDict", "QueryParamStatus", "ReferenceChunk", diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index c9a28945..fd540d99 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -26,6 +26,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -35,6 +36,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py deleted file mode 100644 index 6f3967dc..00000000 --- a/src/mistralai/models/chatclassificationrequest.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -from mistralai.utils import get_discriminator -import pydantic -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TwoTypedDict = TypeAliasType( - "TwoTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Two = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -OneTypedDict = TypeAliasType( - "OneTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -One = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatClassificationRequestInputsTypedDict = TypeAliasType( - "ChatClassificationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], -) -r"""Chat to classify""" - - -ChatClassificationRequestInputs = TypeAliasType( - "ChatClassificationRequestInputs", Union[List[One], List[List[Two]]] -) -r"""Chat to classify""" - - -class ChatClassificationRequestTypedDict(TypedDict): - inputs: ChatClassificationRequestInputsTypedDict - r"""Chat to classify""" - model: Nullable[str] - - -class ChatClassificationRequest(BaseModel): - inputs: Annotated[ChatClassificationRequestInputs, pydantic.Field(alias="input")] - r"""Chat to classify""" - - model: Nullable[str] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index a253ac4d..eaed8435 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -59,7 +59,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -90,7 +90,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -152,14 +152,7 @@ def serialize_model(self, handler): "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index a1697d58..4f593c01 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -63,7 +63,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -93,7 +93,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessages] @@ -154,14 +154,7 @@ def serialize_model(self, handler): "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py new file mode 100644 index 00000000..5b25b877 --- /dev/null +++ b/src/mistralai/models/chatmoderationrequest.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +TwoTypedDict = TypeAliasType( + "TwoTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = TypeAliasType( + "OneTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputsTypedDict = TypeAliasType( + "ChatModerationRequestInputsTypedDict", + Union[List[OneTypedDict], List[List[TwoTypedDict]]], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs = TypeAliasType( + "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + model: str + inputs: ChatModerationRequestInputsTypedDict + r"""Chat to classify""" + truncate_for_context_length: NotRequired[bool] + + +class ChatModerationRequest(BaseModel): + model: str + + inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + truncate_for_context_length: Optional[bool] = False diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py index d18ffa61..39e25390 100644 --- a/src/mistralai/models/classificationrequest.py +++ b/src/mistralai/models/classificationrequest.py @@ -1,11 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel import pydantic -from pydantic import model_serializer from typing import List, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict ClassificationRequestInputsTypedDict = TypeAliasType( @@ -21,43 +20,15 @@ class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" inputs: ClassificationRequestInputsTypedDict r"""Text to classify.""" - model: NotRequired[Nullable[str]] class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] r"""Text to classify.""" - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["model"] - nullable_fields = ["model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index feeda7cd..ff7d9fcf 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict @@ -12,13 +13,19 @@ ContentChunkTypedDict = TypeAliasType( "ContentChunkTypedDict", - Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + DocumentURLChunkTypedDict, + ], ) ContentChunk = Annotated[ Union[ Annotated[ImageURLChunk, Tag("image_url")], + Annotated[DocumentURLChunk, Tag("document_url")], Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], ], diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py new file mode 100644 index 00000000..23622335 --- /dev/null +++ b/src/mistralai/models/documenturlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + type: Literal["document_url"] + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + + +class DocumentURLChunk(BaseModel): + document_url: str + + TYPE: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "document_name"] + nullable_fields = ["document_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 4de8c312..b5ccd26e 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -1,9 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel import pydantic -from pydantic import model_serializer from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -21,8 +20,6 @@ class EmbeddingRequestTypedDict(TypedDict): r"""Text to embed.""" model: NotRequired[str] r"""ID of the model to use.""" - encoding_format: NotRequired[Nullable[str]] - r"""The format to return the embeddings in.""" class EmbeddingRequest(BaseModel): @@ -31,36 +28,3 @@ class EmbeddingRequest(BaseModel): model: Optional[str] = "mistral-embed" r"""ID of the model to use.""" - - encoding_format: OptionalNullable[str] = UNSET - r"""The format to return the embeddings in.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["model", "encoding_format"] - nullable_fields = ["encoding_format"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index fb72ba41..01f8b2d1 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -20,7 +20,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -46,7 +46,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -92,7 +92,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 5e16a170..cc4cf6e8 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -20,7 +20,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -45,7 +45,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -90,7 +90,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py new file mode 100644 index 00000000..16b41e6c --- /dev/null +++ b/src/mistralai/models/ocrimageobject.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["image_base64"] + nullable_fields = [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrpagedimensions.py b/src/mistralai/models/ocrpagedimensions.py new file mode 100644 index 00000000..d1aeb54d --- /dev/null +++ b/src/mistralai/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py new file mode 100644 index 00000000..c3ef8916 --- /dev/null +++ b/src/mistralai/models/ocrpageobject.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List +from typing_extensions import TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["dimensions"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py new file mode 100644 index 00000000..54339e9e --- /dev/null +++ b/src/mistralai/models/ocrrequest.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", Union[ImageURLChunkTypedDict, DocumentURLChunkTypedDict] +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + ] + nullable_fields = [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py new file mode 100644 index 00000000..45fb06e3 --- /dev/null +++ b/src/mistralai/models/ocrresponse.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py new file mode 100644 index 00000000..9dced73b --- /dev/null +++ b/src/mistralai/models/ocrusageinfo.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["doc_size_bytes"] + nullable_fields = ["doc_size_bytes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py index 63593122..7937c9d1 100644 --- a/src/mistralai/models/prediction.py +++ b/src/mistralai/models/prediction.py @@ -9,17 +9,16 @@ from typing_extensions import Annotated, NotRequired, TypedDict -PredictionType = Literal["content"] - - class PredictionTypedDict(TypedDict): - type: PredictionType + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): TYPE: Annotated[ - Annotated[Optional[PredictionType], AfterValidator(validate_const("content"))], + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], pydantic.Field(alias="type"), ] = "content" diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py new file mode 100644 index 00000000..56c1da51 --- /dev/null +++ b/src/mistralai/ocr.py @@ -0,0 +1,238 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0
+        :param include_image_base64: Include base64-encoded images in the response
+        :param image_limit: Max images to extract
+        :param image_min_size: Minimum height and width of image to extract
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+
+        request = models.OCRRequest(
+            model=model,
+            id=id,
+            document=utils.get_pydantic_model(document, models.Document),
+            pages=pages,
+            include_image_base64=include_image_base64,
+            image_limit=image_limit,
+            image_min_size=image_min_size,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/ocr",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.OCRRequest
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                operation_id="ocr_v1_ocr_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return utils.unmarshal_json(http_res.text, models.OCRResponse)
+        if utils.match_response(http_res, "422", "application/json"):
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res_text,
+            http_res,
+        )
+
+    async def process_async(
+        self,
+        *,
+        model: Nullable[str],
+        document: Union[models.Document, models.DocumentTypedDict],
+        id: Optional[str] = None,
+        pages: OptionalNullable[List[int]] = UNSET,
+        include_image_base64: OptionalNullable[bool] = UNSET,
+        image_limit: OptionalNullable[int] = UNSET,
+        image_min_size: OptionalNullable[int] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.OCRResponse:
+        r"""OCR
+
+        :param model:
+        :param document: Document to run OCR on
+        :param id:
+        :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0
+        :param include_image_base64: Include base64-encoded images in the response
+        :param image_limit: Max images to extract
+        :param image_min_size: Minimum height and width of image to extract
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+
+        request = models.OCRRequest(
+            model=model,
+            id=id,
+            document=utils.get_pydantic_model(document, models.Document),
+            pages=pages,
+            include_image_base64=include_image_base64,
+            image_limit=image_limit,
+            image_min_size=image_min_size,
+        )
+
+        req = self._build_request_async(
+            method="POST",
+            path="/v1/ocr",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.OCRRequest
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                operation_id="ocr_v1_ocr_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return utils.unmarshal_json(http_res.text, models.OCRResponse)
+        if utils.match_response(http_res, "422", "application/json"):
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res_text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        http_res_text = await utils.stream_to_text_async(http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res_text,
+            http_res,
+        )
diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py
index e24f1581..00d8370a 100644
--- a/src/mistralai/sdk.py
+++ b/src/mistralai/sdk.py
@@ -17,6 +17,7 @@
 from mistralai.fim import Fim
 from mistralai.fine_tuning import FineTuning
 from mistralai.models_ import Models
+from mistralai.ocr import Ocr
 from mistralai.types import OptionalNullable, UNSET
 from typing import Any, Callable, Dict, Optional, Union, cast
 import weakref
@@ -41,6 +42,8 @@ class Mistral(BaseSDK):
     r"""Embeddings API."""
     classifiers: Classifiers
     r"""Classifiers API."""
+    ocr: Ocr
+    r"""OCR API."""
 
     def __init__(
         self,
@@ -139,6 +142,7 @@ def _init_sdks(self):
         self.agents = Agents(self.sdk_configuration)
         self.embeddings = Embeddings(self.sdk_configuration)
         self.classifiers = Classifiers(self.sdk_configuration)
+        self.ocr = Ocr(self.sdk_configuration)
 
     def __enter__(self):
         return self

From da98b137fb09fa95cf5f92b9881d6ac9ba30a9e4 Mon Sep 17 00:00:00 2001
From: Alexandre Menasria
Date: Thu, 6 Mar 2025 17:54:53 +0100
Subject: [PATCH 2/5] Add OCR example

---
 examples/chat_with_ocr.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 examples/chat_with_ocr.py

diff --git a/examples/chat_with_ocr.py b/examples/chat_with_ocr.py
new file mode 100644
index 00000000..05003944
--- /dev/null
+++ b/examples/chat_with_ocr.py
@@ -0,0 +1,23 @@
+from mistralai import Mistral
+import os
+import json
+
+MISTRAL_7B_PDF = "https://arxiv.org/pdf/2310.06825"
+
+def main():
+    api_key = os.environ["MISTRAL_API_KEY"]
+    client = Mistral(api_key=api_key)
+
+    pdf_response = client.ocr.process(document={
+        "document_url": MISTRAL_7B_PDF,
+        "type": "document_url",
+        "document_name": "mistral-7b-pdf",
+    }, model="mistral-ocr-latest", include_image_base64=True)
+
+    response_dict = json.loads(pdf_response.model_dump_json())
+    json_string = json.dumps(response_dict, indent=4)
+    print(json_string)
+
+
+if __name__ == "__main__":
+    main()

From 75e92b9c73dd03cba763b83bbf0ae4a0daec85c6 Mon Sep 17 00:00:00 2001
From: Alexandre Menasria
Date: Thu, 6 Mar 2025 18:16:46 +0100
Subject: [PATCH 3/5] Add OCR example with file upload

---
 examples/chat_with_ocr.py | 50 +++++++++++++++++++++++++++++++++++----
 1 file changed, 46 insertions(+), 4 deletions(-)

diff --git a/examples/chat_with_ocr.py b/examples/chat_with_ocr.py
index 05003944..7531b20e 100644
--- a/examples/chat_with_ocr.py
+++ b/examples/chat_with_ocr.py
@@ -2,22 +2,64 @@
 import os
 import json
 
-MISTRAL_7B_PDF = "https://arxiv.org/pdf/2310.06825"
+MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825"
+MIXTRAL_OF_EXPERTS_PDF_URL = "https://arxiv.org/pdf/2401.04088"
+MOE_FILENAME = "mixtral_of_experts.pdf"
 
-def main():
+def ocr_with_url():
     api_key = os.environ["MISTRAL_API_KEY"]
     client = Mistral(api_key=api_key)
 
+    # Using a URL
     pdf_response = client.ocr.process(document={
-        "document_url": MISTRAL_7B_PDF,
+        "document_url": MISTRAL_7B_PDF_URL,
         "type": "document_url",
         "document_name": "mistral-7b-pdf",
     }, model="mistral-ocr-latest", include_image_base64=True)
 
+    # Print the parsed PDF
     response_dict = json.loads(pdf_response.model_dump_json())
     json_string = json.dumps(response_dict, indent=4)
     print(json_string)
 
+
+def ocr_with_file():
+    from pathlib import Path
+    import urllib.request
+
+    api_key = os.environ["MISTRAL_API_KEY"]
+    client = Mistral(api_key=api_key)
+
+    pdf_file = Path(MOE_FILENAME)
+    # Download the file if it doesn't exist
+    if not pdf_file.is_file():
+        urllib.request.urlretrieve(MIXTRAL_OF_EXPERTS_PDF_URL, MOE_FILENAME)
+
+    uploaded_file = client.files.upload(
+        file={
+            "file_name": pdf_file.stem,
+            "content": pdf_file.read_bytes(),
+        },
+        purpose="ocr",
+    )
+
+    signed_url = client.files.get_signed_url(file_id=uploaded_file.id, expiry=1)
+
+    pdf_response = client.ocr.process(document={
+        "document_url": signed_url.url,
+        "type": "document_url",
+        "document_name": "mixtral-of-experts-pdf",
+    }, model="mistral-ocr-latest", include_image_base64=True)
+
+    # Print the parsed PDF
+    response_dict = json.loads(pdf_response.model_dump_json())
+    json_string = json.dumps(response_dict, indent=4)
+    print(json_string)
+
+    # Remove the file
+    pdf_file.unlink()
+
 
 if __name__ == "__main__":
-    main()
+    ocr_with_url()
+    ocr_with_file()

From 2416c3858b535eaa9aec54fe307c605ace0f69c5 Mon Sep 17 00:00:00 2001
From: Alexandre Menasria
Date: Thu, 6 Mar 2025 18:20:48 +0100
Subject: [PATCH 4/5] Even with TS example

---
 ...t_with_ocr.py => ocr_process_from_file.py} | 30 ++++---------------
 examples/ocr_process_from_url.py              | 25 ++++++++++++++++
 2 files changed, 31 insertions(+), 24 deletions(-)
 rename examples/{chat_with_ocr.py => ocr_process_from_file.py} (62%)
 create mode 100644 examples/ocr_process_from_url.py

diff --git a/examples/chat_with_ocr.py b/examples/ocr_process_from_file.py
similarity index 62%
rename from examples/chat_with_ocr.py
rename to examples/ocr_process_from_file.py
index 7531b20e..70c9d4a8 100644
--- a/examples/chat_with_ocr.py
+++ b/examples/ocr_process_from_file.py
@@ -1,40 +1,23 @@
 from mistralai import Mistral
 import os
 import json
+from pathlib import Path
+import urllib.request
 
-MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825"
 MIXTRAL_OF_EXPERTS_PDF_URL = "https://arxiv.org/pdf/2401.04088"
 MOE_FILENAME = "mixtral_of_experts.pdf"
 
-def ocr_with_url():
-    api_key = os.environ["MISTRAL_API_KEY"]
-    client = Mistral(api_key=api_key)
-
-    # Using a URL
-    pdf_response = client.ocr.process(document={
-        "document_url": MISTRAL_7B_PDF_URL,
-        "type": "document_url",
-        "document_name": "mistral-7b-pdf",
-    }, model="mistral-ocr-latest", include_image_base64=True)
-
-    # Print the parsed PDF
-    response_dict = json.loads(pdf_response.model_dump_json())
-    json_string = json.dumps(response_dict, indent=4)
-    print(json_string)
-
-
-def ocr_with_file():
-    from pathlib import Path
-    import urllib.request
+def main():
     api_key = os.environ["MISTRAL_API_KEY"]
     client = Mistral(api_key=api_key)
-
     pdf_file = Path(MOE_FILENAME)
+
     # Download the file if it doesn't exist
     if not pdf_file.is_file():
         urllib.request.urlretrieve(MIXTRAL_OF_EXPERTS_PDF_URL, MOE_FILENAME)
 
+    # Upload the file
     uploaded_file = client.files.upload(
         file={
             "file_name": pdf_file.stem,
             "content": pdf_file.read_bytes(),
         },
         purpose="ocr",
     )
@@ -61,5 +44,4 @@ def ocr_with_file():
 
 if __name__ == "__main__":
-    ocr_with_url()
-    ocr_with_file()
+    main()

diff --git a/examples/ocr_process_from_url.py b/examples/ocr_process_from_url.py
new file mode 100644
index 00000000..4cb11835
--- /dev/null
+++ b/examples/ocr_process_from_url.py
@@ -0,0 +1,25 @@
+from mistralai import Mistral
+import os
+import json
+
+MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825"
+
+def main():
+    api_key = os.environ["MISTRAL_API_KEY"]
+    client = Mistral(api_key=api_key)
+
+    # Using a URL
+    pdf_response = client.ocr.process(document={
+        "document_url": MISTRAL_7B_PDF_URL,
+        "type": "document_url",
+        "document_name": "mistral-7b-pdf",
+    }, model="mistral-ocr-latest", include_image_base64=True)
+
+    # Print the parsed PDF
+    response_dict = json.loads(pdf_response.model_dump_json())
+    json_string = json.dumps(response_dict, indent=4)
+    print(json_string)
+
+
+if __name__ == "__main__":
+    main()

From d43d7c2a7f5e9ae5cbd149d315a3e3d5cfe29471 Mon Sep 17 00:00:00 2001
From: Alexandre Menasria
Date: Thu, 6 Mar 2025 18:26:05 +0100
Subject: [PATCH 5/5] Fix example url

---
 examples/async_chat_with_image_no_streaming.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py
index 7e415305..ecb42257 100755
--- a/examples/async_chat_with_image_no_streaming.py
+++ b/examples/async_chat_with_image_no_streaming.py
@@ -21,7 +21,7 @@
             {"type": "text", "text": "What's in this image?"},
             {
                 "type": "image_url",
-                "image_url": "https://mistral.ai/images/news/codestral/FIM_table.png",
+                "image_url": "https://cms.mistral.ai/assets/af26a11d-0793-439f-a06e-7694b24b8270",
             },
         ]
    )
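
The new `ocr` module exposes both a synchronous `client.ocr.process(...)` and an awaitable `client.ocr.process_async(...)`, but the examples in the series only exercise the synchronous path. A minimal async sketch, assuming nothing beyond the `process_async` signature shown above (the arXiv URL and model name are reused from the examples):

```python
import asyncio
import os

from mistralai import Mistral

MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825"


async def main():
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # Same request shape as process(); pages is 0-indexed per the docstring
    pdf_response = await client.ocr.process_async(
        model="mistral-ocr-latest",
        document={
            "type": "document_url",
            "document_url": MISTRAL_7B_PDF_URL,
            "document_name": "mistral-7b-pdf",
        },
        pages=[0, 1],  # only the first two pages
        include_image_base64=False,
    )
    print(pdf_response.model_dump_json())


if __name__ == "__main__":
    asyncio.run(main())
```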
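Error handling follows directly from the generated handler: a 422 surfaces as `models.HTTPValidationError` with the parsed body on `data`, and any other non-2xx response as `models.SDKError`. A sketch of the calling pattern; the `pages` field on `OCRResponse` and the `status_code`/`message` attributes on `SDKError` are assumed to match the generated models:

```python
import os

from mistralai import Mistral, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    pdf_response = client.ocr.process(
        model="mistral-ocr-latest",
        document={
            "type": "document_url",
            "document_url": "https://arxiv.org/pdf/2310.06825",
        },
    )
    # One entry per processed page (assumed field name)
    print(f"Processed {len(pdf_response.pages)} page(s)")
except models.HTTPValidationError as e:
    # 422: the request body failed server-side validation
    print("Validation error:", e.data)
except models.SDKError as e:
    # Any other 4XX/5XX, or the "Unexpected response" fallback
    print(f"API error {e.status_code}: {e.message}")
```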
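Per-call retry overrides also apply to OCR: as the tuple in the handler shows, a passed `utils.RetryConfig` is used for HTTP 429/500/502/503/504. A sketch under the assumption that `RetryConfig("backoff", BackoffStrategy(...), retry_connection_errors)` keeps the constructor shape used elsewhere in this SDK, with intervals in milliseconds; the concrete values here are illustrative:

```python
import os

from mistralai import Mistral
from mistralai.utils import BackoffStrategy, RetryConfig

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

pdf_response = client.ocr.process(
    model="mistral-ocr-latest",
    document={
        "type": "document_url",
        "document_url": "https://arxiv.org/pdf/2310.06825",
    },
    # Backoff: 500 ms initial, 60 s cap, x1.5 growth, 5 min total budget;
    # False = do not retry connection errors
    retries=RetryConfig("backoff", BackoffStrategy(500, 60000, 1.5, 300000), False),
)
print(pdf_response.model)
```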