Skip to content

Commit 64c8250

Browse files
authored
Remove Python 3.9 support (#145)
* Remove Python 3.9 support
* Fix typing issue
* Fix
* Fix issue from merge
1 parent 439c9d5 commit 64c8250

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

46 files changed

+438
-851
lines changed

.github/workflows/lint_build_test.yaml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ jobs:
2727
matrix:
2828
# Support all LTS versions of Python
2929
python-version: [
30-
"3.9",
3130
"3.10",
3231
"3.11",
3332
"3.12",

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ authors = [{name = "bam4d", email = "[email protected]"}]
1010
license = "Apache-2.0"
1111
license-files = ["LICENSE"]
1212
readme = "README.md"
13-
requires-python = ">=3.9.0,<3.14"
13+
requires-python = ">=3.10.0,<3.14"
1414

1515
dependencies = [
1616
"pydantic>=2.7,<3.0",

src/mistral_common/audio.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
from enum import Enum
66
from functools import cache
77
from pathlib import Path
8-
from typing import TYPE_CHECKING, Union
8+
from typing import TYPE_CHECKING
99

1010
import numpy as np
1111
import requests
@@ -228,7 +228,7 @@ def resample(self, new_sampling_rate: int) -> None:
228228
self.sampling_rate = new_sampling_rate
229229

230230

231-
def hertz_to_mel(freq: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
231+
def hertz_to_mel(freq: float | np.ndarray) -> float | np.ndarray:
232232
r"""Convert frequency from hertz to mels using the "slaney" mel-scale.
233233
234234
Args:

src/mistral_common/exceptions.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,3 @@
1-
from typing import Optional
2-
3-
41
class MistralCommonException(Exception):
52
r"""Base class for all Mistral exceptions.
63
@@ -12,7 +9,7 @@ class MistralCommonException(Exception):
129

1310
def __init__(
1411
self,
15-
message: Optional[str] = None,
12+
message: str | None = None,
1613
) -> None:
1714
r"""Initialize the `MistralCommonException` with an optional message.
1815

src/mistral_common/experimental/app/main.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
from functools import lru_cache
22
from pathlib import Path
3-
from typing import Union
43

54
import click
65
import uvicorn
@@ -21,7 +20,7 @@
2120

2221

2322
def create_app(
24-
tokenizer: Union[str, Path, MistralTokenizer],
23+
tokenizer: str | Path | MistralTokenizer,
2524
validation_mode: ValidationMode = ValidationMode.test,
2625
engine_url: str = "127.0.0.1",
2726
engine_backend: EngineBackend = EngineBackend.llama_cpp,
@@ -114,8 +113,8 @@ def cli() -> None:
114113
show_default=True,
115114
)
116115
def serve(
117-
tokenizer_path: Union[str, Path],
118-
validation_mode: Union[ValidationMode, str] = ValidationMode.test,
116+
tokenizer_path: str | Path,
117+
validation_mode: ValidationMode | str = ValidationMode.test,
119118
host: str = "127.0.0.1",
120119
port: int = 0,
121120
engine_url: str = "http://127.0.0.1:8080",

src/mistral_common/experimental/app/models.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import importlib.metadata
22
from enum import Enum
33
from pathlib import Path
4-
from typing import Any, List, Optional, Union
4+
from typing import Any
55

66
from pydantic import BaseModel, ConfigDict, field_validator
77
from pydantic_settings import BaseSettings
@@ -23,8 +23,8 @@ class OpenAIChatCompletionRequest(BaseModel):
2323
This class accepts extra fields that are not validated.
2424
"""
2525

26-
messages: List[dict[str, Union[str, List[dict[str, Union[str, dict[str, Any]]]]]]]
27-
tools: Optional[List[dict[str, Any]]] = None
26+
messages: list[dict[str, str | list[dict[str, str | dict[str, Any]]]]]
27+
tools: list[dict[str, Any]] | None = None
2828

2929
# Allow extra fields as the `from_openai` method will handle them.
3030
# We never validate the input, so we don't need to worry about the extra fields.
@@ -83,16 +83,16 @@ def _validate_engine_url(cls, value: str) -> str:
8383

8484
@field_validator("engine_backend", mode="before")
8585
@classmethod
86-
def _validate_backend(cls, value: Union[str, EngineBackend]) -> EngineBackend:
86+
def _validate_backend(cls, value: str | EngineBackend) -> EngineBackend:
8787
if isinstance(value, str):
8888
value = EngineBackend(value)
8989
return value
9090

9191
def model_post_init(self, context: Any) -> None:
9292
super().model_post_init(context)
93-
self._tokenizer: Optional[MistralTokenizer] = None
93+
self._tokenizer: MistralTokenizer | None = None
9494

95-
def _load_tokenizer(self, tokenizer_path: Union[str, Path], validation_mode: ValidationMode) -> None:
95+
def _load_tokenizer(self, tokenizer_path: str | Path, validation_mode: ValidationMode) -> None:
9696
if tokenizer_path == "":
9797
raise ValueError("Tokenizer path must be set via the environment variable `TOKENIZER_PATH`.")
9898
elif self._tokenizer is not None:

src/mistral_common/experimental/app/routers.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import json
2-
from typing import Annotated, List, Optional, Union
2+
from typing import Annotated
33

44
import requests
55
from fastapi import APIRouter, Body, Depends, HTTPException
@@ -33,7 +33,7 @@ async def redirect_to_docs() -> RedirectResponse:
3333

3434
@tokenize_router.post("/")
3535
async def tokenize_request(
36-
request: Union[ChatCompletionRequest, OpenAIChatCompletionRequest],
36+
request: ChatCompletionRequest | OpenAIChatCompletionRequest,
3737
settings: Annotated[Settings, Depends(get_settings)],
3838
) -> list[int]:
3939
r"""Tokenize a chat completion request."""
@@ -100,7 +100,7 @@ async def detokenize_to_assistant_message(
100100
else:
101101
content_tokens, tool_calls_tokens = tokens, ()
102102

103-
content: Optional[Union[str, List[Union[TextChunk, ThinkChunk]]]] = None
103+
content: str | list[TextChunk | ThinkChunk] | None = None
104104

105105
if settings.tokenizer.instruct_tokenizer.tokenizer.version >= TokenizerVersion.v13:
106106
assert isinstance(settings.tokenizer.instruct_tokenizer, InstructTokenizerV13)
@@ -150,7 +150,7 @@ async def detokenize_to_assistant_message(
150150

151151
@main_router.post("/v1/chat/completions", tags=["chat", "completions"])
152152
async def generate(
153-
request: Union[ChatCompletionRequest, OpenAIChatCompletionRequest],
153+
request: ChatCompletionRequest | OpenAIChatCompletionRequest,
154154
settings: Annotated[Settings, Depends(get_settings)],
155155
) -> AssistantMessage:
156156
r"""Generate a chat completion.

src/mistral_common/experimental/think.py

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,22 @@
1-
from typing import List, Tuple
2-
3-
41
def _split_content_and_think_chunks(
5-
tokens: List[int], begin_think_token_id: int, end_think_token_id: int
6-
) -> List[Tuple[List[int], bool]]:
2+
tokens: list[int], begin_think_token_id: int, end_think_token_id: int
3+
) -> list[tuple[list[int], bool]]:
74
r"""Split the content and think chunks from a list of tokens.
85
96
Args:
10-
tokens: List of tokens.
7+
tokens: list of tokens.
118
begin_think_token_id: The token id for the begin think token.
129
end_think_token_id: The token id for the end think token.
1310
1411
Returns:
15-
List of tuples, where each tuple contains a list of tokens and a boolean indicating if the chunk is a think
12+
list of tuples, where each tuple contains a list of tokens and a boolean indicating if the chunk is a think
1613
chunk.
1714
"""
1815
if not tokens:
1916
return []
2017

21-
content_chunks: List[Tuple[List[int], bool]] = []
22-
current_content: List[int] = []
18+
content_chunks: list[tuple[list[int], bool]] = []
19+
current_content: list[int] = []
2320

2421
in_think_chunk = False
2522
for token in tokens:

src/mistral_common/experimental/tools.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import json
2-
from typing import List, Sequence, Tuple
2+
from typing import Sequence
33

44
from mistral_common.experimental.utils import (
55
_split_integer_list_by_value,
@@ -18,8 +18,8 @@ class InvalidArgsToolCallError(InvalidToolCallError):
1818

1919

2020
def _split_content_and_tool_calls(
21-
tokens: List[int], tool_call_token_id: int
22-
) -> tuple[List[int], Tuple[List[int], ...]]:
21+
tokens: list[int], tool_call_token_id: int
22+
) -> tuple[list[int], tuple[list[int], ...]]:
2323
r"""Split the content and tool calls from a list of tokens.
2424
2525
The content is the first sequence of tokens that does not start with the tool call token ID.
@@ -48,7 +48,7 @@ def _split_content_and_tool_calls(
4848
return content_tokens, tools_calls_tokens
4949

5050

51-
def _decode_tool_calls_v2_up_to_v7(tool_call_tokens: list[int], tokenizer: Tokenizer) -> List[ToolCall]:
51+
def _decode_tool_calls_v2_up_to_v7(tool_call_tokens: list[int], tokenizer: Tokenizer) -> list[ToolCall]:
5252
r"""Decode a list of tool call tokens into a list of tool calls for tokenizer versions v2 to v7.
5353
5454
Note:
@@ -130,7 +130,7 @@ def _decode_tool_call_v11(tool_call_tokens: list[int], tokenizer: Tokenizer) ->
130130
return tool_call
131131

132132

133-
def _decode_tool_calls(tool_call_tokens: Sequence[list[int]], tokenizer: Tokenizer) -> List[ToolCall]:
133+
def _decode_tool_calls(tool_call_tokens: Sequence[list[int]], tokenizer: Tokenizer) -> list[ToolCall]:
134134
r"""Decode a list of tool call tokens into a list of tool calls.
135135
136136
Note:

src/mistral_common/experimental/utils.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
1-
from typing import List, Tuple
2-
31
from mistral_common.tokens.tokenizers.base import Tokenizer
42

53

6-
def _split_integer_list_by_value(list_: List[int], value: int) -> Tuple[List[int], ...]:
4+
def _split_integer_list_by_value(list_: list[int], value: int) -> tuple[list[int], ...]:
75
r"""Split a list of integers by a given value.
86
97
Args:
@@ -32,8 +30,8 @@ def _split_integer_list_by_value(list_: List[int], value: int) -> Tuple[List[int
3230

3331

3432
def _split_tokens_by_one_occurence_control_token(
35-
list_: List[int], tokenizer: Tokenizer, control_token: str
36-
) -> Tuple[List[int], List[int]]:
33+
list_: list[int], tokenizer: Tokenizer, control_token: str
34+
) -> tuple[list[int], list[int]]:
3735
r"""Split a list of integers by a given control token.
3836
3937
Raises:

0 commit comments

Comments (0)