Skip to content

Commit 6c06b25

Browse files
committed
format
1 parent 9531dcc commit 6c06b25

File tree

3 files changed

+152
-61
lines changed

3 files changed

+152
-61
lines changed

python/mlc_llm/conversation_template/llama.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,8 @@
2121
role_content_sep="<|header_end|>\n\n",
2222
role_empty_sep="<|header_end|>\n\n",
2323
stop_str=[],
24-
stop_token_ids=[200001, 200007, 200008],# "<|end_of_text|>", "<|eom|>", "<|eot|>"
25-
system_prefix_token_ids=[200000],# "<|begin_of_text|>"
24+
stop_token_ids=[200001, 200007, 200008], # "<|end_of_text|>", "<|eom|>", "<|eot|>"
25+
system_prefix_token_ids=[200000], # "<|begin_of_text|>"
2626
add_role_after_system_message=False,
2727
)
2828
)

python/mlc_llm/model/llama4/llama4_loader.py

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,9 @@ def huggingface(model_config: Llama4Config, quantization: Quantization) -> Exter
6666
mlc_param = named_parameters[mlc_name]
6767
mapping.add_mapping(
6868
mlc_name,
69-
[hf_name,],
69+
[
70+
hf_name,
71+
],
7072
functools.partial(
7173
lambda x, dtype: x.astype(dtype),
7274
dtype=mlc_param.dtype,
@@ -80,7 +82,9 @@ def huggingface(model_config: Llama4Config, quantization: Quantization) -> Exter
8082
mlc_param = named_parameters[mlc_name]
8183
mapping.add_mapping(
8284
mlc_name,
83-
[hf_name,],
85+
[
86+
hf_name,
87+
],
8488
functools.partial(
8589
lambda x, dtype: x.astype(dtype),
8690
dtype=mlc_param.dtype,
@@ -90,11 +94,13 @@ def huggingface(model_config: Llama4Config, quantization: Quantization) -> Exter
9094
mlp = f"model.layers.{i}.feed_forward"
9195
mlc_name = f"{mlp}.experts.down_proj"
9296
hf_name = f"language_model.{mlp}.experts.down_proj"
93-
97+
9498
mlc_param = named_parameters[mlc_name]
9599
mapping.add_mapping(
96100
mlc_name,
97-
[hf_name,],
101+
[
102+
hf_name,
103+
],
98104
functools.partial(
99105
lambda x, dtype: x.astype(dtype),
100106
dtype=mlc_param.dtype,
@@ -113,7 +119,8 @@ def huggingface(model_config: Llama4Config, quantization: Quantization) -> Exter
113119
)
114120
return mapping
115121

116-
#TODO: This needs to be done for llama4
122+
123+
# TODO: This needs to be done for llama4
117124
def awq(model_config: Llama4Config, quantization: Quantization) -> ExternMapping:
118125
"""Returns a parameter mapping that maps from the names of MLC LLM parameters to
119126
the names of AWQ parameters.

0 commit comments

Comments (0)