Commit 46a783c

Move Synthetic provider to use updated models endpoint and dynamic fetcher.
Also redesign 1 test that was overly order-specific
1 parent ea7901d commit 46a783c

16 files changed: +385 -451 lines

packages/types/src/provider-settings.ts

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ export const dynamicProviders = [
 	"requesty",
 	"unbound",
 	"glama",
+	"synthetic",
 ] as const
 
 export type DynamicProvider = (typeof dynamicProviders)[number]
Lines changed: 11 additions & 208 deletions
@@ -1,221 +1,24 @@
-// kilocode_change: provider added
+// kilocode_change: provider added - dynamic models only
 
 import type { ModelInfo } from "../model.js"
 
-export type SyntheticModelId =
-	| "hf:MiniMaxAI/MiniMax-M2"
-	| "hf:zai-org/GLM-4.6"
-	| "hf:zai-org/GLM-4.5"
-	| "hf:openai/gpt-oss-120b"
-	| "hf:moonshotai/Kimi-K2-Instruct-0905"
-	| "hf:moonshotai/Kimi-K2-Thinking"
-	| "hf:reissbaker/llama-3.1-70b-abliterated-lora"
-	| "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
-	| "hf:deepseek-ai/DeepSeek-V3.1"
-	| "hf:meta-llama/Llama-3.1-8B-Instruct"
-	| "hf:meta-llama/Llama-3.1-70B-Instruct"
-	| "hf:meta-llama/Llama-3.1-405B-Instruct"
-	| "hf:meta-llama/Llama-3.3-70B-Instruct"
-	| "hf:deepseek-ai/DeepSeek-V3-0324"
-	| "hf:deepseek-ai/DeepSeek-R1"
-	| "hf:moonshotai/Kimi-K2-Instruct"
-	| "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct"
-	| "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct"
-	| "hf:Qwen/Qwen2.5-Coder-32B-Instruct"
-	| "hf:Qwen/Qwen3-235B-A22B-Thinking-2507"
-	| "hf:Qwen/Qwen3-235B-A22B-Instruct-2507"
+export type SyntheticModelId = string
 
-export const syntheticDefaultModelId: SyntheticModelId = "hf:zai-org/GLM-4.6"
+export const syntheticDefaultModelId = "hf:zai-org/GLM-4.6"
 
-export const syntheticModels = {
-	"hf:MiniMaxAI/MiniMax-M2": {
-		maxTokens: 192608,
-		contextWindow: 192608,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.55,
-		outputPrice: 2.19,
-		description:
-			"MiniMax's latest hybrid reasoning model: it's fast, it thinks before it responds, it's great at using tools via the API, and it's a strong coding model. 192k-token context.",
-	},
-	"hf:moonshotai/Kimi-K2-Thinking": {
-		maxTokens: 262144,
-		contextWindow: 262144,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.55,
-		outputPrice: 2.19,
-		description:
-			"Moonshot's latest hybrid reasoner. Extremely good at math — it saturates the AIME25 math benchmark — and competitive with GPT-5 and Claude 4.5 at tool use and codegen. 256k-token context.",
-	},
-	"hf:moonshotai/Kimi-K2-Instruct-0905": {
-		maxTokens: 262144,
-		contextWindow: 262144,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 1.2,
-		outputPrice: 1.2,
-		description:
-			"Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. Context length: extended from 128k to 256k, providing better long-horizon support.",
-	},
-	"hf:openai/gpt-oss-120b": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.1,
-		outputPrice: 0.1,
-	},
-	"hf:zai-org/GLM-4.5": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.55,
-		outputPrice: 2.19,
-	},
+// Models used in tests and as fallback for dynamic provider
+export const syntheticModels: Record<string, ModelInfo> = {
 	"hf:zai-org/GLM-4.6": {
-		maxTokens: 200000,
-		contextWindow: 200000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.55,
-		outputPrice: 2.19,
-	},
-	"hf:reissbaker/llama-3.1-70b-abliterated-lora": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.9,
-		outputPrice: 0.9,
-	},
-	"hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
-		maxTokens: 524000,
-		contextWindow: 524000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.22,
-		outputPrice: 0.88,
-	},
-	"hf:deepseek-ai/DeepSeek-V3.1": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.56,
-		outputPrice: 1.68,
-	},
-	"hf:meta-llama/Llama-3.1-405B-Instruct": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 3.0,
-		outputPrice: 3.0,
-	},
-	"hf:meta-llama/Llama-3.1-70B-Instruct": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.9,
-		outputPrice: 0.9,
-	},
-	"hf:meta-llama/Llama-3.1-8B-Instruct": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.2,
-		outputPrice: 0.2,
-	},
-	"hf:meta-llama/Llama-3.3-70B-Instruct": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.9,
-		outputPrice: 0.9,
-	},
-	"hf:deepseek-ai/DeepSeek-V3-0324": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 1.2,
-		outputPrice: 1.2,
-	},
-	"hf:deepseek-ai/DeepSeek-R1": {
 		maxTokens: 128000,
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
 		inputPrice: 0.55,
 		outputPrice: 2.19,
+		description: "GLM-4.6",
+		supportsComputerUse: false,
+		supportsReasoningEffort: false,
+		supportsReasoningBudget: false,
+		supportedParameters: [],
 	},
-	"hf:deepseek-ai/DeepSeek-R1-0528": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 3.0,
-		outputPrice: 8.0,
-	},
-	"hf:meta-llama/Llama-4-Scout-17B-16E-Instruct": {
-		maxTokens: 328000,
-		contextWindow: 328000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.15,
-		outputPrice: 0.6,
-	},
-	"hf:moonshotai/Kimi-K2-Instruct": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.6,
-		outputPrice: 2.5,
-	},
-	"hf:Qwen/Qwen3-Coder-480B-A35B-Instruct": {
-		maxTokens: 256000,
-		contextWindow: 256000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.45,
-		outputPrice: 1.8,
-	},
-	"hf:Qwen/Qwen2.5-Coder-32B-Instruct": {
-		maxTokens: 32000,
-		contextWindow: 32000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.8,
-		outputPrice: 0.8,
-	},
-	"hf:deepseek-ai/DeepSeek-V3": {
-		maxTokens: 128000,
-		contextWindow: 128000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 1.25,
-		outputPrice: 1.25,
-	},
-	"hf:Qwen/Qwen3-235B-A22B-Instruct-2507": {
-		maxTokens: 256000,
-		contextWindow: 256000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.22,
-		outputPrice: 0.88,
-	},
-	"hf:Qwen/Qwen3-235B-A22B-Thinking-2507": {
-		maxTokens: 256000,
-		contextWindow: 256000,
-		supportsImages: false,
-		supportsPromptCache: false,
-		inputPrice: 0.65,
-		outputPrice: 3.0,
-	},
-} as const satisfies Record<string, ModelInfo>
+}
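With `SyntheticModelId` widened to `string`, the static record above is no longer the source of truth; it survives only as a test fixture and a fallback when the models endpoint cannot be reached. A hedged sketch of how a caller might use it that way (the `resolveSyntheticModels` helper, its `fetchModels` parameter, and the import paths are assumptions for illustration, not code from this commit):

import type { ModelInfo } from "../model.js"
import { syntheticModels } from "./synthetic.js"

// Hypothetical consumer: prefer dynamically fetched models and fall
// back to the static record if the endpoint is unreachable or empty.
async function resolveSyntheticModels(
	fetchModels: () => Promise<Record<string, ModelInfo>>,
): Promise<Record<string, ModelInfo>> {
	try {
		const models = await fetchModels()
		return Object.keys(models).length > 0 ? models : syntheticModels
	} catch {
		return syntheticModels
	}
}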

src/api/providers/__tests__/synthetic.spec.ts

Lines changed: 14 additions & 4 deletions
@@ -22,11 +22,21 @@ vi.mock("openai", () => ({
 	})),
 }))
 
+// Mock model cache
+vi.mock("../fetchers/modelCache", () => ({
+	getModels: vi.fn(),
+}))
+
+// Import the mocked function after mock setup
+const { getModels: mockGetModels } = await import("../fetchers/modelCache")
+
 describe("SyntheticHandler", () => {
 	let handler: SyntheticHandler
 
 	beforeEach(() => {
 		vi.clearAllMocks()
+		// Mock getModels to return the static models
+		vi.mocked(mockGetModels).mockResolvedValue(syntheticModels)
 		// Set up default mock implementation
 		mockCreate.mockImplementation(async () => ({
 			[Symbol.asyncIterator]: async function* () {
@@ -83,7 +93,7 @@ describe("SyntheticHandler", () => {
 	})
 
 	it("should return specified model when valid model is provided", () => {
-		const testModelId: SyntheticModelId = "hf:zai-org/GLM-4.5"
+		const testModelId: SyntheticModelId = "hf:zai-org/GLM-4.6"
 		const handlerWithModel = new SyntheticHandler({
 			apiModelId: testModelId,
 			syntheticApiKey: "test-synthetic-api-key",
@@ -93,8 +103,8 @@ describe("SyntheticHandler", () => {
 		expect(model.info).toEqual(expect.objectContaining(syntheticModels[testModelId]))
 	})
 
-	it("should return GLM Instruct model with correct configuration", () => {
-		const testModelId: SyntheticModelId = "hf:zai-org/GLM-4.5"
+	it("should return GLM model with correct configuration", () => {
+		const testModelId: SyntheticModelId = "hf:zai-org/GLM-4.6"
 		const handlerWithModel = new SyntheticHandler({
 			apiModelId: testModelId,
 			syntheticApiKey: "test-synthetic-api-key",
@@ -175,7 +185,7 @@ describe("SyntheticHandler", () => {
 	})
 
 	it("createMessage should pass correct parameters to synthetic client", async () => {
-		const modelId: SyntheticModelId = "hf:zai-org/GLM-4.5"
+		const modelId: SyntheticModelId = "hf:zai-org/GLM-4.6"
 		const modelInfo = syntheticModels[modelId]
 		const handlerWithModel = new SyntheticHandler({
 			apiModelId: modelId,
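One subtlety in this test setup: Vitest hoists `vi.mock` calls above all imports, so the top-level `await import(...)` is what guarantees the suite receives the stubbed `getModels` rather than the real module. A condensed sketch of the idiom, reusing the module path from the diff:

import { vi } from "vitest"

// Hoisted above every import by Vitest's transform, so any module
// that pulls in modelCache sees this factory's stub instead.
vi.mock("../fetchers/modelCache", () => ({
	getModels: vi.fn(),
}))

// Top-level await runs after the mock is registered, so this binding
// is the vi.fn() stub, ready for mockResolvedValue(...) in beforeEach.
const { getModels } = await import("../fetchers/modelCache")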

src/api/providers/fetchers/modelCache.ts

Lines changed: 4 additions & 0 deletions
@@ -28,6 +28,7 @@ import { getOvhCloudAiEndpointsModels } from "./ovhcloud"
 import { getChutesModels } from "./chutes"
 import { getGeminiModels } from "./gemini"
 import { getInceptionModels } from "./inception"
+import { getSyntheticModels } from "./synthetic"
 // kilocode_change end
 
 import { getDeepInfraModels } from "./deepinfra"
@@ -109,6 +110,9 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
 		case "chutes":
 			models = await getChutesModels(options.apiKey)
 			break
+		case "synthetic":
+			models = await getSyntheticModels(options.apiKey)
+			break
 		case "gemini":
 			models = await getGeminiModels({
 				apiKey: options.apiKey,
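The new `getSyntheticModels` fetcher is among the 16 changed files but is not shown in this view. Going only by the call signature used here, a hedged sketch of what such a fetcher could look like; the endpoint URL, response schema, and import paths are all assumptions:

import axios from "axios"
import type { ModelInfo } from "@roo-code/types" // illustrative import path

// Assumed response item shape; the real endpoint's schema may differ.
interface SyntheticApiModel {
	id: string
	context_length: number
	max_output_tokens?: number
}

export async function getSyntheticModels(apiKey?: string): Promise<Record<string, ModelInfo>> {
	const headers = apiKey ? { Authorization: `Bearer ${apiKey}` } : undefined
	// Hypothetical models endpoint; not confirmed by this commit.
	const { data } = await axios.get<{ data: SyntheticApiModel[] }>("https://api.synthetic.new/v1/models", {
		headers,
	})

	const models: Record<string, ModelInfo> = {}
	for (const m of data.data) {
		models[m.id] = {
			maxTokens: m.max_output_tokens ?? m.context_length,
			contextWindow: m.context_length,
			supportsImages: false,
			supportsPromptCache: false,
		}
	}
	return models
}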
