Commit 30b2a28

avivsinai and claude committed
feat: add web search support for AI expert mode
- Add web search capabilities for OpenAI (O3), Anthropic (Claude 4), Google (Gemini 2.5), and xAI (Grok 4)
- Implement provider-specific web search tools (web_search_preview, webSearch_20250305, googleSearch, built-in)
- Fix Anthropic model IDs to match Vercel AI SDK (claude-opus-4-20250514, claude-sonnet-4-20250514)
- Add warning when web search is requested for unsupported models
- Update documentation with web search usage and cost approval guidelines
- Enable web search by default for supported models with --no-web-search option to disable

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 7d79a07 commit 30b2a28

File tree

5 files changed: +159 −20 lines

CLAUDE.md

Lines changed: 36 additions & 0 deletions
@@ -132,6 +132,7 @@ After recent changes:
 - Debug logging uses the `debug` npm package with namespace `promptcode:*`
 - All tests pass - run with `cd packages/cli && bun test`
 - To release: Create a tag and push to trigger GitHub Actions
+- DO NOT make shortcuts, always use the most idiomatic and generic solution
 
 <!-- PROMPTCODE-CLI-START -->
 # PromptCode CLI
@@ -147,6 +148,12 @@ promptcode generate src/api/handler.ts src/utils/*.ts
 # Ask AI experts questions with code context
 promptcode expert "Why is this slow?" src/api/handler.ts
 
+# Web search is enabled by default for supported models (O3, Gemini, Claude, Grok)
+promptcode expert "What are the latest React 19 features?" src/components/*.tsx
+
+# Explicitly disable web search if needed
+promptcode expert "Review this code" src/api/*.ts --no-web-search
+
 # Use presets for common file patterns
 promptcode preset list # See available presets
 promptcode preset info <name> # Show preset details & token count
@@ -193,12 +200,41 @@ promptcode generate src/**/*.ts --instructions "Find performance bottlenecks"
 promptcode expert "Review this code for security issues" src/api/**/*.ts
 ```
 
+## Web Search Support
+
+The expert command now includes built-in web search capabilities for supported models:
+
+**Models with Web Search:**
+- **OpenAI**: O3, O3 Pro, O3 Mini - Uses web_search_preview tool
+- **Google**: Gemini 2.5 Pro/Flash - Uses Google Search grounding
+- **Anthropic**: Claude Opus 4, Sonnet 4 - Uses web_search tool
+- **xAI**: Grok 4 - Has built-in real-time web access
+
+**Usage:**
+```bash
+# Web search is enabled by default for supported models
+promptcode expert "What are the breaking changes in TypeScript 5.8?"
+
+# Explicitly enable web search
+promptcode expert "Latest best practices for React Server Components" --web-search
+
+# Disable web search when you don't need current information
+promptcode expert "Review this code for bugs" src/**/*.ts --no-web-search
+```
+
+**Benefits:**
+- Access to current documentation and recent updates
+- Real-time information for rapidly evolving technologies
+- Grounded responses with source citations
+- Better accuracy for questions about recent events or releases
+
 ## Tips for AI Agents
 
 1. **Always check token counts** - Use `promptcode preset info` to see total tokens before generating
 2. **Be specific with patterns** - Use `src/api/*.ts` not `**/*.ts` to avoid huge contexts
 3. **Leverage existing presets** - Check `promptcode preset list` before creating new ones
 4. **Use descriptive preset names** - `auth-system` not `preset1`
+5. **Use web search for current info** - Enabled by default for questions about latest features, docs, or best practices
 
 ## Important: Cost Approval for AI Agents
 
packages/cli/src/commands/expert.ts

Lines changed: 16 additions & 0 deletions
@@ -23,6 +23,7 @@ interface ExpertOptions {
   models?: boolean;
   savePreset?: string;
   yes?: boolean;
+  webSearch?: boolean;
 }
 
 const SYSTEM_PROMPT = `You are an expert software engineer helping analyze and improve code. Provide constructive, actionable feedback.
@@ -294,6 +295,19 @@ export async function expertCommand(question: string | undefined, options: Exper
     console.log(chalk.gray('⏳ This may take a moment...\n'));
   }
 
+  // Determine web search setting
+  // Commander.js sets webSearch to false when --no-web-search is used
+  // undefined means use default (enabled for supported models)
+  const webSearchEnabled = options.webSearch;
+
+  // Show web search status and warnings
+  if (modelConfig.supportsWebSearch && webSearchEnabled !== false) {
+    console.log(chalk.cyan('🔍 Web search enabled for current information\n'));
+  } else if (webSearchEnabled === true && !modelConfig.supportsWebSearch) {
+    // User explicitly requested web search but model doesn't support it
+    console.log(chalk.yellow(`⚠️ ${modelConfig.name} does not support web search. Proceeding without web search.\n`));
+  }
+
   // Call AI
   const startTime = Date.now();
   let response: { text: string; usage?: any };
@@ -303,12 +317,14 @@ export async function expertCommand(question: string | undefined, options: Exper
       systemPrompt: SYSTEM_PROMPT,
       maxTokens: availableTokens,
       onChunk: (chunk) => process.stdout.write(chunk),
+      webSearch: webSearchEnabled,
     });
     console.log(); // Add newline after streaming
   } else {
    response = await aiProvider.generateText(modelKey, fullPrompt, {
      systemPrompt: SYSTEM_PROMPT,
      maxTokens: availableTokens,
+      webSearch: webSearchEnabled,
    });
   }
 
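Read together, the two added blocks resolve the flag into three cases. A summary in comment form (this restates the checks in the diff above, no additional behavior):

```ts
// Resolution of options.webSearch in expertCommand:
//   --no-web-search                   → webSearch === false     → search disabled, no status message
//   --web-search, model supports it   → webSearch === true      → search enabled, cyan notice printed
//   --web-search, model does not      → webSearch === true      → yellow warning, request proceeds without search
//   neither flag                      → webSearch === undefined → enabled only for models with supportsWebSearch
```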

packages/cli/src/index.ts

Lines changed: 2 additions & 0 deletions
@@ -249,6 +249,8 @@ Examples:
   .option('--stream', 'stream response in real-time')
   .option('--save-preset <name>', 'save file patterns as a preset')
   .option('-y, --yes', 'automatically confirm prompts')
+  .option('--web-search', 'enable web search for current information (enabled by default for supported models)')
+  .option('--no-web-search', 'disable web search even for supported models')
   .action(async (question, options) => {
     await expertCommand(question, options);
   });
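These paired flags are what produce the tri-state value that `expert.ts` reads. A minimal sketch of how Commander.js resolves them, assuming both options are declared with no explicit default as above (the helper name is hypothetical, not part of the commit):

```ts
import { Command } from 'commander';

// Hypothetical helper for illustration: mirrors the flag wiring added above.
function parseWebSearchFlag(argv: string[]): boolean | undefined {
  const cmd = new Command()
    .option('--web-search', 'enable web search')
    .option('--no-web-search', 'disable web search');
  cmd.parse(argv, { from: 'user' });
  return cmd.opts().webSearch;
}

console.log(parseWebSearchFlag([]));                  // undefined → defer to the model's default
console.log(parseWebSearchFlag(['--web-search']));    // true
console.log(parseWebSearchFlag(['--no-web-search'])); // false
```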

packages/cli/src/providers/ai-provider.ts

Lines changed: 86 additions & 10 deletions
@@ -4,9 +4,9 @@
  * Now powered by ConfigService (no process.env mutation side-effects).
  */
 
-import { createOpenAI } from '@ai-sdk/openai';
-import { createAnthropic } from '@ai-sdk/anthropic';
-import { createGoogleGenerativeAI } from '@ai-sdk/google';
+import { createOpenAI, openai } from '@ai-sdk/openai';
+import { createAnthropic, anthropic } from '@ai-sdk/anthropic';
+import { createGoogleGenerativeAI, google } from '@ai-sdk/google';
 import { createXai } from '@ai-sdk/xai';
 import { generateText, streamText, LanguageModel } from 'ai';
 import { MODELS, ModelConfig } from './models';
@@ -118,8 +118,41 @@ export class AIProvider {
   * Helpers
   * ──────────────────────────── */
 
+  private getWebSearchTools(modelKey: string): Record<string, any> | undefined {
+    const config = MODELS[modelKey];
+    if (!config || !config.supportsWebSearch) return undefined;
+
+    switch (config.provider) {
+      case 'openai':
+        // OpenAI requires using the responses API for web search
+        // This is handled in getModel method with a special case
+        return {
+          web_search_preview: openai.tools.webSearchPreview({})
+        };
+
+      case 'google':
+        return {
+          google_search: google.tools.googleSearch({})
+        };
+
+      case 'anthropic':
+        // Anthropic web search tool - using the provider-defined tool
+        return {
+          web_search: anthropic.tools.webSearch_20250305({
+            maxUses: 5
+          })
+        };
+
+      case 'xai':
+        // xAI Grok has built-in web access, no explicit tool needed
+        return undefined;
+
+      default:
+        return undefined;
+    }
+  }
 
-  private getModel(modelKey: string): LanguageModel {
+  private getModel(modelKey: string, useWebSearch: boolean = false): LanguageModel {
     // Skip model initialization in mock mode
     if (process.env.PROMPTCODE_MOCK_LLM === '1') {
       return {} as LanguageModel; // Return dummy model object
@@ -128,6 +161,21 @@ export class AIProvider {
     const config = MODELS[modelKey];
     if (!config) throw new Error(`Unknown model: ${modelKey}`);
 
+    // Special handling for OpenAI with web search
+    if (config.provider === 'openai' && useWebSearch && config.supportsWebSearch) {
+      const keys = this.config.getAllKeys();
+      if (!keys.openai) {
+        throw new Error(
+          `API key not configured for openai. ` +
+          `Set via "promptcode config --set-openai-key <key>" ` +
+          `or export OPENAI_API_KEY=...`
+        );
+      }
+      // Use responses API for web search support
+      const openaiProvider = createOpenAI({ apiKey: keys.openai });
+      return openaiProvider.responses(config.modelId);
+    }
+
     const providerInstance = this.providers[config.provider];
     if (!providerInstance) {
       const envName = config.provider.toUpperCase() + '_API_KEY';
@@ -152,6 +200,7 @@
       maxTokens?: number;
       temperature?: number;
       systemPrompt?: string;
+      webSearch?: boolean;
     } = {}
   ): Promise<AIResponse> {
     // Mock mode for testing
@@ -167,7 +216,9 @@
       };
     }
 
-    const model = this.getModel(modelKey);
+    const modelConfig = MODELS[modelKey];
+    const enableWebSearch = options.webSearch !== false && modelConfig?.supportsWebSearch;
+    const model = this.getModel(modelKey, enableWebSearch);
 
     const messages: any[] = [];
 
@@ -177,12 +228,23 @@
 
     messages.push({ role: 'user', content: prompt });
 
-    const result = await generateText({
+    // Prepare the request configuration
+    const requestConfig: any = {
       model,
       messages,
       maxCompletionTokens: options.maxTokens || 4096,
       temperature: options.temperature || 0.7,
-    });
+    };
+
+    // Add web search tools if enabled
+    if (enableWebSearch) {
+      const tools = this.getWebSearchTools(modelKey);
+      if (tools) {
+        requestConfig.tools = tools;
+      }
+    }
+
+    const result = await generateText(requestConfig);
 
     return {
       text: result.text,
@@ -198,6 +260,7 @@
       temperature?: number;
       systemPrompt?: string;
       onChunk?: (chunk: string) => void;
+      webSearch?: boolean;
     } = {}
   ): Promise<AIResponse> {
     // Mock mode for testing
@@ -222,7 +285,9 @@
       };
     }
 
-    const model = this.getModel(modelKey);
+    const modelConfig = MODELS[modelKey];
+    const enableWebSearch = options.webSearch !== false && modelConfig?.supportsWebSearch;
+    const model = this.getModel(modelKey, enableWebSearch);
 
     const messages: any[] = [];
 
@@ -232,12 +297,23 @@
 
     messages.push({ role: 'user', content: prompt });
 
-    const result = await streamText({
+    // Prepare the request configuration
+    const requestConfig: any = {
       model,
       messages,
       maxCompletionTokens: options.maxTokens || 4096,
       temperature: options.temperature || 0.7,
-    });
+    };
+
+    // Add web search tools if enabled
+    if (enableWebSearch) {
+      const tools = this.getWebSearchTools(modelKey);
+      if (tools) {
+        requestConfig.tools = tools;
+      }
+    }
+
+    const result = await streamText(requestConfig);
 
     let fullText = '';
     for await (const chunk of result.textStream) {
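Taken together with the `expert.ts` changes above, callers pass the flag straight through. A rough usage sketch (the constructor argument and model key are illustrative assumptions; the `generateText(modelKey, prompt, options)` shape matches the diff):

```ts
// Sketch only: constructor argument and model key are assumptions for illustration.
const provider = new AIProvider(configService);

// webSearch left undefined: tools are attached automatically because the
// model's config sets supportsWebSearch.
const grounded = await provider.generateText('gemini-2.5-pro', 'What changed in React 19?');

// webSearch: false forces a plain request even though the model supports search.
const plain = await provider.generateText('gemini-2.5-pro', 'Review this function for edge cases', {
  webSearch: false,
});
console.log(grounded.text, plain.text);
```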

packages/cli/src/providers/models.ts

Lines changed: 19 additions & 10 deletions
@@ -28,6 +28,7 @@ const ModelSchema = z.object({
     input: z.number().nonnegative(),
     output: z.number().nonnegative(),
   }),
+  supportsWebSearch: z.boolean().optional().default(false),
 });
 
 // Type derives from schema so TS & runtime always match
@@ -41,41 +42,46 @@ export const MODELS: Record<string, ModelConfig> = {
     name: 'O3',
     description: 'Advanced reasoning model with deep thinking capabilities',
     contextWindow: 200000,
-    pricing: { input: 2, output: 8 }
+    pricing: { input: 2, output: 8 },
+    supportsWebSearch: true
   },
   'o3-pro': {
     provider: 'openai',
     modelId: 'o3-pro',
     name: 'O3 Pro',
     description: 'Most advanced reasoning model with extended thinking',
     contextWindow: 200000,
-    pricing: { input: 20, output: 80 }
+    pricing: { input: 20, output: 80 },
+    supportsWebSearch: true
   },
   'o3-mini': {
     provider: 'openai',
     modelId: 'o3-mini',
     name: 'O3 Mini',
     description: 'Fast reasoning model for quick tasks',
     contextWindow: 200000,
-    pricing: { input: 0.5, output: 2 }
+    pricing: { input: 0.5, output: 2 },
+    supportsWebSearch: true
   },
 
   // Anthropic models (2025 SOTA)
   'opus-4': {
     provider: 'anthropic',
-    modelId: 'claude-opus-4-20250101',
+    modelId: 'claude-opus-4-20250514',
     name: 'Claude Opus 4',
     description: 'Most advanced Claude model with breakthrough capabilities',
     contextWindow: 500000,
-    pricing: { input: 25, output: 100 }
+    pricing: { input: 25, output: 100 },
+    supportsWebSearch: true
   },
   'sonnet-4': {
     provider: 'anthropic',
-    modelId: 'claude-sonnet-4-20250101',
+    modelId: 'claude-sonnet-4-20250514',
     name: 'Claude Sonnet 4',
     description: 'Balanced power and efficiency for production use',
     contextWindow: 500000,
-    pricing: { input: 5, output: 20 }
+    pricing: { input: 5, output: 20 },
+    supportsWebSearch: true
   },
 
   // Google models (2025 SOTA)
@@ -85,15 +91,17 @@ export const MODELS: Record<string, ModelConfig> = {
     name: 'Gemini 2.5 Pro',
     description: 'Latest Gemini with enhanced multimodal understanding',
     contextWindow: 3000000,
-    pricing: { input: 3, output: 12 }
+    pricing: { input: 3, output: 12 },
+    supportsWebSearch: true
   },
   'gemini-2.5-flash': {
     provider: 'google',
     modelId: 'gemini-2.5-flash',
     name: 'Gemini 2.5 Flash',
     description: 'Ultra-fast model for real-time applications',
     contextWindow: 1000000,
-    pricing: { input: 0.3, output: 1.2 }
+    pricing: { input: 0.3, output: 1.2 },
+    supportsWebSearch: true
  },
 
   // xAI models (2025 SOTA)
@@ -103,7 +111,8 @@ export const MODELS: Record<string, ModelConfig> = {
     name: 'Grok 4',
     description: 'xAI\'s most advanced model with real-time web access',
     contextWindow: 200000,
-    pricing: { input: 5, output: 15 }
+    pricing: { input: 5, output: 15 },
+    supportsWebSearch: true
   }
 };
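One consequence of the `.optional().default(false)` schema change worth noting: any model entry that omits the flag still validates and is treated as not supporting web search. A minimal sketch of that behavior (trimmed schema; `legacy-model` is a hypothetical entry, not from this commit):

```ts
import { z } from 'zod';

// Trimmed-down version of the schema change above.
const ModelSchema = z.object({
  modelId: z.string(),
  supportsWebSearch: z.boolean().optional().default(false),
});

const parsed = ModelSchema.parse({ modelId: 'legacy-model' });
console.log(parsed.supportsWebSearch); // false → web search tools are never attached for this model
```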
