1+ #!/usr/bin/env python3
2+ """
3+ Example demonstrating how to configure the LLM class for enterprise API gateways
4+ that require custom headers and SSL certificate handling.
5+
6+ This example shows configuration patterns used at Wells Fargo and other enterprises
7+ with corporate proxies or API management systems like Tachyon/Apigee.
8+ """
9+
10+ import os
11+ import uuid
12+ from datetime import datetime
13+ from openhands .sdk .llm import LLM
14+
15+
def create_enterprise_llm():
    """
    Create an LLM instance configured for enterprise gateway access.

    This example shows how to:
    1. Add custom headers required by the gateway (auth tokens, correlation IDs)
    2. Set a custom base URL for the enterprise proxy
    3. Disable SSL verification when corporate proxies break cert chains
    4. Specify the underlying provider explicitly
    """
    # Dynamic, per-call identifiers that the gateway may require on every request.
    request_time = datetime.now()

    # Custom headers required by the gateway.
    gateway_headers = {
        "Authorization": "Bearer YOUR_ENTERPRISE_TOKEN",
        "Content-Type": "application/json",
        "x-correlation-id": str(uuid.uuid4()),
        "x-request-id": str(uuid.uuid4()),
        # Millisecond-precision timestamp: %f gives microseconds, drop last 3 digits.
        "x-wf-request-date": request_time.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
        "X-WF-USECASE-ID": "YOUR_USECASE_ID",
        "x-wf-client-id": "YOUR_CLIENT_ID",
        "x-wf-api-key": "YOUR_API_KEY",
    }

    # Assemble the LLM with all enterprise gateway settings in one place.
    return LLM(
        model="openai/gemini-2.5-flash",  # Model name as exposed by gateway
        api_key="placeholder",  # Often required even if not used
        # Enterprise proxy endpoint
        base_url="https://your-corporate-proxy.company.com/api/llm",
        extra_headers=gateway_headers,
        # Disable SSL verification if corporate proxy breaks certificate chain.
        # Set to True in production if certs are properly configured.
        ssl_verify=False,
        # Explicitly specify the provider for LiteLLM routing
        custom_llm_provider="openai",
        # Other configurations
        num_retries=1,
        timeout=30,
    )
64+
65+
def create_llm_from_env():
    """
    Create an LLM instance using environment variables.

    Set these environment variables:
    - LLM_MODEL=openai/gemini-2.5-flash
    - LLM_API_KEY=placeholder
    - LLM_BASE_URL=https://your-corporate-proxy.company.com/api/llm
    - LLM_SSL_VERIFY=false
    - LLM_CUSTOM_LLM_PROVIDER=openai
    - LLM_EXTRA_HEADERS='{"Authorization": "Bearer TOKEN", "x-correlation-id": "123"}'
    """
    # load_from_env automatically handles boolean parsing for ssl_verify
    # (accepts: false, False, 0, no, off) and JSON parsing for complex
    # fields like extra_headers.
    return LLM.load_from_env()
85+
86+
def example_usage():
    """Demonstrate using the enterprise-configured LLM."""
    llm = create_enterprise_llm()

    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ]

    # The extra_headers are automatically included in the request to the
    # gateway, the ssl_verify setting is applied to the HTTPS connection,
    # and custom_llm_provider ensures proper routing through LiteLLM.
    response = llm.chat(messages=conversation)

    print(f"Response: {response.choices[0].message.content}")
106+
107+
108+ if __name__ == "__main__" :
109+ # Example 1: Direct configuration
110+ print ("Example 1: Direct configuration" )
111+ llm = create_enterprise_llm ()
112+ print (f"Created LLM with model: { llm .model } " )
113+ print (f"Base URL: { llm .base_url } " )
114+ print (f"SSL Verify: { llm .ssl_verify } " )
115+ print (f"Extra headers configured: { bool (llm .extra_headers )} " )
116+
117+ # Example 2: Environment variable configuration
118+ print ("\n Example 2: Environment variable configuration" )
119+ # Set example environment variables (normally these would be set externally)
120+ os .environ ["LLM_MODEL" ] = "openai/gpt-4"
121+ os .environ ["LLM_BASE_URL" ] = "https://api-gateway.example.com/v1"
122+ os .environ ["LLM_SSL_VERIFY" ] = "false"
123+ os .environ ["LLM_CUSTOM_LLM_PROVIDER" ] = "openai"
124+ os .environ ["LLM_EXTRA_HEADERS" ] = '{"x-api-key": "secret123"}'
125+
126+ llm_env = LLM .load_from_env ()
127+ print (f"Created LLM from env with model: { llm_env .model } " )
128+ print (f"Base URL: { llm_env .base_url } " )
129+ print (f"SSL Verify: { llm_env .ssl_verify } " )
130+ print (f"Extra headers: { llm_env .extra_headers } " )