
Commit 550c410

Add instructions for token limit parameters for OpenAI models

1 parent 5ce1280 commit 550c410

File tree

2 files changed: +17 −1 lines changed

libs/oci/README.md

Lines changed: 8 additions & 1 deletion

````diff
@@ -30,7 +30,14 @@ This repository includes two main integration categories:
 ```python
 from langchain_oci import ChatOCIGenAI
 
-llm = ChatOCIGenAI()
+llm = ChatOCIGenAI(
+    model_id="MY_MODEL_ID",
+    service_endpoint="MY_SERVICE_ENDPOINT",
+    compartment_id="MY_COMPARTMENT_ID",
+    model_kwargs={"max_tokens": 1024},  # Use max_completion_tokens instead of max_tokens for OpenAI models
+    auth_profile="MY_AUTH_PROFILE",
+    is_stream=True,
+    auth_type="SECURITY_TOKEN")
 llm.invoke("Sing a ballad of LangChain.")
 ```
````
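The inline comment above points OpenAI models at a different parameter name. A minimal sketch of that variant, assuming the same `MY_*` placeholders as the README example and a hypothetical `openai.`-prefixed model ID:

```python
from langchain_oci import ChatOCIGenAI

# "openai.gpt-4o" is a hypothetical model ID used only to illustrate the
# "openai." prefix; the MY_* values are placeholders from the README example.
llm = ChatOCIGenAI(
    model_id="openai.gpt-4o",
    service_endpoint="MY_SERVICE_ENDPOINT",
    compartment_id="MY_COMPARTMENT_ID",
    # OpenAI models take max_completion_tokens rather than max_tokens
    model_kwargs={"max_completion_tokens": 1024},
    auth_profile="MY_AUTH_PROFILE",
    is_stream=True,
    auth_type="SECURITY_TOKEN",
)
llm.invoke("Sing a ballad of LangChain.")
```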

libs/oci/langchain_oci/chat_models/oci_generative_ai.py

Lines changed: 9 additions & 0 deletions

```diff
@@ -1058,6 +1058,15 @@ def _prepare_request(
         if stop is not None:
             _model_kwargs[self._provider.stop_sequence_key] = stop
 
+        # Warn if using max_tokens with OpenAI models
+        if self.model_id and self.model_id.startswith("openai.") and "max_tokens" in _model_kwargs:
+            import warnings
+            warnings.warn(
+                "OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
+                UserWarning,
+                stacklevel=2
+            )
+
         chat_params = {**_model_kwargs, **kwargs, **oci_params}
 
         if not self.model_id:
```
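Because the new guard only inspects `model_id` and `model_kwargs`, its logic can be exercised without any OCI credentials. A self-contained sketch that mirrors the check (the helper name and model IDs are illustrative, not part of the library):

```python
import warnings


def check_token_limit_kwargs(model_id: str, model_kwargs: dict) -> None:
    """Mirror the guard added in _prepare_request above."""
    if model_id and model_id.startswith("openai.") and "max_tokens" in model_kwargs:
        warnings.warn(
            "OpenAI models require 'max_completion_tokens' instead of 'max_tokens'.",
            UserWarning,
            stacklevel=2,
        )


check_token_limit_kwargs("openai.gpt-4o", {"max_tokens": 1024})             # warns
check_token_limit_kwargs("openai.gpt-4o", {"max_completion_tokens": 1024})  # silent
check_token_limit_kwargs("cohere.command-r", {"max_tokens": 1024})          # silent: not an "openai." model
```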
