You wired both conditional edges and an unconditional chatbot -> END edge. A plain add_edge("chatbot", "END") fires on every step, in parallel with whatever need_human returns, so the graph takes two paths out of "chatbot" at once: the "END" branch runs even when the router sends you to "human_assistance", which is why you see an extra pass instead of a clean pause-and-resume. Remove the unconditional edge (and the separate "END" node plus the "END" -> END hop) and let need_human do all the routing.
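
A minimal sketch of the rewiring, assuming the rest of your nodes stay as posted: need_human owns every edge out of "chatbot", and a path_map maps its "END" return value onto the END sentinel so the dedicated end node isn't needed.

graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("human_assistance", human_assistance)
graph_builder.set_entry_point("chatbot")

# The router is now the only way out of "chatbot": either ask the human
# or finish. No add_edge("chatbot", "END") and no dedicated "END" node.
graph_builder.add_conditional_edges(
    "chatbot",
    need_human,
    {"human_assistance": "human_assistance", "END": END},
)
graph_builder.add_edge("human_assistance", "chatbot")

Separately, human_assistance discards the value that interrupt() returns, so the human's reply never reaches the state: nothing clears human_required and the model never sees the clarification on the next chatbot turn. A sketch of writing it back, reusing your state keys:

def human_assistance(state: State):
    """Request assistance from a human."""
    # On resume, interrupt() returns whatever was passed to
    # Command(resume=...); with your call that is {'query': <text>}.
    answer = interrupt({"query": state.get("clarification")})
    return {
        "messages": [HumanMessage(content=answer["query"])],
        "human_response": answer["query"],
        "human_required": False,
    }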
Can anyone point out what I am doing wrong here? My graph does not resume; instead it runs twice when provided with clarification.
import uuid
from typing import Annotated, Any, Literal

from typing_extensions import TypedDict
from pydantic import BaseModel, Field
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from langgraph.types import Command, interrupt

class State(TypedDict):
    messages: Annotated[list, add_messages]
    clarification: Annotated[
        str,
        "Request for clarification from the human. "
        "The model should populate this field whenever human assistance is needed "
        "because the query is ambiguous or missing details.",
    ]
    human_response: Annotated[
        str,
        "Response from the human when asked for clarification.",
    ]
    human_required: Annotated[
        bool,
        "Set to True if a human is required in the loop for additional clarification, else False.",
    ]
class LLMResponse(BaseModel):
    clarification: str = Field(description="Request message for clarification from the human whenever human assistance is required because the query is ambiguous or missing details.")
    human_required: bool = Field(description="Set to True if a human is required in the loop for additional clarification, else False.")
    response: Any = Field(description="Response from the LLM.")

# llm is a chat model initialized earlier (omitted from the post).
llm = llm.with_structured_output(LLMResponse, strict=True, method="json_mode")
def human_assistance(state: State):
    """Request assistance from a human."""
    print("Requesting information from human.")
    human_response = interrupt({"query": state.get("clarification")})
def chatbot(state: State):
    print("Initial state:", state)
    # Append the new message to the existing messages in the state
    messages = state.get("messages", [])
    llm_response = llm.invoke(messages)
    message = AIMessage(content=llm_response.response)
    return {
        "messages": messages + [message],  # Append the new message
        "clarification": llm_response.clarification,  # Access attribute directly
        "human_required": llm_response.human_required,  # Access attribute directly
    }
def need_human(state: State) -> Literal["human_assistance", "END"]:
    print("Need human:", state)
    if state.get("human_required"):
        return "human_assistance"
    else:
        return "END"

def end(state: State):
    print("Ending the graph.")
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("END", end)
graph_builder.set_entry_point("chatbot")
graph_builder.add_conditional_edges(
    "chatbot",
    need_human,
)
graph_builder.add_node("human_assistance", human_assistance)
graph_builder.add_edge("human_assistance", "chatbot")
graph_builder.add_edge("chatbot", "END")
graph_builder.add_edge("END", END)

memory = InMemorySaver()
graph = graph_builder.compile(checkpointer=memory)

thread_id = str(uuid.uuid4())
config = {"configurable": {"thread_id": thread_id}}
# Include the SystemMessage in the initial list of messages
# (system_message_content and user_query are defined earlier; omitted here)
result = graph.invoke(
    {"messages": [SystemMessage(content=system_message_content), HumanMessage(content=user_query)]},
    config=config,
)
print(f"Results: \n {result['messages']}")

human_response = input("Enter your response: ")
result = graph.invoke(
    Command(resume={"query": human_response}),
    config=config,
)