Skip to content

Commit

Permalink
langchain: docstrings agents nested (#23598)
Browse files Browse the repository at this point in the history
Added missing docstrings. Formatted docstrings into a consistent form.

---------

Co-authored-by: ccurme <[email protected]>
  • Loading branch information
leo-gan and ccurme committed Jun 27, 2024
1 parent 70834cd commit b64c4b4
Show file tree
Hide file tree
Showing 22 changed files with 394 additions and 62 deletions.
1 change: 1 addition & 0 deletions libs/langchain/langchain/agents/agent_toolkits/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
See [Security](https://python.langchain.com/docs/security) for more information.
"""

from pathlib import Path
from typing import TYPE_CHECKING, Any

Expand Down
38 changes: 37 additions & 1 deletion libs/langchain/langchain/agents/chat/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,23 @@ def create_prompt(
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
) -> BasePromptTemplate:
"""Create a prompt from a list of tools.
Args:
tools: A list of tools.
system_message_prefix: The system message prefix.
Default is SYSTEM_MESSAGE_PREFIX.
system_message_suffix: The system message suffix.
Default is SYSTEM_MESSAGE_SUFFIX.
human_message: The human message. Default is HUMAN_MESSAGE.
format_instructions: The format instructions.
Default is FORMAT_INSTRUCTIONS.
input_variables: The input variables. Default is None.
Returns:
A prompt template.
"""

tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
Expand Down Expand Up @@ -113,7 +130,26 @@ def from_llm_and_tools(
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model.
tools: A list of tools.
callback_manager: The callback manager. Default is None.
output_parser: The output parser. Default is None.
system_message_prefix: The system message prefix.
Default is SYSTEM_MESSAGE_PREFIX.
system_message_suffix: The system message suffix.
Default is SYSTEM_MESSAGE_SUFFIX.
human_message: The human message. Default is HUMAN_MESSAGE.
format_instructions: The format instructions.
Default is FORMAT_INSTRUCTIONS.
input_variables: The input variables. Default is None.
kwargs: Additional keyword arguments.
Returns:
An agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
Expand Down
14 changes: 14 additions & 0 deletions libs/langchain/langchain/agents/chat/output_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,20 @@ def get_format_instructions(self) -> str:
return self.format_instructions

def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""

includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
Expand Down
44 changes: 38 additions & 6 deletions libs/langchain/langchain/agents/conversational/base.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""An agent designed to hold a conversation in addition to using tools."""

from __future__ import annotations

from typing import Any, List, Optional, Sequence
Expand Down Expand Up @@ -40,12 +41,20 @@ def _agent_type(self) -> str:

@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "

@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"

@classmethod
Expand All @@ -64,11 +73,15 @@ def create_prompt(
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
prefix: String to put before the list of tools. Defaults to PREFIX.
suffix: String to put after the list of tools. Defaults to SUFFIX.
format_instructions: Instructions on how to use the tools. Defaults to
FORMAT_INSTRUCTIONS.
ai_prefix: String to use before AI output. Defaults to "AI".
human_prefix: String to use before human output.
Defaults to "Human".
input_variables: List of input variables the final prompt will expect.
Defaults to ["input", "chat_history", "agent_scratchpad"].
Returns:
A PromptTemplate with the template assembled from the pieces here.
Expand Down Expand Up @@ -105,7 +118,26 @@ def from_llm_and_tools(
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model to use.
tools: A list of tools to use.
callback_manager: The callback manager to use. Default is None.
output_parser: The output parser to use. Default is None.
prefix: The prefix to use in the prompt. Default is PREFIX.
suffix: The suffix to use in the prompt. Default is SUFFIX.
format_instructions: The format instructions to use.
Default is FORMAT_INSTRUCTIONS.
ai_prefix: The prefix to use before AI output. Default is "AI".
human_prefix: The prefix to use before human output.
Default is "Human".
input_variables: The input variables to use. Default is None.
**kwargs: Any additional keyword arguments to pass to the agent.
Returns:
An agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
Expand Down
10 changes: 10 additions & 0 deletions libs/langchain/langchain/agents/conversational/output_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,16 @@ def get_format_instructions(self) -> str:
return self.format_instructions

def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""

if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
Expand Down
45 changes: 42 additions & 3 deletions libs/langchain/langchain/agents/conversational_chat/base.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""An agent designed to hold a conversation in addition to using tools."""

from __future__ import annotations

from typing import Any, List, Optional, Sequence, Tuple
Expand Down Expand Up @@ -35,7 +36,9 @@ class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""

output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
"""Output parser for the agent."""
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
"""Template for the tool response."""

@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
Expand All @@ -47,12 +50,20 @@ def _agent_type(self) -> str:

@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "

@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"

@classmethod
Expand All @@ -69,6 +80,20 @@ def create_prompt(
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
"""Create a prompt for the agent.
Args:
tools: The tools to use.
system_message: The system message to use.
Defaults to the PREFIX.
human_message: The human message to use.
Defaults to the SUFFIX.
input_variables: The input variables to use. Defaults to None.
output_parser: The output parser to use. Defaults to None.
Returns:
A PromptTemplate.
"""
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
)
Expand Down Expand Up @@ -115,7 +140,21 @@ def from_llm_and_tools(
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
"""Construct an agent from an LLM and tools.
Args:
llm: The language model to use.
tools: A list of tools to use.
callback_manager: The callback manager to use. Default is None.
output_parser: The output parser to use. Default is None.
system_message: The system message to use. Default is PREFIX.
human_message: The human message to use. Default is SUFFIX.
input_variables: The input variables to use. Default is None.
**kwargs: Any additional arguments.
Returns:
An agent.
"""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
Depending on the prompting strategy you are using, you may want to format these
differently before passing them into the LLM.
"""

from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
from langchain.agents.format_scratchpad.openai_functions import (
Expand Down
13 changes: 12 additions & 1 deletion libs/langchain/langchain/agents/format_scratchpad/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,18 @@ def format_log_to_str(
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process."""
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
observation_prefix: Prefix to append the observation with.
Defaults to "Observation: ".
llm_prefix: Prefix to append the llm call with.
Defaults to "Thought: ".
Returns:
str: The scratchpad.
"""
thoughts = ""
for action, observation in intermediate_steps:
thoughts = action.log
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,16 @@ def format_log_to_messages(
intermediate_steps: List[Tuple[AgentAction, str]],
template_tool_response: str = "{observation}",
) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
template_tool_response: Template to format the observation with.
Defaults to "{observation}".
Returns:
List[BaseMessage]: The scratchpad.
"""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ def _convert_agent_action_to_messages(
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
        return list(agent_action.message_log) + [
Expand All @@ -31,10 +32,13 @@
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
Expand All @@ -59,7 +63,8 @@ def format_to_openai_function_messages(
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []

Expand Down
14 changes: 9 additions & 5 deletions libs/langchain/langchain/agents/format_scratchpad/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,15 @@ def _create_tool_message(
agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
Expand All @@ -41,10 +45,10 @@ def format_to_tool_messages(
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction
list of messages to send to the LLM for the next prediction.
"""
messages = []
Expand Down
6 changes: 6 additions & 0 deletions libs/langchain/langchain/agents/json_chat/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,17 @@ def create_json_chat_agent(
then passed into the LLM. Default is `render_text_description`.
template_tool_response: Template prompt that uses the tool response (observation)
to make the LLM generate the next action to take.
Default is TEMPLATE_TOOL_RESPONSE.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
ValueError: If the template_tool_response is missing
the required variable 'observation'.
Example:
Expand Down
Loading

0 comments on commit b64c4b4

Please sign in to comment.