# codeact_agent.py

import json
import os
from collections import deque

from litellm import ModelResponse

import openhands.agenthub.codeact_agent.function_calling as codeact_function_calling
from openhands.controller.agent import Agent
from openhands.controller.state.state import State
from openhands.core.config import AgentConfig
from openhands.core.logger import openhands_logger as logger
from openhands.core.message import ImageContent, Message, TextContent
from openhands.events.action import (
    Action,
    AgentDelegateAction,
    AgentFinishAction,
    BrowseInteractiveAction,
    BrowseURLAction,
    CmdRunAction,
    FileEditAction,
    IPythonRunCellAction,
    MessageAction,
)
from openhands.events.observation import (
    AgentDelegateObservation,
    BrowserOutputObservation,
    CmdOutputObservation,
    FileEditObservation,
    IPythonRunCellObservation,
    UserRejectObservation,
)
from openhands.events.observation.error import ErrorObservation
from openhands.events.observation.observation import Observation
from openhands.events.serialization.event import truncate_content
from openhands.llm.llm import LLM
from openhands.runtime.plugins import (
    AgentSkillsRequirement,
    JupyterRequirement,
    PluginRequirement,
)
from openhands.utils.prompt import PromptManager


class CodeActAgent(Agent):
    VERSION = '2.2'
    """
    The Code Act Agent is a minimalist agent.
    The agent works by passing the model a list of action-observation pairs and prompting the model to take the next step.

    ### Overview

    This agent implements the CodeAct idea ([paper](https://arxiv.org/abs/2402.01030), [tweet](https://twitter.com/xingyaow_/status/1754556835703751087)) that consolidates LLM agents’ **act**ions into a unified **code** action space for both *simplicity* and *performance* (see paper for more details).

    The conceptual idea is illustrated below. At each turn, the agent can:

    1. **Converse**: Communicate with humans in natural language to ask for clarification, confirmation, etc.
    2. **CodeAct**: Choose to perform the task by executing code
       - Execute any valid Linux `bash` command
       - Execute any valid `Python` code with [an interactive Python interpreter](https://ipython.org/). This is simulated through a `bash` command; see the plugin system below for more details.

    ![image](https://github.com/All-Hands-AI/OpenHands/assets/38853559/92b622e3-72ad-4a61-8f41-8c040b6d5fb3)
    """

    sandbox_plugins: list[PluginRequirement] = [
        # NOTE: AgentSkillsRequirement needs to go before JupyterRequirement, since
        # AgentSkillsRequirement provides a lot of Python functions,
        # and it needs to be initialized before Jupyter for Jupyter to use those functions.
        AgentSkillsRequirement(),
        JupyterRequirement(),
    ]

    def __init__(
        self,
        llm: LLM,
        config: AgentConfig,
    ) -> None:
        """Initializes a new instance of the CodeActAgent class.

        Parameters:
        - llm (LLM): The LLM to be used by this agent
        - config (AgentConfig): The configuration for this agent
        """
        super().__init__(llm, config)
        self.reset()

        self.mock_function_calling = False
        if not self.llm.is_function_calling_active():
            logger.info(
                f'Function calling not enabled for model {self.llm.config.model}. '
                'Mocking function calling via prompting.'
            )
            self.mock_function_calling = True

        # Function calling mode
        self.tools = codeact_function_calling.get_tools(
            codeact_enable_browsing=self.config.codeact_enable_browsing,
            codeact_enable_jupyter=self.config.codeact_enable_jupyter,
            codeact_enable_llm_editor=self.config.codeact_enable_llm_editor,
        )
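        # The tools are passed straight through to `self.llm.completion(tools=...)`,
        # so they are expected to follow the OpenAI-style function schema that
        # litellm accepts. Roughly (an assumed shape, shown for illustration only):
        #   [{'type': 'function',
        #     'function': {'name': ..., 'description': ..., 'parameters': {...}}}]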
        logger.debug(
            f'TOOLS loaded for CodeActAgent: {json.dumps(self.tools, indent=2)}'
        )
        self.prompt_manager = PromptManager(
            microagent_dir=os.path.join(os.path.dirname(__file__), 'micro')
            if self.config.use_microagents
            else None,
            prompt_dir=os.path.join(os.path.dirname(__file__), 'prompts'),
            disabled_microagents=self.config.disabled_microagents,
        )
        self.pending_actions: deque[Action] = deque()
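    # A minimal sketch of constructing the agent directly (illustrative only; in
    # OpenHands the controller normally instantiates agents, and the exact
    # LLMConfig/AgentConfig fields shown here are assumptions):
    #
    #   from openhands.core.config import AgentConfig, LLMConfig
    #   from openhands.llm.llm import LLM
    #
    #   llm = LLM(LLMConfig(model='gpt-4o'))
    #   agent = CodeActAgent(llm=llm, config=AgentConfig())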

    def get_action_message(
        self,
        action: Action,
        pending_tool_call_action_messages: dict[str, Message],
    ) -> list[Message]:
        """Converts an action into a message format that can be sent to the LLM.

        This method handles different types of actions and formats them appropriately:
        1. For tool-based actions (AgentDelegate, CmdRun, IPythonRunCell, FileEdit) and agent-sourced AgentFinish:
           - In function calling mode: Stores the LLM's response in pending_tool_call_action_messages
           - In non-function calling mode: Creates a message with the action string
        2. For MessageActions: Creates a message with the text content and optional image content

        Args:
            action (Action): The action to convert. Can be one of:
                - CmdRunAction: For executing bash commands
                - IPythonRunCellAction: For running IPython code
                - FileEditAction: For editing files
                - BrowseInteractiveAction: For browsing the web
                - AgentFinishAction: For ending the interaction
                - MessageAction: For sending messages
            pending_tool_call_action_messages (dict[str, Message]): Dictionary mapping response IDs
                to their corresponding messages. Used in function calling mode to track tool calls
                that are waiting for their results.

        Returns:
            list[Message]: A list containing the formatted message(s) for the action.
                May be empty if the action is handled as a tool call in function calling mode.

        Note:
            In function calling mode, tool-based actions are stored in pending_tool_call_action_messages
            rather than being returned immediately. They will be processed later when all corresponding
            tool call results are available.
        """
        # create a regular message from an event
        if isinstance(
            action,
            (
                AgentDelegateAction,
                IPythonRunCellAction,
                FileEditAction,
                BrowseInteractiveAction,
                BrowseURLAction,
            ),
        ) or (
            isinstance(action, (AgentFinishAction, CmdRunAction))
            and action.source == 'agent'
        ):
            tool_metadata = action.tool_call_metadata
            assert tool_metadata is not None, (
                'Tool call metadata should NOT be None when function calling is enabled. Action: '
                + str(action)
            )

            llm_response: ModelResponse = tool_metadata.model_response
            assistant_msg = llm_response.choices[0].message

            # Add the LLM message (assistant) that initiated the tool calls
            # (overwrites any previous message with the same response_id)
            pending_tool_call_action_messages[llm_response.id] = Message(
                role=assistant_msg.role,
                # tool call content SHOULD BE a string
                content=[TextContent(text=assistant_msg.content or '')]
                if assistant_msg.content is not None
                else [],
                tool_calls=assistant_msg.tool_calls,
            )
            return []
        elif isinstance(action, MessageAction):
            role = 'user' if action.source == 'user' else 'assistant'
            content = [TextContent(text=action.content or '')]
            if self.llm.vision_is_active() and action.image_urls:
                content.append(ImageContent(image_urls=action.image_urls))
            return [
                Message(
                    role=role,
                    content=content,
                )
            ]
        elif isinstance(action, CmdRunAction) and action.source == 'user':
            content = [
                TextContent(text=f'User executed the command:\n{action.command}')
            ]
            return [
                Message(
                    role='user',
                    content=content,
                )
            ]
        return []
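    # A minimal sketch of the MessageAction branch above (illustrative only; it
    # assumes the action's source has already been set by the event stream — the
    # resulting Message role is 'user' when action.source == 'user', otherwise
    # 'assistant'):
    #
    #   pending: dict[str, Message] = {}
    #   msgs = agent.get_action_message(
    #       action=MessageAction(content='Please fix the failing test'),
    #       pending_tool_call_action_messages=pending,
    #   )
    #   # -> [Message(role=..., content=[TextContent(text='Please fix the failing test')])]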

    def get_observation_message(
        self,
        obs: Observation,
        tool_call_id_to_message: dict[str, Message],
    ) -> list[Message]:
        """Converts an observation into a message format that can be sent to the LLM.

        This method handles different types of observations and formats them appropriately:
        - CmdOutputObservation: Formats command execution results with exit codes
        - IPythonRunCellObservation: Formats IPython cell execution results, replacing base64 images
        - FileEditObservation: Formats file editing results
        - BrowserOutputObservation: Formats browser output for the agent
        - AgentDelegateObservation: Formats results from delegated agent tasks
        - ErrorObservation: Formats error messages from failed actions
        - UserRejectObservation: Formats user rejection messages

        In function calling mode, observations with tool_call_metadata are stored in
        tool_call_id_to_message for later processing instead of being returned immediately.

        Args:
            obs (Observation): The observation to convert
            tool_call_id_to_message (dict[str, Message]): Dictionary mapping tool call IDs
                to their corresponding messages (used in function calling mode)

        Returns:
            list[Message]: A list containing the formatted message(s) for the observation.
                May be empty if the observation is handled as a tool response in function calling mode.

        Raises:
            ValueError: If the observation type is unknown
        """
        message: Message
        max_message_chars = self.llm.config.max_message_chars
        if isinstance(obs, CmdOutputObservation):
            # if it doesn't have tool call metadata, it was triggered by a user action
            if obs.tool_call_metadata is None:
                text = truncate_content(
                    f'\nObserved result of command executed by user:\n{obs.content}',
                    max_message_chars,
                )
            else:
                text = truncate_content(
                    obs.content + obs.interpreter_details, max_message_chars
                )
            text += f'\n[Command finished with exit code {obs.exit_code}]'
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, IPythonRunCellObservation):
            text = obs.content
            # replace base64 images with a placeholder
            splitted = text.split('\n')
            for i, line in enumerate(splitted):
                if '![image](data:image/png;base64,' in line:
                    splitted[i] = (
                        '![image](data:image/png;base64, ...) already displayed to user'
                    )
            text = '\n'.join(splitted)
            text = truncate_content(text, max_message_chars)
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, FileEditObservation):
            text = truncate_content(str(obs), max_message_chars)
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, BrowserOutputObservation):
            text = obs.get_agent_obs_text()
            message = Message(
                role='user',
                content=[TextContent(text=text)],
            )
        elif isinstance(obs, AgentDelegateObservation):
            text = truncate_content(
                obs.outputs['content'] if 'content' in obs.outputs else '',
                max_message_chars,
            )
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, ErrorObservation):
            text = truncate_content(obs.content, max_message_chars)
            text += '\n[Error occurred in processing last action]'
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, UserRejectObservation):
            text = 'OBSERVATION:\n' + truncate_content(obs.content, max_message_chars)
            text += '\n[Last action has been rejected by the user]'
            message = Message(role='user', content=[TextContent(text=text)])
        else:
            # If an observation message is not returned, it will cause an error
            # when the LLM tries to return the next message
            raise ValueError(f'Unknown observation type: {type(obs)}')

        # Update the message as tool response properly
        if (tool_call_metadata := obs.tool_call_metadata) is not None:
            tool_call_id_to_message[tool_call_metadata.tool_call_id] = Message(
                role='tool',
                content=message.content,
                tool_call_id=tool_call_metadata.tool_call_id,
                name=tool_call_metadata.function_name,
            )
            # No need to return the observation message
            # because it will be added by get_action_message when all the corresponding
            # tool calls in the SAME request are processed
            return []
        return [message]
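    # A minimal sketch of the CmdOutputObservation path above (illustrative only;
    # the constructor arguments shown are assumptions, since in practice the
    # runtime produces the observation):
    #
    #   obs = CmdOutputObservation(command='ls', content='README.md', exit_code=0)
    #   msgs = agent.get_observation_message(obs, tool_call_id_to_message={})
    #   # With no tool_call_metadata the result is a single user-role Message whose
    #   # text ends with '[Command finished with exit code 0]'.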

    def reset(self) -> None:
        """Resets the CodeAct Agent."""
        super().reset()

    def step(self, state: State) -> Action:
        """Performs one step using the CodeAct Agent.

        This includes gathering info on previous steps and prompting the model to make a command to execute.

        Parameters:
        - state (State): used to get updated info

        Returns:
        - CmdRunAction(command) - bash command to run
        - IPythonRunCellAction(code) - IPython code to run
        - AgentDelegateAction(agent, inputs) - delegate action for (sub)task
        - MessageAction(content) - Message action to run (e.g. ask for clarification)
        - AgentFinishAction() - end the interaction
        """
        # Continue with pending actions if any
        if self.pending_actions:
            return self.pending_actions.popleft()

        # if the user asked to exit, finish the interaction
        latest_user_message = state.get_last_user_message()
        if latest_user_message and latest_user_message.content.strip() == '/exit':
            return AgentFinishAction()

        # prepare what we want to send to the LLM
        messages = self._get_messages(state)
        params: dict = {
            'messages': self.llm.format_messages_for_llm(messages),
        }
        params['tools'] = self.tools
        if self.mock_function_calling:
            params['mock_function_calling'] = True
        response = self.llm.completion(**params)
        actions = codeact_function_calling.response_to_actions(response)
        for action in actions:
            self.pending_actions.append(action)
        return self.pending_actions.popleft()
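    # A rough sketch of how step() is driven (illustrative only; in OpenHands the
    # AgentController owns this loop and appends events to state.history between
    # steps — `runtime.run_action` below is a hypothetical stand-in for that):
    #
    #   while True:
    #       action = agent.step(state)
    #       if isinstance(action, AgentFinishAction):
    #           break
    #       observation = runtime.run_action(action)  # hypothetical runtime call
    #       # the controller records both the action and the observation in
    #       # state.history before the next call to step()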

    def _get_messages(self, state: State) -> list[Message]:
        """Constructs the message history for the LLM conversation.

        This method builds a structured conversation history by processing events from the state
        and formatting them into messages that the LLM can understand. It handles both regular
        message flow and function-calling scenarios.

        The method performs the following steps:
        1. Initializes with system prompt and optional initial user message
        2. Processes events (Actions and Observations) into messages
        3. Handles tool calls and their responses in function-calling mode
        4. Manages message role alternation (user/assistant/tool)
        5. Applies caching for specific LLM providers (e.g., Anthropic)
        6. Adds environment reminders for non-function-calling mode

        Args:
            state (State): The current state object containing conversation history and other metadata

        Returns:
            list[Message]: A list of formatted messages ready for LLM consumption, including:
                - System message with prompt
                - Initial user message (if configured)
                - Action messages (from both user and assistant)
                - Observation messages (including tool responses)
                - Environment reminders (in non-function-calling mode)

        Note:
            - In function-calling mode, tool calls and their responses are carefully tracked
              to maintain proper conversation flow
            - Messages from the same role are combined to prevent consecutive same-role messages
            - For Anthropic models, specific messages are cached according to their documentation
        """
        messages: list[Message] = [
            Message(
                role='system',
                content=[
                    TextContent(
                        text=self.prompt_manager.get_system_message(),
                        cache_prompt=self.llm.is_caching_prompt_active(),
                    )
                ],
            )
        ]
        example_message = self.prompt_manager.get_example_user_message()
        if example_message:
            messages.append(
                Message(
                    role='user',
                    content=[TextContent(text=example_message)],
                    cache_prompt=self.llm.is_caching_prompt_active(),
                )
            )

        pending_tool_call_action_messages: dict[str, Message] = {}
        tool_call_id_to_message: dict[str, Message] = {}
        events = list(state.history)
        for event in events:
            # create a regular message from an event
            if isinstance(event, Action):
                messages_to_add = self.get_action_message(
                    action=event,
                    pending_tool_call_action_messages=pending_tool_call_action_messages,
                )
            elif isinstance(event, Observation):
                messages_to_add = self.get_observation_message(
                    obs=event,
                    tool_call_id_to_message=tool_call_id_to_message,
                )
            else:
                raise ValueError(f'Unknown event type: {type(event)}')

            # Check pending tool call action messages and see if they are complete
            _response_ids_to_remove = []
            for (
                response_id,
                pending_message,
            ) in pending_tool_call_action_messages.items():
                assert pending_message.tool_calls is not None, (
                    'Tool calls should NOT be None when function calling is enabled & the message is considered pending tool call. '
                    f'Pending message: {pending_message}'
                )
                if all(
                    tool_call.id in tool_call_id_to_message
                    for tool_call in pending_message.tool_calls
                ):
                    # If complete:
                    # -- 1. Add the message that **initiated** the tool calls
                    messages_to_add.append(pending_message)
                    # -- 2. Add the tool calls **results**
                    for tool_call in pending_message.tool_calls:
                        messages_to_add.append(tool_call_id_to_message[tool_call.id])
                        tool_call_id_to_message.pop(tool_call.id)
                    _response_ids_to_remove.append(response_id)
            # Cleanup the processed pending tool messages
            for response_id in _response_ids_to_remove:
                pending_tool_call_action_messages.pop(response_id)

            for message in messages_to_add:
                if message:
                    if message.role == 'user':
                        self.prompt_manager.enhance_message(message)
                    # handle error if the message is the SAME role as the previous message
                    # litellm.exceptions.BadRequestError: litellm.BadRequestError: OpenAIException - Error code: 400 - {'detail': 'Only supports u/a/u/a/u...'}
                    # there shouldn't be two consecutive messages from the same role
                    # NOTE: we shouldn't combine tool messages because each of them has a different tool_call_id
                    if (
                        messages
                        and messages[-1].role == message.role
                        and message.role != 'tool'
                    ):
                        messages[-1].content.extend(message.content)
                    else:
                        messages.append(message)

        if self.llm.is_caching_prompt_active():
            # NOTE: this is only needed for anthropic
            # following logic here:
            # https://github.com/anthropics/anthropic-quickstarts/blob/8f734fd08c425c6ec91ddd613af04ff87d70c5a0/computer-use-demo/computer_use_demo/loop.py#L241-L262
            breakpoints_remaining = 3  # 1 more breakpoint is reserved for the system/tool prompt
            for message in reversed(messages):
                if message.role == 'user' or message.role == 'tool':
                    if breakpoints_remaining > 0:
                        # Last item inside the message content
                        message.content[-1].cache_prompt = True
                        breakpoints_remaining -= 1
                    else:
                        break

        return messages
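    # After _get_messages, the list sent to the LLM looks roughly like this
    # (a sketch based on the construction above, not a captured payload):
    #
    #   [system prompt]                      role='system'
    #   [example user message, if any]       role='user'
    #   [user task / agent replies ...]      role='user' / 'assistant'
    #   [assistant message with tool_calls]  role='assistant'
    #   [one tool result per tool_call_id]   role='tool'
    #   ...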