codeact_agent.py

import json
import os
from collections import deque

from litellm import ModelResponse

import openhands.agenthub.codeact_agent.function_calling as codeact_function_calling
from openhands.controller.agent import Agent
from openhands.controller.state.state import State
from openhands.core.config import AgentConfig
from openhands.core.logger import openhands_logger as logger
from openhands.core.message import ImageContent, Message, TextContent
from openhands.events.action import (
    Action,
    AgentDelegateAction,
    AgentFinishAction,
    BrowseInteractiveAction,
    CmdRunAction,
    FileEditAction,
    IPythonRunCellAction,
    MessageAction,
)
from openhands.events.observation import (
    AgentDelegateObservation,
    BrowserOutputObservation,
    CmdOutputObservation,
    FileEditObservation,
    IPythonRunCellObservation,
    UserRejectObservation,
)
from openhands.events.observation.error import ErrorObservation
from openhands.events.observation.observation import Observation
from openhands.events.serialization.event import truncate_content
from openhands.llm.llm import LLM
from openhands.runtime.plugins import (
    AgentSkillsRequirement,
    JupyterRequirement,
    PluginRequirement,
)
from openhands.utils.prompt import PromptManager


class CodeActAgent(Agent):
    VERSION = '2.2'
    """
    The Code Act Agent is a minimalist agent.
    The agent works by passing the model a list of action-observation pairs and prompting the model to take the next step.

    ### Overview

    This agent implements the CodeAct idea ([paper](https://arxiv.org/abs/2402.01030), [tweet](https://twitter.com/xingyaow_/status/1754556835703751087)) that consolidates LLM agents' **act**ions into a unified **code** action space for both *simplicity* and *performance* (see paper for more details).

    The conceptual idea is illustrated below. At each turn, the agent can:

    1. **Converse**: Communicate with humans in natural language to ask for clarification, confirmation, etc.
    2. **CodeAct**: Choose to perform the task by executing code
        - Execute any valid Linux `bash` command
        - Execute any valid `Python` code with [an interactive Python interpreter](https://ipython.org/). This is simulated through a `bash` command; see the plugin system below for more details.

    ![image](https://github.com/All-Hands-AI/OpenHands/assets/38853559/92b622e3-72ad-4a61-8f41-8c040b6d5fb3)
    """

    sandbox_plugins: list[PluginRequirement] = [
        # NOTE: AgentSkillsRequirement needs to go before JupyterRequirement, since
        # AgentSkillsRequirement provides a lot of Python functions,
        # and it needs to be initialized before Jupyter for Jupyter to use those functions.
        AgentSkillsRequirement(),
        JupyterRequirement(),
    ]

    def __init__(
        self,
        llm: LLM,
        config: AgentConfig,
    ) -> None:
        """Initializes a new instance of the CodeActAgent class.

        Parameters:
        - llm (LLM): The llm to be used by this agent
        - config (AgentConfig): The configuration for this agent
        """
        super().__init__(llm, config)
        self.reset()

        self.mock_function_calling = False
        if not self.llm.is_function_calling_active():
            logger.info(
                f'Function calling not enabled for model {self.llm.config.model}. '
                'Mocking function calling via prompting.'
            )
            self.mock_function_calling = True

        # Function calling mode
        self.tools = codeact_function_calling.get_tools(
            codeact_enable_browsing=self.config.codeact_enable_browsing,
            codeact_enable_jupyter=self.config.codeact_enable_jupyter,
            codeact_enable_llm_editor=self.config.codeact_enable_llm_editor,
        )
        logger.debug(
            f'TOOLS loaded for CodeActAgent: {json.dumps(self.tools, indent=2)}'
        )
        self.prompt_manager = PromptManager(
            microagent_dir=os.path.join(os.path.dirname(__file__), 'micro')
            if self.config.use_microagents
            else None,
            prompt_dir=os.path.join(os.path.dirname(__file__), 'prompts'),
            disabled_microagents=self.config.disabled_microagents,
        )

        self.pending_actions: deque[Action] = deque()
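
    # Illustrative sketch (comments only, not executed): the AgentConfig fields
    # read by __init__ above, plus a hypothetical construction of the agent.
    # The field values and the LLM constructor call shown here are assumptions
    # for illustration, not the library's documented defaults.
    #
    #   config = AgentConfig(
    #       codeact_enable_browsing=True,
    #       codeact_enable_jupyter=True,
    #       codeact_enable_llm_editor=False,
    #       use_microagents=True,
    #       disabled_microagents=[],
    #   )
    #   agent = CodeActAgent(llm=LLM(llm_config), config=config)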

    def get_action_message(
        self,
        action: Action,
        pending_tool_call_action_messages: dict[str, Message],
    ) -> list[Message]:
        """Converts an action into a message format that can be sent to the LLM.

        This method handles different types of actions and formats them appropriately:
        1. For tool-based actions (AgentDelegate, CmdRun, IPythonRunCell, FileEdit) and agent-sourced AgentFinish:
            - In function calling mode: Stores the LLM's response in pending_tool_call_action_messages
            - In non-function calling mode: Creates a message with the action string
        2. For MessageActions: Creates a message with the text content and optional image content

        Args:
            action (Action): The action to convert. Can be one of:
                - CmdRunAction: For executing bash commands
                - IPythonRunCellAction: For running IPython code
                - FileEditAction: For editing files
                - BrowseInteractiveAction: For browsing the web
                - AgentFinishAction: For ending the interaction
                - MessageAction: For sending messages
            pending_tool_call_action_messages (dict[str, Message]): Dictionary mapping response IDs
                to their corresponding messages. Used in function calling mode to track tool calls
                that are waiting for their results.

        Returns:
            list[Message]: A list containing the formatted message(s) for the action.
                May be empty if the action is handled as a tool call in function calling mode.

        Note:
            In function calling mode, tool-based actions are stored in pending_tool_call_action_messages
            rather than being returned immediately. They will be processed later when all corresponding
            tool call results are available.
        """
        # create a regular message from an event
        if isinstance(
            action,
            (
                AgentDelegateAction,
                IPythonRunCellAction,
                FileEditAction,
                BrowseInteractiveAction,
            ),
        ) or (
            isinstance(action, (AgentFinishAction, CmdRunAction))
            and action.source == 'agent'
        ):
            tool_metadata = action.tool_call_metadata
            assert tool_metadata is not None, (
                'Tool call metadata should NOT be None when function calling is enabled. Action: '
                + str(action)
            )

            llm_response: ModelResponse = tool_metadata.model_response
            assistant_msg = llm_response.choices[0].message

            # Add the LLM message (assistant) that initiated the tool calls
            # (overwrites any previous message with the same response_id)
            pending_tool_call_action_messages[llm_response.id] = Message(
                role=assistant_msg.role,
                # tool call content SHOULD BE a string
                content=[TextContent(text=assistant_msg.content or '')]
                if assistant_msg.content is not None
                else [],
                tool_calls=assistant_msg.tool_calls,
            )
            return []
        elif isinstance(action, MessageAction):
            role = 'user' if action.source == 'user' else 'assistant'
            content = [TextContent(text=action.content or '')]
            if self.llm.vision_is_active() and action.image_urls:
                content.append(ImageContent(image_urls=action.image_urls))
            return [
                Message(
                    role=role,
                    content=content,
                )
            ]
        elif isinstance(action, CmdRunAction) and action.source == 'user':
            content = [
                TextContent(text=f'User executed the command:\n{action.command}')
            ]
            return [
                Message(
                    role='user',
                    content=content,
                )
            ]
        return []

    def get_observation_message(
        self,
        obs: Observation,
        tool_call_id_to_message: dict[str, Message],
    ) -> list[Message]:
        """Converts an observation into a message format that can be sent to the LLM.

        This method handles different types of observations and formats them appropriately:
        - CmdOutputObservation: Formats command execution results with exit codes
        - IPythonRunCellObservation: Formats IPython cell execution results, replacing base64 images
        - FileEditObservation: Formats file editing results
        - AgentDelegateObservation: Formats results from delegated agent tasks
        - ErrorObservation: Formats error messages from failed actions
        - UserRejectObservation: Formats user rejection messages

        In function calling mode, observations with tool_call_metadata are stored in
        tool_call_id_to_message for later processing instead of being returned immediately.

        Args:
            obs (Observation): The observation to convert
            tool_call_id_to_message (dict[str, Message]): Dictionary mapping tool call IDs
                to their corresponding messages (used in function calling mode)

        Returns:
            list[Message]: A list containing the formatted message(s) for the observation.
                May be empty if the observation is handled as a tool response in function calling mode.

        Raises:
            ValueError: If the observation type is unknown
        """
        message: Message
        max_message_chars = self.llm.config.max_message_chars

        if isinstance(obs, CmdOutputObservation):
            # if it doesn't have tool call metadata, it was triggered by a user action
            if obs.tool_call_metadata is None:
                text = truncate_content(
                    f'\nObserved result of command executed by user:\n{obs.content}',
                    max_message_chars,
                )
            else:
                text = truncate_content(
                    obs.content + obs.interpreter_details, max_message_chars
                )
            text += f'\n[Command finished with exit code {obs.exit_code}]'
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, IPythonRunCellObservation):
            text = obs.content
            # replace base64 images with a placeholder
            splitted = text.split('\n')
            for i, line in enumerate(splitted):
                if '![image](data:image/png;base64,' in line:
                    splitted[i] = (
                        '![image](data:image/png;base64, ...) already displayed to user'
                    )
            text = '\n'.join(splitted)
            text = truncate_content(text, max_message_chars)
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, FileEditObservation):
            text = truncate_content(str(obs), max_message_chars)
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, BrowserOutputObservation):
            text = obs.get_agent_obs_text()
            message = Message(
                role='user',
                content=[TextContent(text=text)],
            )
        elif isinstance(obs, AgentDelegateObservation):
            text = truncate_content(
                obs.outputs['content'] if 'content' in obs.outputs else '',
                max_message_chars,
            )
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, ErrorObservation):
            text = truncate_content(obs.content, max_message_chars)
            text += '\n[Error occurred in processing last action]'
            message = Message(role='user', content=[TextContent(text=text)])
        elif isinstance(obs, UserRejectObservation):
            text = 'OBSERVATION:\n' + truncate_content(obs.content, max_message_chars)
            text += '\n[Last action has been rejected by the user]'
            message = Message(role='user', content=[TextContent(text=text)])
        else:
            # If an observation message is not returned, it will cause an error
            # when the LLM tries to return the next message
            raise ValueError(f'Unknown observation type: {type(obs)}')

        # Update the message as tool response properly
        if (tool_call_metadata := obs.tool_call_metadata) is not None:
            tool_call_id_to_message[tool_call_metadata.tool_call_id] = Message(
                role='tool',
                content=message.content,
                tool_call_id=tool_call_metadata.tool_call_id,
                name=tool_call_metadata.function_name,
            )
            # No need to return the observation message
            # because it will be added by get_action_message when all the corresponding
            # tool calls in the SAME request are processed
            return []

        return [message]
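
    # How tool calls are paired with their results (descriptive note): an action
    # carrying tool_call_metadata is buffered in pending_tool_call_action_messages
    # (keyed by the LLM response id) by get_action_message, and its observation is
    # buffered in tool_call_id_to_message (keyed by tool_call_id) by
    # get_observation_message. _get_messages below emits the assistant message
    # followed by one 'tool' message per tool_call_id only once every call in that
    # response has a result, which keeps the conversation valid for the provider.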

    def reset(self) -> None:
        """Resets the CodeAct Agent."""
        super().reset()

    def step(self, state: State) -> Action:
        """Performs one step using the CodeAct Agent.
        This includes gathering info on previous steps and prompting the model to make a command to execute.

        Parameters:
        - state (State): used to get updated info

        Returns:
        - CmdRunAction(command) - bash command to run
        - IPythonRunCellAction(code) - IPython code to run
        - AgentDelegateAction(agent, inputs) - delegate action for (sub)task
        - MessageAction(content) - Message action to run (e.g. ask for clarification)
        - AgentFinishAction() - end the interaction
        """
        # Continue with pending actions if any
        if self.pending_actions:
            return self.pending_actions.popleft()

        # if we're done, go back
        latest_user_message = state.get_last_user_message()
        if latest_user_message and latest_user_message.content.strip() == '/exit':
            return AgentFinishAction()

        # prepare what we want to send to the LLM
        messages = self._get_messages(state)
        params: dict = {
            'messages': self.llm.format_messages_for_llm(messages),
        }
        params['tools'] = self.tools
        if self.mock_function_calling:
            params['mock_function_calling'] = True
        response = self.llm.completion(**params)
        actions = codeact_function_calling.response_to_actions(response)
        for action in actions:
            self.pending_actions.append(action)
        return self.pending_actions.popleft()

    def _get_messages(self, state: State) -> list[Message]:
        """Constructs the message history for the LLM conversation.

        This method builds a structured conversation history by processing events from the state
        and formatting them into messages that the LLM can understand. It handles both regular
        message flow and function-calling scenarios.

        The method performs the following steps:
        1. Initializes with system prompt and optional initial user message
        2. Processes events (Actions and Observations) into messages
        3. Handles tool calls and their responses in function-calling mode
        4. Manages message role alternation (user/assistant/tool)
        5. Applies caching for specific LLM providers (e.g., Anthropic)
        6. Adds environment reminders for non-function-calling mode

        Args:
            state (State): The current state object containing conversation history and other metadata

        Returns:
            list[Message]: A list of formatted messages ready for LLM consumption, including:
                - System message with prompt
                - Initial user message (if configured)
                - Action messages (from both user and assistant)
                - Observation messages (including tool responses)
                - Environment reminders (in non-function-calling mode)

        Note:
            - In function-calling mode, tool calls and their responses are carefully tracked
              to maintain proper conversation flow
            - Messages from the same role are combined to prevent consecutive same-role messages
            - For Anthropic models, specific messages are cached according to their documentation
        """
        messages: list[Message] = [
            Message(
                role='system',
                content=[
                    TextContent(
                        text=self.prompt_manager.get_system_message(),
                        cache_prompt=self.llm.is_caching_prompt_active(),
                    )
                ],
            )
        ]
        example_message = self.prompt_manager.get_example_user_message()
        if example_message:
            messages.append(
                Message(
                    role='user',
                    content=[TextContent(text=example_message)],
                    cache_prompt=self.llm.is_caching_prompt_active(),
                )
            )

        pending_tool_call_action_messages: dict[str, Message] = {}
        tool_call_id_to_message: dict[str, Message] = {}
        events = list(state.history)
        for event in events:
            # create a regular message from an event
            if isinstance(event, Action):
                messages_to_add = self.get_action_message(
                    action=event,
                    pending_tool_call_action_messages=pending_tool_call_action_messages,
                )
            elif isinstance(event, Observation):
                messages_to_add = self.get_observation_message(
                    obs=event,
                    tool_call_id_to_message=tool_call_id_to_message,
                )
            else:
                raise ValueError(f'Unknown event type: {type(event)}')

            # Check pending tool call action messages and see if they are complete
            _response_ids_to_remove = []
            for (
                response_id,
                pending_message,
            ) in pending_tool_call_action_messages.items():
                assert pending_message.tool_calls is not None, (
                    'Tool calls should NOT be None when function calling is enabled & the message is considered pending tool call. '
                    f'Pending message: {pending_message}'
                )
                if all(
                    tool_call.id in tool_call_id_to_message
                    for tool_call in pending_message.tool_calls
                ):
                    # If complete:
                    # -- 1. Add the message that **initiated** the tool calls
                    messages_to_add.append(pending_message)
                    # -- 2. Add the tool calls **results**
                    for tool_call in pending_message.tool_calls:
                        messages_to_add.append(tool_call_id_to_message[tool_call.id])
                        tool_call_id_to_message.pop(tool_call.id)
                    _response_ids_to_remove.append(response_id)
            # Cleanup the processed pending tool messages
            for response_id in _response_ids_to_remove:
                pending_tool_call_action_messages.pop(response_id)

            for message in messages_to_add:
                if message:
                    if message.role == 'user':
                        self.prompt_manager.enhance_message(message)
                    # handle error if the message is the SAME role as the previous message
                    # litellm.exceptions.BadRequestError: litellm.BadRequestError: OpenAIException - Error code: 400 - {'detail': 'Only supports u/a/u/a/u...'}
                    # there shouldn't be two consecutive messages from the same role
                    # NOTE: we shouldn't combine tool messages because each of them has a different tool_call_id
                    if (
                        messages
                        and messages[-1].role == message.role
                        and message.role != 'tool'
                    ):
                        messages[-1].content.extend(message.content)
                    else:
                        messages.append(message)

        if self.llm.is_caching_prompt_active():
            # NOTE: this is only needed for anthropic
            # following logic here:
            # https://github.com/anthropics/anthropic-quickstarts/blob/8f734fd08c425c6ec91ddd613af04ff87d70c5a0/computer-use-demo/computer_use_demo/loop.py#L241-L262
            breakpoints_remaining = 3  # remaining 1 for system/tool
            for message in reversed(messages):
                if message.role == 'user' or message.role == 'tool':
                    if breakpoints_remaining > 0:
                        message.content[
                            -1
                        ].cache_prompt = True  # Last item inside the message content
                        breakpoints_remaining -= 1
                    else:
                        break

        return messages
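

# Illustrative sketch (comments only): the conversation layout _get_messages
# produces in function-calling mode. The literal texts, tool_call_id, and
# function name below are hypothetical examples that mirror how Message and
# TextContent are used above; they are not emitted by this module.
#
#   [
#       Message(role='system', content=[TextContent(text='<system prompt>')]),
#       Message(role='user', content=[TextContent(text='Run the failing test')]),
#       Message(role='assistant', content=[], tool_calls=[...]),
#       Message(
#           role='tool',
#           content=[TextContent(text='[Command finished with exit code 0]')],
#           tool_call_id='call_0',
#           name='execute_bash',
#       ),
#   ]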