# codeact_agent.py
import os
from itertools import islice

from agenthub.codeact_agent.action_parser import CodeActResponseParser
from openhands.controller.agent import Agent
from openhands.controller.state.state import State
from openhands.core.config import AgentConfig
from openhands.core.exceptions import OperationCancelled
from openhands.core.logger import openhands_logger as logger
from openhands.core.message import ImageContent, Message, TextContent
from openhands.events.action import (
    Action,
    AgentDelegateAction,
    AgentFinishAction,
    CmdRunAction,
    IPythonRunCellAction,
    MessageAction,
)
from openhands.events.observation import (
    AgentDelegateObservation,
    CmdOutputObservation,
    IPythonRunCellObservation,
    UserRejectObservation,
)
from openhands.events.observation.error import ErrorObservation
from openhands.events.observation.observation import Observation
from openhands.events.serialization.event import truncate_content
from openhands.llm.llm import LLM
from openhands.runtime.plugins import (
    AgentSkillsRequirement,
    JupyterRequirement,
    PluginRequirement,
)
from openhands.utils.microagent import MicroAgent
from openhands.utils.prompt import PromptManager
  35. class CodeActAgent(Agent):
  36. VERSION = '1.9'
  37. """
  38. The Code Act Agent is a minimalist agent.
  39. The agent works by passing the model a list of action-observation pairs and prompting the model to take the next step.
  40. ### Overview
  41. This agent implements the CodeAct idea ([paper](https://arxiv.org/abs/2402.01030), [tweet](https://twitter.com/xingyaow_/status/1754556835703751087)) that consolidates LLM agents’ **act**ions into a unified **code** action space for both *simplicity* and *performance* (see paper for more details).
  42. The conceptual idea is illustrated below. At each turn, the agent can:
  43. 1. **Converse**: Communicate with humans in natural language to ask for clarification, confirmation, etc.
  44. 2. **CodeAct**: Choose to perform the task by executing code
  45. - Execute any valid Linux `bash` command
  46. - Execute any valid `Python` code with [an interactive Python interpreter](https://ipython.org/). This is simulated through `bash` command, see plugin system below for more details.
  47. ![image](https://github.com/All-Hands-AI/OpenHands/assets/38853559/92b622e3-72ad-4a61-8f41-8c040b6d5fb3)
  48. """
  49. sandbox_plugins: list[PluginRequirement] = [
  50. # NOTE: AgentSkillsRequirement need to go before JupyterRequirement, since
  51. # AgentSkillsRequirement provides a lot of Python functions,
  52. # and it needs to be initialized before Jupyter for Jupyter to use those functions.
  53. AgentSkillsRequirement(),
  54. JupyterRequirement(),
  55. ]
  56. action_parser = CodeActResponseParser()
  57. def __init__(
  58. self,
  59. llm: LLM,
  60. config: AgentConfig,
  61. ) -> None:
  62. """Initializes a new instance of the CodeActAgent class.
  63. Parameters:
  64. - llm (LLM): The llm to be used by this agent
  65. """
  66. super().__init__(llm, config)
  67. self.reset()
  68. self.micro_agent = (
  69. MicroAgent(
  70. os.path.join(
  71. os.path.dirname(__file__), 'micro', f'{config.micro_agent_name}.md'
  72. )
  73. )
  74. if config.micro_agent_name
  75. else None
  76. )
  77. self.prompt_manager = PromptManager(
  78. prompt_dir=os.path.join(os.path.dirname(__file__)),
  79. agent_skills_docs=AgentSkillsRequirement.documentation,
  80. micro_agent=self.micro_agent,
  81. )
  82. def action_to_str(self, action: Action) -> str:
  83. if isinstance(action, CmdRunAction):
  84. return (
  85. f'{action.thought}\n<execute_bash>\n{action.command}\n</execute_bash>'
  86. )
  87. elif isinstance(action, IPythonRunCellAction):
  88. return f'{action.thought}\n<execute_ipython>\n{action.code}\n</execute_ipython>'
  89. elif isinstance(action, AgentDelegateAction):
  90. return f'{action.thought}\n<execute_browse>\n{action.inputs["task"]}\n</execute_browse>'
  91. elif isinstance(action, MessageAction):
  92. return action.content
  93. elif isinstance(action, AgentFinishAction) and action.source == 'agent':
  94. return action.thought
  95. return ''
  96. def get_action_message(self, action: Action) -> Message | None:
  97. if (
  98. isinstance(action, AgentDelegateAction)
  99. or isinstance(action, CmdRunAction)
  100. or isinstance(action, IPythonRunCellAction)
  101. or isinstance(action, MessageAction)
  102. or (isinstance(action, AgentFinishAction) and action.source == 'agent')
  103. ):
  104. content = [TextContent(text=self.action_to_str(action))]
  105. if (
  106. self.llm.vision_is_active()
  107. and isinstance(action, MessageAction)
  108. and action.images_urls
  109. ):
  110. content.append(ImageContent(image_urls=action.images_urls))
  111. return Message(
  112. role='user' if action.source == 'user' else 'assistant', content=content
  113. )
  114. return None
  115. def get_observation_message(self, obs: Observation) -> Message | None:
  116. max_message_chars = self.llm.config.max_message_chars
  117. obs_prefix = 'OBSERVATION:\n'
  118. if isinstance(obs, CmdOutputObservation):
  119. text = obs_prefix + truncate_content(obs.content, max_message_chars)
  120. text += (
  121. f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]'
  122. )
  123. return Message(role='user', content=[TextContent(text=text)])
  124. elif isinstance(obs, IPythonRunCellObservation):
  125. text = obs_prefix + obs.content
  126. # replace base64 images with a placeholder
  127. splitted = text.split('\n')
  128. for i, line in enumerate(splitted):
  129. if '![image](data:image/png;base64,' in line:
  130. splitted[i] = (
  131. '![image](data:image/png;base64, ...) already displayed to user'
  132. )
  133. text = '\n'.join(splitted)
  134. text = truncate_content(text, max_message_chars)
  135. return Message(role='user', content=[TextContent(text=text)])
  136. elif isinstance(obs, AgentDelegateObservation):
  137. text = obs_prefix + truncate_content(
  138. obs.outputs['content'] if 'content' in obs.outputs else '',
  139. max_message_chars,
  140. )
  141. return Message(role='user', content=[TextContent(text=text)])
  142. elif isinstance(obs, ErrorObservation):
  143. text = obs_prefix + truncate_content(obs.content, max_message_chars)
  144. text += '\n[Error occurred in processing last action]'
  145. return Message(role='user', content=[TextContent(text=text)])
  146. elif isinstance(obs, UserRejectObservation):
  147. text = 'OBSERVATION:\n' + truncate_content(obs.content, max_message_chars)
  148. text += '\n[Last action has been rejected by the user]'
  149. return Message(role='user', content=[TextContent(text=text)])
  150. else:
  151. # If an observation message is not returned, it will cause an error
  152. # when the LLM tries to return the next message
  153. raise ValueError(f'Unknown observation type: {type(obs)}')
  154. def reset(self) -> None:
  155. """Resets the CodeAct Agent."""
  156. super().reset()
  157. def step(self, state: State) -> Action:
  158. """Performs one step using the CodeAct Agent.
  159. This includes gathering info on previous steps and prompting the model to make a command to execute.
  160. Parameters:
  161. - state (State): used to get updated info
  162. Returns:
  163. - CmdRunAction(command) - bash command to run
  164. - IPythonRunCellAction(code) - IPython code to run
  165. - AgentDelegateAction(agent, inputs) - delegate action for (sub)task
  166. - MessageAction(content) - Message action to run (e.g. ask for clarification)
  167. - AgentFinishAction() - end the interaction
  168. """
  169. # if we're done, go back
  170. latest_user_message = state.history.get_last_user_message()
  171. if latest_user_message and latest_user_message.strip() == '/exit':
  172. return AgentFinishAction()
  173. # prepare what we want to send to the LLM
  174. messages = self._get_messages(state)
  175. params = {
  176. 'messages': self.llm.format_messages_for_llm(messages),
  177. 'stop': [
  178. '</execute_ipython>',
  179. '</execute_bash>',
  180. '</execute_browse>',
  181. ],
  182. 'temperature': 0.0,
  183. }
  184. if self.llm.is_caching_prompt_active():
  185. params['extra_headers'] = {
  186. 'anthropic-beta': 'prompt-caching-2024-07-31',
  187. }
  188. # TODO: move exception handling to agent_controller
  189. try:
  190. response = self.llm.completion(**params)
  191. except OperationCancelled as e:
  192. raise e
  193. except Exception as e:
  194. logger.error(f'{e}')
  195. error_message = '{}: {}'.format(type(e).__name__, str(e).split('\n')[0])
  196. return AgentFinishAction(
  197. thought=f'Agent encountered an error while processing the last action.\nError: {error_message}\nPlease try again.'
  198. )
  199. return self.action_parser.parse(response)
  200. def _get_messages(self, state: State) -> list[Message]:
  201. messages: list[Message] = [
  202. Message(
  203. role='system',
  204. content=[
  205. TextContent(
  206. text=self.prompt_manager.system_message,
  207. cache_prompt=self.llm.is_caching_prompt_active(), # Cache system prompt
  208. )
  209. ],
  210. ),
  211. Message(
  212. role='user',
  213. content=[
  214. TextContent(
  215. text=self.prompt_manager.initial_user_message,
  216. cache_prompt=self.llm.is_caching_prompt_active(), # if the user asks the same query,
  217. )
  218. ],
  219. ),
  220. ]
  221. for event in state.history.get_events():
  222. # create a regular message from an event
  223. if isinstance(event, Action):
  224. message = self.get_action_message(event)
  225. elif isinstance(event, Observation):
  226. message = self.get_observation_message(event)
  227. else:
  228. raise ValueError(f'Unknown event type: {type(event)}')
  229. # add regular message
  230. if message:
  231. # handle error if the message is the SAME role as the previous message
  232. # litellm.exceptions.BadRequestError: litellm.BadRequestError: OpenAIException - Error code: 400 - {'detail': 'Only supports u/a/u/a/u...'}
  233. # there shouldn't be two consecutive messages from the same role
  234. if messages and messages[-1].role == message.role:
  235. messages[-1].content.extend(message.content)
  236. else:
  237. messages.append(message)
  238. # Add caching to the last 2 user messages
  239. if self.llm.is_caching_prompt_active():
  240. user_turns_processed = 0
  241. for message in reversed(messages):
  242. if message.role == 'user' and user_turns_processed < 2:
  243. message.content[
  244. -1
  245. ].cache_prompt = True # Last item inside the message content
  246. user_turns_processed += 1
  247. # The latest user message is important:
  248. # we want to remind the agent of the environment constraints
  249. latest_user_message = next(
  250. islice(
  251. (
  252. m
  253. for m in reversed(messages)
  254. if m.role == 'user'
  255. and any(isinstance(c, TextContent) for c in m.content)
  256. ),
  257. 1,
  258. ),
  259. None,
  260. )
  261. if latest_user_message:
  262. reminder_text = f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task. When finished reply with <finish></finish>.'
  263. latest_user_message.content.append(TextContent(text=reminder_text))
  264. return messages