agent.py

from agenthub.monologue_agent.response_parser import MonologueResponseParser
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.events.action import Action, AgentFinishAction
from opendevin.llm.llm import LLM
from opendevin.runtime.tools import RuntimeTool

from .prompt import get_prompt


class PlannerAgent(Agent):
    VERSION = '1.0'
    """
    The planner agent utilizes a special prompting strategy to create long-term plans for solving problems.
    At every step, the agent is given its previous action-observation pairs, the current task, and a hint based on the last action taken.
    """

    runtime_tools: list[RuntimeTool] = [RuntimeTool.BROWSER]
    response_parser = MonologueResponseParser()

    def __init__(self, llm: LLM):
        """
        Initialize the Planner Agent with an LLM

        Parameters:
        - llm (LLM): The LLM to be used by this agent
        """
        super().__init__(llm)

    def step(self, state: State) -> Action:
        """
        Checks whether the current task is already finished and returns AgentFinishAction if so.
        Otherwise, builds a plan prompt, sends it to the model for inference, and returns the parsed result as the next action.

        Parameters:
        - state (State): The current state given the previous actions and observations

        Returns:
        - AgentFinishAction: If the root task state is 'completed', 'verified', or 'abandoned'
        - Action: The next action to take based on the LLM response
        """
        if state.root_task.state in [
            'completed',
            'verified',
            'abandoned',
        ]:
            return AgentFinishAction()

        prompt = get_prompt(state)
        messages = [{'content': prompt, 'role': 'user'}]
        resp = self.llm.do_completion(messages=messages)
        # Track the number of characters exchanged with the LLM; the response is
        # indexed in the OpenAI/LiteLLM chat-completion format.
        state.num_of_chars += len(prompt) + len(
            resp['choices'][0]['message']['content']
        )
        return self.response_parser.parse(resp)

    def search_memory(self, query: str) -> list[str]:
        # Memory search is not implemented for this agent; always returns an empty list.
        return []
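
For reference, below is a minimal driver sketch showing how this agent might be exercised; it is not part of the original file. PlannerAgent.step, State, and AgentFinishAction come from the code above, while run_planner and the max_steps budget are illustrative assumptions. Construction of the LLM and State objects is omitted because their constructors are not shown here.

from opendevin.controller.state.state import State
from opendevin.events.action import AgentFinishAction


def run_planner(agent: PlannerAgent, state: State, max_steps: int = 10) -> None:
    """Illustrative loop: step the planner until it declares the task done."""
    for _ in range(max_steps):
        action = agent.step(state)
        if isinstance(action, AgentFinishAction):
            break
        # A real controller would execute the returned action, record the
        # resulting observation on the state, and then call step() again.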