agent.py

from agenthub.planner_agent.response_parser import PlannerResponseParser
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import AgentConfig
from opendevin.core.message import ImageContent, Message, TextContent
from opendevin.events.action import Action, AgentFinishAction
from opendevin.llm.llm import LLM

from .prompt import get_prompt_and_images


class PlannerAgent(Agent):
    VERSION = '1.0'
    """
    The planner agent uses a special prompting strategy to create long-term plans for solving problems.
    At every step, the agent is given its previous action-observation pairs, the current task, and a hint based on the last action taken.
    """

    response_parser = PlannerResponseParser()

    def __init__(self, llm: LLM, config: AgentConfig):
        """Initializes the Planner Agent with an LLM.

        Parameters:
        - llm (LLM): The llm to be used by this agent
        """
        super().__init__(llm, config)

    def step(self, state: State) -> Action:
        """Checks whether the current step is completed and returns AgentFinishAction if so.
        Otherwise, builds a plan prompt and sends it to the model for inference, returning the result as the next action.

        Parameters:
        - state (State): The current state given the previous actions and observations

        Returns:
        - AgentFinishAction: If the root task's state is 'completed', 'verified', or 'abandoned'
        - Action: The next action to take based on the llm response
        """
        if state.root_task.state in [
            'completed',
            'verified',
            'abandoned',
        ]:
            return AgentFinishAction()

        prompt, image_urls = get_prompt_and_images(
            state, self.llm.config.max_message_chars
        )
        content = [TextContent(text=prompt)]
        if image_urls:
            content.append(ImageContent(image_urls=image_urls))
        message = Message(role='user', content=content)
        resp = self.llm.completion(messages=[message.model_dump()])
        return self.response_parser.parse(resp)
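
For context, a minimal sketch of how this agent might be driven outside OpenDevin's own controller loop. The constructor arguments for LLM, LLMConfig, AgentConfig, and State below are assumptions inferred from the imports above, not verified wiring, and the part that executes each action and records the observation is stubbed out; in practice OpenDevin's controller owns that loop.

# Driver sketch only: the constructor signatures below are assumptions, not the project's documented API.
from agenthub.planner_agent.agent import PlannerAgent
from opendevin.controller.state.state import State
from opendevin.core.config import AgentConfig, LLMConfig  # LLMConfig location assumed
from opendevin.events.action import AgentFinishAction
from opendevin.llm.llm import LLM

llm = LLM(config=LLMConfig(model='gpt-4o'))   # assumed: LLM accepts an LLMConfig
agent = PlannerAgent(llm=llm, config=AgentConfig())
state = State()                               # assumed: a fresh State with a default root task

while True:
    action = agent.step(state)
    if isinstance(action, AgentFinishAction):
        break
    # Execute `action` and append the resulting observation to the state's history
    # (omitted: the runtime/controller is responsible for this part).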