import agenthub.monologue_agent.utils.json as json
import agenthub.monologue_agent.utils.prompts as prompts
from opendevin.core.exceptions import AgentEventTypeError
from opendevin.core.logger import opendevin_logger as logger
from opendevin.llm.llm import LLM
  6. class Monologue:
  7. """
  8. The monologue is a representation for the agent's internal monologue where it can think.
  9. The agent has the capability of using this monologue for whatever it wants.
  10. """
  11. def __init__(self):
  12. """
  13. Initialize the empty list of thoughts
  14. """
  15. self.thoughts = []
  16. def add_event(self, t: dict):
  17. """
  18. Adds an event to memory if it is a valid event.
  19. Parameters:
  20. - t (dict): The thought that we want to add to memory
  21. Raises:
  22. - AgentEventTypeError: If t is not a dict
  23. """
  24. if not isinstance(t, dict):
  25. raise AgentEventTypeError()
  26. self.thoughts.append(t)
  27. def get_thoughts(self):
  28. """
  29. Get the current thoughts of the agent.
  30. Returns:
  31. - list: The list of thoughts that the agent has.
  32. """
  33. return self.thoughts
  34. def get_total_length(self):
  35. """
  36. Gives the total number of characters in all thoughts
  37. Returns:
  38. - Int: Total number of chars in thoughts.
  39. """
  40. total_length = 0
  41. for t in self.thoughts:
  42. try:
  43. total_length += len(json.dumps(t))
  44. except TypeError as e:
  45. logger.error('Error serializing thought: %s', str(e), exc_info=False)
  46. return total_length
  47. def condense(self, llm: LLM):
  48. """
  49. Attempts to condense the monologue by using the llm
  50. Parameters:
  51. - llm (LLM): llm to be used for summarization
  52. Raises:
  53. - Exception: the same exception as it got from the llm or processing the response
  54. """
  55. try:
  56. prompt = prompts.get_summarize_monologue_prompt(self.thoughts)
  57. messages = [{'content': prompt, 'role': 'user'}]
  58. resp = llm.completion(messages=messages)
  59. summary_resp = resp['choices'][0]['message']['content']
  60. self.thoughts = prompts.parse_summary_response(summary_resp)
  61. except Exception as e:
  62. logger.error('Error condensing thoughts: %s', str(e), exc_info=False)
  63. # TODO If the llm fails with ContextWindowExceededError, we can try to condense the monologue chunk by chunk
  64. raise