"""Prompt templates and LangChain helpers for the langchains-based agent."""
import os
from typing import List, Optional

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI

from . import json
from opendevin.observation import (
    CmdOutputObservation,
)

if os.getenv("DEBUG"):
    from langchain.globals import set_debug
    set_debug(True)
# Prompt sent on every step to ask the model for the agent's next action.
# Placeholders (filled in by request_action()):
#   {task}                - the user-assigned task
#   {monologue}           - JSON dump of the agent's thoughts so far
#   {background_commands} - listing of running background commands (may be empty)
#   {hint}                - situational nudge derived from the latest thought
ACTION_PROMPT: str = """
You're a thoughtful robot. Your main task is to {task}.
Don't expand the scope of your task--just complete it as written.
This is your internal monologue, in JSON format:
```json
{monologue}
```
Your most recent thought is at the bottom of that monologue. Continue your train of thought.
What is your next thought or action? Your response must be in JSON format.
It must be an object, and it must contain two fields:
* `action`, which is one of the actions below
* `args`, which is a map of key-value pairs, specifying the arguments for that action
Here are the possible actions:
* `read` - reads the contents of a file. Arguments:
* `path` - the path of the file to read
* `write` - writes the contents to a file. Arguments:
* `path` - the path of the file to write
* `contents` - the contents to write to the file
* `run` - runs a command. Arguments:
* `command` - the command to run
* `background` - if true, run the command in the background, so that other commands can be run concurrently. Useful for e.g. starting a server. You won't be able to see the logs. You don't need to end the command with `&`, just set this to true.
* `kill` - kills a background command
* `id` - the ID of the background command to kill
* `browse` - opens a web page. Arguments:
* `url` - the URL to open
* `recall` - recalls a past memory. Arguments:
* `query` - the query to search for
* `think` - make a plan, set a goal, or record your thoughts. Arguments:
* `thought` - the thought to record
* `finish` - if you're absolutely certain that you've completed your task and have tested your work, use the finish action to stop working.
{background_commands}
You MUST take time to think in between read, write, run, browse, and recall actions.
You should never act twice in a row without thinking. But if your last several
actions are all "think" actions, you should consider taking a different action.
Notes:
* your environment is Debian Linux. You can install software with `apt`
* you can use `git commit` to stash your work, but you don't have access to a remote repository
* your working directory will not change, even if you run `cd`. All commands will be run in the `/workspace` directory.
* don't run interactive commands, or commands that don't return (e.g. `node server.js`). You may run commands in the background (e.g. `node server.js &`)
What is your next thought or action? Again, you must reply with JSON, and only with JSON.
{hint}
"""
# Prompt used to compress a long monologue into a shorter one. Placeholder:
#   {monologue} - JSON dump of the monologue to summarize
# The reply is parsed against the NewMonologue schema in summarize_monologue().
MONOLOGUE_SUMMARY_PROMPT: str = """
Below is the internal monologue of an automated LLM agent. Each
thought is an item in a JSON array. The thoughts may be memories,
actions taken by the agent, or outputs from those actions.
Please return a new, smaller JSON array, which summarizes the
internal monologue. You can summarize individual thoughts, and
you can condense related thoughts together with a description
of their content.
```json
{monologue}
```
Make the summaries as pithy and informative as possible.
Be specific about what happened and what was learned. The summary
will be used as keywords for searching for the original memory.
Be sure to preserve any key words or important information.
Your response must be in JSON format. It must be an object with the
key `new_monologue`, which is a JSON array containing the summarized monologue.
Each entry in the array must have an `action` key, and an `args` key.
The action key may be `summarize`, and `args.summary` should contain the summary.
"""
class _ActionDict(BaseModel):
    """Schema for one model reply: an action name plus its arguments."""

    # Action name, e.g. "read", "write", "run", "think", "finish".
    action: str
    # Action-specific key/value arguments.
    args: dict
class NewMonologue(BaseModel):
    """Schema for the summarizer's reply: a condensed monologue."""

    # Summarized sequence of thoughts/actions replacing the original monologue.
    new_monologue: List[_ActionDict]
  83. def get_chain(template, model_name):
  84. assert (
  85. "OPENAI_API_KEY" in os.environ
  86. ), "Please set the OPENAI_API_KEY environment variable to use langchains_agent."
  87. llm = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"), model_name=model_name) # type: ignore
  88. prompt = PromptTemplate.from_template(template)
  89. llm_chain = LLMChain(prompt=prompt, llm=llm)
  90. return llm_chain
  91. def summarize_monologue(thoughts: List[dict], model_name):
  92. llm_chain = get_chain(MONOLOGUE_SUMMARY_PROMPT, model_name)
  93. parser = JsonOutputParser(pydantic_object=NewMonologue)
  94. resp = llm_chain.invoke({"monologue": json.dumps({"old_monologue": thoughts})})
  95. if os.getenv("DEBUG"):
  96. print("resp", resp)
  97. parsed = parser.parse(resp["text"])
  98. return parsed["new_monologue"]
  99. def request_action(
  100. task,
  101. thoughts: List[dict],
  102. model_name: str,
  103. background_commands_obs: List[CmdOutputObservation] = [],
  104. ):
  105. llm_chain = get_chain(ACTION_PROMPT, model_name)
  106. parser = JsonOutputParser(pydantic_object=_ActionDict)
  107. hint = ""
  108. if len(thoughts) > 0:
  109. latest_thought = thoughts[-1]
  110. if latest_thought["action"] == 'think':
  111. if latest_thought["args"]['thought'].startswith("OK so my task is"):
  112. hint = "You're just getting started! What should you do first?"
  113. else:
  114. hint = "You've been thinking a lot lately. Maybe it's time to take action?"
  115. elif latest_thought["action"] == 'error':
  116. hint = "Looks like that last command failed. Maybe you need to fix it, or try something else."
  117. bg_commands_message = ""
  118. if len(background_commands_obs) > 0:
  119. bg_commands_message = "The following commands are running in the background:"
  120. for command_obs in background_commands_obs:
  121. bg_commands_message += f"\n`{command_obs.command_id}`: {command_obs.command}"
  122. bg_commands_message += "\nYou can end any process by sending a `kill` action with the numerical `id` above."
  123. latest_thought = thoughts[-1]
  124. resp = llm_chain.invoke(
  125. {
  126. "monologue": json.dumps(thoughts),
  127. "hint": hint,
  128. "task": task,
  129. "background_commands": bg_commands_message,
  130. }
  131. )
  132. if os.getenv("DEBUG"):
  133. print("resp", resp)
  134. parsed = parser.parse(resp["text"])
  135. return parsed