# config.py
  1. import os
  2. import argparse
  3. import toml
  4. from dotenv import load_dotenv
  5. from opendevin.schema import ConfigType
  6. load_dotenv()
  7. DEFAULT_CONFIG: dict = {
  8. ConfigType.LLM_API_KEY: None,
  9. ConfigType.LLM_BASE_URL: None,
  10. ConfigType.WORKSPACE_BASE: os.getcwd(),
  11. ConfigType.WORKSPACE_MOUNT_PATH: None,
  12. ConfigType.WORKSPACE_MOUNT_PATH_IN_SANDBOX: '/workspace',
  13. ConfigType.WORKSPACE_MOUNT_REWRITE: None,
  14. ConfigType.LLM_MODEL: 'gpt-3.5-turbo-1106',
  15. ConfigType.SANDBOX_CONTAINER_IMAGE: 'ghcr.io/opendevin/sandbox',
  16. ConfigType.RUN_AS_DEVIN: 'true',
  17. ConfigType.LLM_EMBEDDING_MODEL: 'local',
  18. ConfigType.LLM_EMBEDDING_DEPLOYMENT_NAME: None,
  19. ConfigType.LLM_API_VERSION: None,
  20. ConfigType.LLM_NUM_RETRIES: 1,
  21. ConfigType.LLM_COOLDOWN_TIME: 1,
  22. ConfigType.MAX_ITERATIONS: 100,
  23. # GPT-4 pricing is $10 per 1M input tokens. Since tokenization happens on LLM side,
  24. # we cannot easily count number of tokens, but we can count characters.
  25. # Assuming 5 characters per token, 5 million is a reasonable default limit.
  26. ConfigType.MAX_CHARS: 5_000_000,
  27. ConfigType.AGENT: 'MonologueAgent',
  28. ConfigType.E2B_API_KEY: '',
  29. ConfigType.SANDBOX_TYPE: 'ssh', # Can be 'ssh', 'exec', or 'e2b'
  30. ConfigType.USE_HOST_NETWORK: 'false',
  31. ConfigType.SSH_HOSTNAME: 'localhost',
  32. ConfigType.DISABLE_COLOR: 'false',
  33. }
  34. config_str = ''
  35. if os.path.exists('config.toml'):
  36. with open('config.toml', 'rb') as f:
  37. config_str = f.read().decode('utf-8')
  38. tomlConfig = toml.loads(config_str)
  39. config = DEFAULT_CONFIG.copy()
  40. for k, v in config.items():
  41. if k in os.environ:
  42. config[k] = os.environ[k]
  43. elif k in tomlConfig:
  44. config[k] = tomlConfig[k]
  45. def get_parser():
  46. parser = argparse.ArgumentParser(
  47. description='Run an agent with a specific task')
  48. parser.add_argument(
  49. '-d',
  50. '--directory',
  51. type=str,
  52. help='The working directory for the agent',
  53. )
  54. parser.add_argument(
  55. '-t', '--task', type=str, default='', help='The task for the agent to perform'
  56. )
  57. parser.add_argument(
  58. '-f',
  59. '--file',
  60. type=str,
  61. help='Path to a file containing the task. Overrides -t if both are provided.',
  62. )
  63. parser.add_argument(
  64. '-c',
  65. '--agent-cls',
  66. default='MonologueAgent',
  67. type=str,
  68. help='The agent class to use',
  69. )
  70. parser.add_argument(
  71. '-m',
  72. '--model-name',
  73. default=config.get(ConfigType.LLM_MODEL),
  74. type=str,
  75. help='The (litellm) model name to use',
  76. )
  77. parser.add_argument(
  78. '-i',
  79. '--max-iterations',
  80. default=config.get(ConfigType.MAX_ITERATIONS),
  81. type=int,
  82. help='The maximum number of iterations to run the agent',
  83. )
  84. parser.add_argument(
  85. '-n',
  86. '--max-chars',
  87. default=config.get(ConfigType.MAX_CHARS),
  88. type=int,
  89. help='The maximum number of characters to send to and receive from LLM per task',
  90. )
  91. return parser
  92. def parse_arguments():
  93. parser = get_parser()
  94. args, _ = parser.parse_known_args()
  95. if args.directory:
  96. config[ConfigType.WORKSPACE_BASE] = os.path.abspath(args.directory)
  97. print(f'Setting workspace base to {config[ConfigType.WORKSPACE_BASE]}')
  98. return args
  99. args = parse_arguments()
  100. def finalize_config():
  101. if config.get(ConfigType.WORKSPACE_MOUNT_REWRITE) and not config.get(ConfigType.WORKSPACE_MOUNT_PATH):
  102. base = config.get(ConfigType.WORKSPACE_BASE) or os.getcwd()
  103. parts = config[ConfigType.WORKSPACE_MOUNT_REWRITE].split(':')
  104. config[ConfigType.WORKSPACE_MOUNT_PATH] = base.replace(parts[0], parts[1])
  105. finalize_config()
  106. def get(key: str, required: bool = False):
  107. """
  108. Get a key from the environment variables or config.toml or default configs.
  109. """
  110. value = config.get(key)
  111. if not value and required:
  112. raise KeyError(f"Please set '{key}' in `config.toml` or `.env`.")
  113. return value