config.py

import argparse
import logging
import os
import pathlib
import platform
from dataclasses import dataclass, field, fields, is_dataclass
from types import UnionType
from typing import Any, ClassVar, get_args, get_origin

import toml
from dotenv import load_dotenv

from opendevin.core.utils import Singleton

logger = logging.getLogger(__name__)

load_dotenv()


@dataclass
class LLMConfig(metaclass=Singleton):
    """
    Configuration for the LLM model.

    Attributes:
        model: The model to use.
        api_key: The API key to use.
        base_url: The base URL for the API. This is necessary for local LLMs. It is also used for Azure embeddings.
        api_version: The version of the API.
        embedding_model: The embedding model to use.
        embedding_base_url: The base URL for the embedding API.
        embedding_deployment_name: The name of the deployment for the embedding API. This is used for Azure OpenAI.
        aws_access_key_id: The AWS access key ID.
        aws_secret_access_key: The AWS secret access key.
        aws_region_name: The AWS region name.
        num_retries: The number of retries to attempt.
        retry_min_wait: The minimum time to wait between retries, in seconds. This is the exponential backoff minimum. For models with very low rate limits, this can be set to 15-20.
        retry_max_wait: The maximum time to wait between retries, in seconds. This is the exponential backoff maximum.
        timeout: The timeout for the API.
        max_chars: The maximum number of characters to send to and receive from the API. This is a fallback for token counting, which doesn't work in all cases.
        temperature: The temperature for the API.
        top_p: The top p for the API.
        custom_llm_provider: The custom LLM provider to use. This is undocumented in opendevin and normally not used. It is documented on the litellm side.
        max_input_tokens: The maximum number of input tokens. Note that this is currently unused; at runtime the value is actually the total token limit in OpenAI (e.g. 128,000 tokens for GPT-4).
        max_output_tokens: The maximum number of output tokens. This is sent to the LLM.
    """

    model: str = 'gpt-3.5-turbo'
    api_key: str | None = None
    base_url: str | None = None
    api_version: str | None = None
    embedding_model: str = 'local'
    embedding_base_url: str | None = None
    embedding_deployment_name: str | None = None
    aws_access_key_id: str | None = None
    aws_secret_access_key: str | None = None
    aws_region_name: str | None = None
    num_retries: int = 5
    retry_min_wait: int = 3
    retry_max_wait: int = 60
    timeout: int | None = None
    max_chars: int = 5_000_000  # fallback for token counting
    temperature: float = 0.0
    top_p: float = 0.5
    custom_llm_provider: str | None = None
    max_input_tokens: int | None = None
    max_output_tokens: int | None = None

    def defaults_to_dict(self) -> dict:
        """
        Serialize fields to a dict for the frontend, including type hints, defaults, and whether each field is optional.
        """
        result = {}
        for f in fields(self):
            result[f.name] = get_field_info(f)
        return result
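
# Note: these config classes use the Singleton metaclass from
# opendevin.core.utils, so constructing one again (e.g. LLMConfig(...) in
# load_from_toml below) is expected to update and return the one shared
# instance rather than create a new object.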


@dataclass
class AgentConfig(metaclass=Singleton):
    """
    Configuration for the agent.

    Attributes:
        name: The name of the agent.
        memory_enabled: Whether long-term memory (embeddings) is enabled.
        memory_max_threads: The maximum number of threads indexing at the same time for embeddings.
    """

    name: str = 'CodeActAgent'
    memory_enabled: bool = False
    memory_max_threads: int = 2

    def defaults_to_dict(self) -> dict:
        """
        Serialize fields to a dict for the frontend, including type hints, defaults, and whether each field is optional.
        """
        result = {}
        for f in fields(self):
            result[f.name] = get_field_info(f)
        return result


@dataclass
class AppConfig(metaclass=Singleton):
    """
    Configuration for the app.

    Attributes:
        llm: The LLM configuration.
        agent: The agent configuration.
        runtime: The runtime environment.
        file_store: The file store to use.
        file_store_path: The path to the file store.
        workspace_base: The base path for the workspace. Defaults to ./workspace as an absolute path.
        workspace_mount_path: The path to mount the workspace. This is set to the workspace base by default.
        workspace_mount_path_in_sandbox: The path to mount the workspace in the sandbox. Defaults to /workspace.
        workspace_mount_rewrite: The path to rewrite the workspace mount path to.
        cache_dir: The path to the cache directory. Defaults to /tmp/cache.
        sandbox_container_image: The container image to use for the sandbox.
        run_as_devin: Whether to run as devin.
        max_iterations: The maximum number of iterations.
        e2b_api_key: The E2B API key.
        sandbox_type: The type of sandbox to use. Options are: ssh, exec, e2b, local.
        use_host_network: Whether to use the host network.
        ssh_hostname: The SSH hostname.
        disable_color: Whether to disable color, for terminals that don't support it.
        sandbox_user_id: The user ID for the sandbox.
        sandbox_timeout: The timeout for the sandbox.
        github_token: The GitHub token.
        debug: Whether to enable debugging.
        enable_auto_lint: Whether to enable auto-linting. This is False by default, for regular runs of the app. For evaluation, please set this to True.
    """

    llm: LLMConfig = field(default_factory=LLMConfig)
    agent: AgentConfig = field(default_factory=AgentConfig)
    runtime: str = 'server'
    file_store: str = 'memory'
    file_store_path: str = '/tmp/file_store'
    workspace_base: str = os.path.join(os.getcwd(), 'workspace')
    workspace_mount_path: str | None = None
    workspace_mount_path_in_sandbox: str = '/workspace'
    workspace_mount_rewrite: str | None = None
    cache_dir: str = '/tmp/cache'
    sandbox_container_image: str = 'ghcr.io/opendevin/sandbox' + (
        f':{os.getenv("OPEN_DEVIN_BUILD_VERSION")}'
        if os.getenv('OPEN_DEVIN_BUILD_VERSION')
        else ':main'
    )
    run_as_devin: bool = True
    max_iterations: int = 100
    e2b_api_key: str = ''
    sandbox_type: str = 'ssh'  # Can be 'ssh', 'exec', 'e2b', or 'local'
    use_host_network: bool = False
    ssh_hostname: str = 'localhost'
    disable_color: bool = False
    sandbox_user_id: int = os.getuid() if hasattr(os, 'getuid') else 1000
    sandbox_timeout: int = 120
    github_token: str | None = None
    debug: bool = False
    # once enabled, OpenDevin will lint files after editing
    enable_auto_lint: bool = False

    defaults_dict: ClassVar[dict] = {}

    def __post_init__(self):
        """
        Post-initialization hook, called when the instance is created with only default values.
        """
        AppConfig.defaults_dict = self.defaults_to_dict()

    def defaults_to_dict(self) -> dict:
        """
        Serialize fields to a dict for the frontend, including type hints, defaults, and whether each field is optional.
        """
        result = {}
        for f in fields(self):
            field_value = getattr(self, f.name)
            # nested dataclasses serialize their own defaults
            if is_dataclass(type(field_value)):
                result[f.name] = field_value.defaults_to_dict()
            else:
                result[f.name] = get_field_info(f)
        return result


def get_field_info(field):
    """
    Extract information about a dataclass field: type, optional, and default.

    Args:
        field: The field to extract information from.

    Returns: A dict with the field's type, whether it's optional, and its default value.
    """
    field_type = field.type
    optional = False

    # for types like str | None, find the non-None type and set optional to True;
    # this lets the frontend know whether a field is optional and show the
    # correct type in the UI.
    # Note: this only works for UnionTypes with None as one of the types
    if get_origin(field_type) is UnionType:
        types = get_args(field_type)
        non_none_arg = next((t for t in types if t is not type(None)), None)
        if non_none_arg is not None:
            field_type = non_none_arg
            optional = True

    # type name in a pretty format
    type_name = (
        field_type.__name__ if hasattr(field_type, '__name__') else str(field_type)
    )

    # default is always present
    default = field.default

    # return a schema with the useful info for the frontend
    return {'type': type_name.lower(), 'optional': optional, 'default': default}
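
# Illustrative output, given the dataclass fields above: for LLMConfig's
# `api_key: str | None = None`, get_field_info returns
#     {'type': 'str', 'optional': True, 'default': None}
# and for `num_retries: int = 5` it returns
#     {'type': 'int', 'optional': False, 'default': 5}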


def load_from_env(config: AppConfig, env_or_toml_dict: dict | os._Environ):
    """Reads env-style vars and sets config attributes based on env vars or a config.toml dict.

    Compatible with vars like LLM_BASE_URL, AGENT_MEMORY_ENABLED and others.

    Args:
        config: The AppConfig object to set attributes on.
        env_or_toml_dict: The environment variables or a config.toml dict.
    """

    def get_optional_type(union_type: UnionType) -> Any:
        """Returns the non-None type from a Union."""
        types = get_args(union_type)
        return next((t for t in types if t is not type(None)), None)

    # helper function to set attributes based on env vars
    def set_attr_from_env(sub_config: Any, prefix=''):
        """Set attributes of a config dataclass based on environment variables."""
        for field_name, field_type in sub_config.__annotations__.items():
            # compute the expected env var name from the prefix and field name,
            # e.g. LLM_BASE_URL
            env_var_name = (prefix + field_name).upper()

            if is_dataclass(field_type):
                # nested dataclass
                nested_sub_config = getattr(sub_config, field_name)
                # the agent field is a special case: the env var for agent.name is just 'AGENT'
                if field_name == 'agent' and 'AGENT' in env_or_toml_dict:
                    setattr(nested_sub_config, 'name', env_or_toml_dict[env_var_name])
                set_attr_from_env(nested_sub_config, prefix=field_name + '_')
            elif env_var_name in env_or_toml_dict:
                # convert the env var to the correct type and set it
                value = env_or_toml_dict[env_var_name]
                try:
                    # if it's an optional type, get the non-None type
                    if get_origin(field_type) is UnionType:
                        field_type = get_optional_type(field_type)

                    # attempt to cast the env var to the type hinted in the dataclass
                    if field_type is bool:
                        cast_value = str(value).lower() in ['true', '1']
                    else:
                        cast_value = field_type(value)
                    setattr(sub_config, field_name, cast_value)
                except (ValueError, TypeError):
                    logger.error(
                        f'Error setting env var {env_var_name}={value}: check that the value is of the right type'
                    )

    # start processing from the root of the config object
    set_attr_from_env(config)
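
# Illustrative mappings, using the field names defined above:
#   LLM_API_KEY=sk-...         -> config.llm.api_key
#   AGENT_MEMORY_ENABLED=true  -> config.agent.memory_enabled (cast to bool)
#   SANDBOX_TYPE=exec          -> config.sandbox_type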


def load_from_toml(config: AppConfig, toml_file: str = 'config.toml'):
    """Load the config from the toml file. Supports both styles of config vars.

    Args:
        config: The AppConfig object to update attributes of.
        toml_file: The path to the toml file. Defaults to 'config.toml'.
    """
    # try to read the config.toml file into the config object
    toml_config = {}

    try:
        with open(toml_file, 'r', encoding='utf-8') as toml_contents:
            toml_config = toml.load(toml_contents)
    except FileNotFoundError:
        # the file is optional; we don't need to do anything
        return
    except toml.TomlDecodeError:
        logger.warning(
            'Cannot parse config from toml, toml values have not been applied.',
            exc_info=False,
        )
        return

    # if 'core' is not in the toml, treat it as an old-style toml with
    # env-style keys at the top level, and re-use the env loader
    if 'core' not in toml_config:
        load_from_env(config, toml_config)
        return

    core_config = toml_config['core']

    try:
        # set llm config from the toml file
        llm_config = config.llm
        if 'llm' in toml_config:
            llm_config = LLMConfig(**toml_config['llm'])

        # set agent config from the toml file
        agent_config = config.agent
        if 'agent' in toml_config:
            agent_config = AgentConfig(**toml_config['agent'])

        # update the config object with the new values
        config = AppConfig(llm=llm_config, agent=agent_config, **core_config)
    except (TypeError, KeyError):
        logger.warning(
            'Cannot parse config from toml, toml values have not been applied.',
            exc_info=False,
        )
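
# A new-style config.toml, for illustration; the keys mirror the dataclass
# fields defined above:
#
#   [core]
#   workspace_base = "./workspace"
#   sandbox_type = "ssh"
#
#   [llm]
#   model = "gpt-4"
#   api_key = "..."
#
#   [agent]
#   name = "CodeActAgent"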


def finalize_config(config: AppConfig):
    """
    More tweaks to the config after it's been loaded.
    """
    # set workspace_mount_path if not set by the user
    if config.workspace_mount_path is None:
        config.workspace_mount_path = os.path.abspath(config.workspace_base)
    config.workspace_base = os.path.abspath(config.workspace_base)

    # with the 'local' sandbox type there is no real sandbox, so the workspace
    # keeps the same path inside the sandbox as on the host
    if config.sandbox_type == 'local':
        config.workspace_mount_path_in_sandbox = config.workspace_mount_path

    if config.workspace_mount_rewrite:  # and not config.workspace_mount_path:
        # TODO why do we need to check if workspace_mount_path is None?
        base = config.workspace_base or os.getcwd()
        parts = config.workspace_mount_rewrite.split(':')
        config.workspace_mount_path = base.replace(parts[0], parts[1])

    if config.llm.embedding_base_url is None:
        config.llm.embedding_base_url = config.llm.base_url

    if config.use_host_network and platform.system() == 'Darwin':
        logger.warning(
            'Please upgrade to Docker Desktop 4.29.0 or later to use host network mode on macOS. '
            'See https://github.com/docker/roadmap/issues/238#issuecomment-2044688144 for more information.'
        )

    # make sure the cache dir exists
    if config.cache_dir:
        pathlib.Path(config.cache_dir).mkdir(parents=True, exist_ok=True)
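
# Load order, as wired below: dataclass defaults, then config.toml, then
# environment variables, then final tweaks; later sources override earlier ones.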
config = AppConfig()
load_from_toml(config)
load_from_env(config, os.environ)
finalize_config(config)


# Utility function for the command line --llm-config argument
def get_llm_config_arg(llm_config_arg: str) -> LLMConfig | None:
    """
    Get a group of llm settings from the config file.

    A group in config.toml can look like this:

    ```
    [gpt-3.5-for-eval]
    model = 'gpt-3.5-turbo'
    api_key = '...'
    temperature = 0.5
    num_retries = 10
    ...
    ```

    The user-defined group name, like "gpt-3.5-for-eval", is the argument to
    this function. The function loads an LLMConfig object with the settings of
    this group from the config file and returns it.

    Args:
        llm_config_arg: The group of llm settings to get from the config.toml file.

    Returns:
        LLMConfig: The LLMConfig object with the settings from the config file, or None if the group could not be loaded.
    """
    # keep only the name, in case brackets were included
    llm_config_arg = llm_config_arg.strip('[]')
    logger.info(f'Loading llm config from {llm_config_arg}')

    # load the toml file
    try:
        with open('config.toml', 'r', encoding='utf-8') as toml_file:
            toml_config = toml.load(toml_file)
    except FileNotFoundError as e:
        logger.error(f'Config file not found: {e}')
        return None
    except toml.TomlDecodeError as e:
        logger.error(f'Cannot parse llm group from {llm_config_arg}. Exception: {e}')
        return None

    # update the llm config with the specified section
    if llm_config_arg in toml_config:
        return LLMConfig(**toml_config[llm_config_arg])
    logger.debug(f'Loading from toml failed for {llm_config_arg}')
    return None
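
# Illustrative usage, e.g. when handling the --llm-config CLI option:
#   llm_config = get_llm_config_arg('gpt-3.5-for-eval')
#   if llm_config is not None:
#       print(llm_config.model)  # 'gpt-3.5-turbo'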


# Command line arguments
def get_parser():
    """
    Get the parser for the command line arguments.
    """
    parser = argparse.ArgumentParser(description='Run an agent with a specific task')
    parser.add_argument(
        '-d',
        '--directory',
        type=str,
        help='The working directory for the agent',
    )
    parser.add_argument(
        '-t', '--task', type=str, default='', help='The task for the agent to perform'
    )
    parser.add_argument(
        '-f',
        '--file',
        type=str,
        help='Path to a file containing the task. Overrides -t if both are provided.',
    )
    parser.add_argument(
        '-c',
        '--agent-cls',
        default=config.agent.name,
        type=str,
        help='The agent class to use',
    )
    parser.add_argument(
        '-m',
        '--model-name',
        default=config.llm.model,
        type=str,
        help='The (litellm) model name to use',
    )
    parser.add_argument(
        '-i',
        '--max-iterations',
        default=config.max_iterations,
        type=int,
        help='The maximum number of iterations to run the agent',
    )
    parser.add_argument(
        '-n',
        '--max-chars',
        default=config.llm.max_chars,
        type=int,
        help='The maximum number of characters to send to and receive from LLM per task',
    )
    # --eval configs are for evaluations only
    parser.add_argument(
        '--eval-output-dir',
        default='evaluation/evaluation_outputs/outputs',
        type=str,
        help='The directory to save evaluation output',
    )
    parser.add_argument(
        '--eval-n-limit',
        default=None,
        type=int,
        help='The number of instances to evaluate',
    )
    parser.add_argument(
        '--eval-num-workers',
        default=4,
        type=int,
        help='The number of workers to use for evaluation',
    )
    parser.add_argument(
        '--eval-note',
        default=None,
        type=str,
        help='The note to add to the evaluation directory',
    )
    parser.add_argument(
        '-l',
        '--llm-config',
        default=None,
        type=str,
        help='The group of llm settings, e.g. a [llama3] section in the toml file. Overrides model if both are provided.',
    )
    return parser


def parse_arguments():
    """
    Parse the command line arguments.
    """
    parser = get_parser()
    args, _ = parser.parse_known_args()
    if args.directory:
        config.workspace_base = os.path.abspath(args.directory)
        print(f'Setting workspace base to {config.workspace_base}')
    return args


args = parse_arguments()