import os

import pytest

from opendevin.core.config import (
    AgentConfig,
    AppConfig,
    LLMConfig,
    UndefinedString,
    finalize_config,
    load_from_env,
    load_from_toml,
)
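
# The tests below exercise the configuration pipeline in its usual order:
# build an AppConfig (defaults), optionally load_from_toml(), then
# load_from_env() (environment variables take precedence over TOML values),
# and finally finalize_config() to resolve derived settings such as
# workspace_mount_path and the cache directory.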


@pytest.fixture
def setup_env():
    # Create old-style and new-style TOML files
    with open('old_style_config.toml', 'w') as f:
        f.write('[default]\nLLM_MODEL="GPT-4"\n')

    with open('new_style_config.toml', 'w') as f:
        f.write('[app]\nLLM_MODEL="GPT-3"\n')

    yield

    # Cleanup TOML files after the test
    os.remove('old_style_config.toml')
    os.remove('new_style_config.toml')


@pytest.fixture
def temp_toml_file(tmp_path):
    # Fixture to create a temporary directory and TOML file for testing
    tmp_toml_file = os.path.join(tmp_path, 'config.toml')
    yield tmp_toml_file


@pytest.fixture
def default_config(monkeypatch):
    # Fixture to provide a default AppConfig instance
    AppConfig.reset()
    yield AppConfig()


def test_compat_env_to_config(monkeypatch, setup_env):
    # Use `monkeypatch` to set environment variables for this specific test
    monkeypatch.setenv('WORKSPACE_BASE', '/repos/opendevin/workspace')
    monkeypatch.setenv('LLM_API_KEY', 'sk-proj-rgMV0...')
    monkeypatch.setenv('LLM_MODEL', 'gpt-4o')
    monkeypatch.setenv('AGENT_MEMORY_MAX_THREADS', '4')
    monkeypatch.setenv('AGENT_MEMORY_ENABLED', 'True')
    monkeypatch.setenv('AGENT', 'CodeActAgent')

    config = AppConfig()
    load_from_env(config, os.environ)

    assert config.workspace_base == '/repos/opendevin/workspace'
    assert isinstance(config.llm, LLMConfig)
    assert config.llm.api_key == 'sk-proj-rgMV0...'
    assert config.llm.model == 'gpt-4o'
    assert isinstance(config.agent, AgentConfig)
    assert isinstance(config.agent.memory_max_threads, int)
    assert config.agent.memory_max_threads == 4
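    # AGENT_MEMORY_MAX_THREADS is supplied as the string '4' but ends up as an
    # int, i.e. load_from_env coerces values to the declared field types.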


def test_load_from_old_style_env(monkeypatch, default_config):
    # Test loading configuration from old-style environment variables using monkeypatch
    monkeypatch.setenv('LLM_API_KEY', 'test-api-key')
    monkeypatch.setenv('AGENT_MEMORY_ENABLED', 'True')
    monkeypatch.setenv('AGENT_NAME', 'PlannerAgent')
    monkeypatch.setenv('WORKSPACE_BASE', '/opt/files/workspace')

    load_from_env(default_config, os.environ)

    assert default_config.llm.api_key == 'test-api-key'
    assert default_config.agent.memory_enabled is True
    assert default_config.agent.name == 'PlannerAgent'
    assert default_config.workspace_base == '/opt/files/workspace'
    assert (
        default_config.workspace_mount_path is UndefinedString.UNDEFINED
    )  # before finalize_config
    assert (
        default_config.workspace_mount_path_in_sandbox is not UndefinedString.UNDEFINED
    )


def test_load_from_new_style_toml(default_config, temp_toml_file):
    # Test loading configuration from a new-style TOML file
    with open(temp_toml_file, 'w', encoding='utf-8') as toml_file:
        toml_file.write("""
[llm]
model = "test-model"
api_key = "toml-api-key"

[agent]
name = "TestAgent"
memory_enabled = true

[core]
workspace_base = "/opt/files2/workspace"
""")

    load_from_toml(default_config, temp_toml_file)

    assert default_config.llm.model == 'test-model'
    assert default_config.llm.api_key == 'toml-api-key'
    assert default_config.agent.name == 'TestAgent'
    assert default_config.agent.memory_enabled is True
    assert default_config.workspace_base == '/opt/files2/workspace'

    # before finalize_config, workspace_mount_path is UndefinedString.UNDEFINED if it was not set
    assert default_config.workspace_mount_path is UndefinedString.UNDEFINED
    assert (
        default_config.workspace_mount_path_in_sandbox is not UndefinedString.UNDEFINED
    )
    assert default_config.workspace_mount_path_in_sandbox == '/workspace'

    finalize_config(default_config)

    # after finalize_config, workspace_mount_path is set to the absolute path of workspace_base
    # if it was undefined
    assert default_config.workspace_mount_path == '/opt/files2/workspace'
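
# In the new-style TOML layout used above, [core] keys map to top-level
# AppConfig fields, while [llm] and [agent] populate the nested LLMConfig and
# AgentConfig respectively.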


def test_env_overrides_toml(monkeypatch, default_config, temp_toml_file):
    # Test that environment variables override TOML values using monkeypatch
    with open(temp_toml_file, 'w', encoding='utf-8') as toml_file:
        toml_file.write("""
[llm]
model = "test-model"
api_key = "toml-api-key"

[core]
workspace_base = "/opt/files3/workspace"
sandbox_type = "local"
disable_color = true
""")

    monkeypatch.setenv('LLM_API_KEY', 'env-api-key')
    monkeypatch.setenv('WORKSPACE_BASE', 'UNDEFINED')
    monkeypatch.setenv('SANDBOX_TYPE', 'ssh')

    load_from_toml(default_config, temp_toml_file)

    # before finalize_config, workspace_mount_path is UndefinedString.UNDEFINED if it was not set
    assert default_config.workspace_mount_path is UndefinedString.UNDEFINED

    load_from_env(default_config, os.environ)

    assert os.environ.get('LLM_MODEL') is None
    assert default_config.llm.model == 'test-model'
    assert default_config.llm.api_key == 'env-api-key'

    # after we set workspace_base to 'UNDEFINED' in the environment,
    # workspace_base should be set to that;
    # workspace_mount_path is still UndefinedString.UNDEFINED
    assert default_config.workspace_base is not UndefinedString.UNDEFINED
    assert default_config.workspace_base == 'UNDEFINED'
    assert default_config.workspace_mount_path is UndefinedString.UNDEFINED
    assert default_config.workspace_mount_path == 'UNDEFINED'
    assert default_config.sandbox_type == 'ssh'
    assert default_config.disable_color is True

    finalize_config(default_config)

    # after finalize_config, workspace_mount_path is set to the absolute path of workspace_base if it was undefined
    assert default_config.workspace_mount_path == os.getcwd() + '/UNDEFINED'
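    # i.e. a literal 'UNDEFINED' coming from the environment is stored as an
    # ordinary string value; only the UndefinedString.UNDEFINED sentinel marks
    # a value that was never set.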


def test_defaults_dict_after_updates(default_config):
    # Test that `defaults_dict` retains initial values after updates.
    initial_defaults = default_config.defaults_dict
    assert (
        initial_defaults['workspace_mount_path']['default'] is UndefinedString.UNDEFINED
    )

    updated_config = AppConfig()
    updated_config.llm.api_key = 'updated-api-key'
    updated_config.agent.name = 'MonologueAgent'
    defaults_after_updates = updated_config.defaults_dict

    assert defaults_after_updates['llm']['api_key']['default'] is None
    assert defaults_after_updates['agent']['name']['default'] == 'CodeActAgent'
    assert (
        defaults_after_updates['workspace_mount_path']['default']
        is UndefinedString.UNDEFINED
    )
    assert defaults_after_updates == initial_defaults
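    # defaults_dict reports each field's declared default, not the value
    # currently set on the instance, so the updates above do not show up in it.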


def test_invalid_toml_format(monkeypatch, temp_toml_file, default_config):
    # Invalid TOML format doesn't break the configuration
    monkeypatch.setenv('LLM_MODEL', 'gpt-5-turbo-1106')
    monkeypatch.setenv('WORKSPACE_MOUNT_PATH', '/home/user/project')
    monkeypatch.delenv('LLM_API_KEY', raising=False)

    with open(temp_toml_file, 'w', encoding='utf-8') as toml_file:
        toml_file.write('INVALID TOML CONTENT')

    load_from_toml(default_config, temp_toml_file)
    load_from_env(default_config, os.environ)

    default_config.ssh_password = None  # prevent leak
    default_config.jwt_secret = None  # prevent leak

    assert default_config.llm.model == 'gpt-5-turbo-1106'
    assert default_config.llm.custom_llm_provider is None
    if default_config.llm.api_key is not None:  # prevent leak
        pytest.fail('LLM API key should be empty.')
    assert default_config.workspace_mount_path == '/home/user/project'


def test_finalize_config(default_config):
    # Test finalize config
    assert default_config.workspace_mount_path is UndefinedString.UNDEFINED
    default_config.sandbox_type = 'local'
    finalize_config(default_config)

    assert (
        default_config.workspace_mount_path_in_sandbox
        == default_config.workspace_mount_path
    )
    assert default_config.workspace_mount_path == os.path.abspath(
        default_config.workspace_base
    )


# tests for workspace, mount path, path in sandbox, cache dir
def test_workspace_mount_path_default(default_config):
    assert default_config.workspace_mount_path is UndefinedString.UNDEFINED
    finalize_config(default_config)
    assert default_config.workspace_mount_path == os.path.abspath(
        default_config.workspace_base
    )


def test_workspace_mount_path_in_sandbox_local(default_config):
    assert default_config.workspace_mount_path_in_sandbox == '/workspace'
    default_config.sandbox_type = 'local'
    finalize_config(default_config)
    assert (
        default_config.workspace_mount_path_in_sandbox
        == default_config.workspace_mount_path
    )


def test_workspace_mount_rewrite(default_config, monkeypatch):
    default_config.workspace_base = '/home/user/project'
    default_config.workspace_mount_rewrite = '/home/user:/sandbox'
    monkeypatch.setattr('os.getcwd', lambda: '/current/working/directory')
    finalize_config(default_config)
    assert default_config.workspace_mount_path == '/sandbox/project'
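    # workspace_mount_rewrite is an 'old_prefix:new_prefix' pair applied to the
    # mount path, hence /home/user/project becomes /sandbox/project.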


def test_embedding_base_url_default(default_config):
    default_config.llm.base_url = 'https://api.exampleapi.com'
    finalize_config(default_config)
    assert default_config.llm.embedding_base_url == 'https://api.exampleapi.com'
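    # With no embedding_base_url configured, finalize_config falls back to the
    # main llm.base_url.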


def test_cache_dir_creation(default_config, tmpdir):
    default_config.cache_dir = str(tmpdir.join('test_cache'))
    finalize_config(default_config)
    assert os.path.exists(default_config.cache_dir)


def test_api_keys_repr_str():
    # Test LLMConfig
    llm_config = LLMConfig(
        api_key='my_api_key',
        aws_access_key_id='my_access_key',
        aws_secret_access_key='my_secret_key',
    )
    assert "api_key='******'" in repr(llm_config)
    assert "aws_access_key_id='******'" in repr(llm_config)
    assert "aws_secret_access_key='******'" in repr(llm_config)
    assert "api_key='******'" in str(llm_config)
    assert "aws_access_key_id='******'" in str(llm_config)
    assert "aws_secret_access_key='******'" in str(llm_config)

    # Check that no other attrs in LLMConfig have 'key' or 'token' in their name
    # This will fail when new attrs are added, and attract attention
    known_key_token_attrs_llm = [
        'api_key',
        'aws_access_key_id',
        'aws_secret_access_key',
        'input_cost_per_token',
        'output_cost_per_token',
    ]
    for attr_name in dir(LLMConfig):
        if (
            not attr_name.startswith('__')
            and attr_name not in known_key_token_attrs_llm
        ):
            assert (
                'key' not in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'key' in LLMConfig"
            assert (
                'token' not in attr_name.lower() or 'tokens' in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'token' in LLMConfig"

    # Test AgentConfig
    # No attrs in AgentConfig have 'key' or 'token' in their name
    agent_config = AgentConfig(
        name='my_agent', memory_enabled=True, memory_max_threads=4
    )
    for attr_name in dir(AgentConfig):
        if not attr_name.startswith('__'):
            assert (
                'key' not in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'key' in AgentConfig"
            assert (
                'token' not in attr_name.lower() or 'tokens' in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'token' in AgentConfig"

    # Test AppConfig
    app_config = AppConfig(
        llm=llm_config,
        agent=agent_config,
        e2b_api_key='my_e2b_api_key',
    )
    assert "e2b_api_key='******'" in repr(app_config)
    assert "e2b_api_key='******'" in str(app_config)

    # Check that no other attrs in AppConfig have 'key' or 'token' in their name
    # This will fail when new attrs are added, and attract attention
    known_key_token_attrs_app = ['e2b_api_key']
    for attr_name in dir(AppConfig):
        if (
            not attr_name.startswith('__')
            and attr_name not in known_key_token_attrs_app
        ):
            assert (
                'key' not in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'key' in AppConfig"
            assert (
                'token' not in attr_name.lower() or 'tokens' in attr_name.lower()
            ), f"Unexpected attribute '{attr_name}' contains 'token' in AppConfig"


def test_max_iterations_and_max_budget_per_task_from_toml(temp_toml_file):
    temp_toml = """
[core]
max_iterations = 100
max_budget_per_task = 4.0
"""
    config = AppConfig()
    with open(temp_toml_file, 'w') as f:
        f.write(temp_toml)

    load_from_toml(config, temp_toml_file)

    assert config.max_iterations == 100
    assert config.max_budget_per_task == 4.0