  1. ###################### OpenDevin Configuration Example ######################
  2. #
  3. # All settings have default values, so you only need to uncomment and
  4. # modify what you want to change
  5. # The fields within each section are sorted in alphabetical order.
  6. #
  7. ##############################################################################
  8. #################################### Core ####################################
  9. # General core configurations
  10. ##############################################################################
  11. [core]
  12. # API key for E2B
  13. #e2b_api_key = ""
  14. # Base path for the workspace
  15. workspace_base = "./workspace"
  16. # Cache directory path
  17. #cache_dir = "/tmp/cache"
  18. # Debugging enabled
  19. #debug = false
  20. # Disable color in terminal output
  21. #disable_color = false
  22. # Enable auto linting after editing
  23. #enable_auto_lint = false
  24. # Enable saving and restoring the session when run from CLI
  25. #enable_cli_session = false
  26. # File store path
  27. #file_store_path = "/tmp/file_store"
  28. # File store type
  29. #file_store = "memory"
  30. # List of allowed file extensions for uploads
  31. #file_uploads_allowed_extensions = [".*"]
  32. # Maximum file size for uploads, in megabytes
  33. #file_uploads_max_file_size_mb = 0
  34. # Maximum budget per task, 0.0 means no limit
  35. #max_budget_per_task = 0.0
  36. # Maximum number of iterations
  37. #max_iterations = 100
  38. # Path to mount the workspace in the sandbox
  39. #workspace_mount_path_in_sandbox = "/workspace"
  40. # Path to mount the workspace
  41. #workspace_mount_path = ""
  42. # Path to rewrite the workspace mount path to
  43. #workspace_mount_rewrite = ""
  44. # Persist the sandbox
  45. persist_sandbox = false
  46. # Run as devin
  47. #run_as_devin = true
  48. # Runtime environment
  49. #runtime = "server"
  50. # SSH hostname for the sandbox
  51. #ssh_hostname = "localhost"
  52. # SSH password for the sandbox
  53. #ssh_password = ""
  54. # SSH port for the sandbox
  55. #ssh_port = 63710
  56. # Use host network
  57. #use_host_network = false
  58. # Name of the default agent
  59. #default_agent = "CodeActAgent"
  60. #################################### LLM #####################################
  61. # Configuration for LLM models (group name starts with 'llm')
  62. # use 'llm' for the default LLM config
  63. ##############################################################################
  64. [llm]
  65. # AWS access key ID
  66. #aws_access_key_id = ""
  67. # AWS region name
  68. #aws_region_name = ""
  69. # AWS secret access key
  70. #aws_secret_access_key = ""
  71. # API key to use
  72. api_key = "your-api-key"
  73. # API base URL
  74. #base_url = ""
  75. # API version
  76. #api_version = ""
  77. # Cost per input token
  78. #input_cost_per_token = 0.0
  79. # Cost per output token
  80. #output_cost_per_token = 0.0
  81. # Custom LLM provider
  82. #custom_llm_provider = ""
  83. # Embedding API base URL
  84. #embedding_base_url = ""
  85. # Embedding deployment name
  86. #embedding_deployment_name = ""
  87. # Embedding model to use
  88. embedding_model = ""
  89. # Maximum number of characters in an observation's content
  90. #max_message_chars = 10000
  91. # Maximum number of input tokens
  92. #max_input_tokens = 0
  93. # Maximum number of output tokens
  94. #max_output_tokens = 0
  95. # Model to use
  96. model = "gpt-4o"
  97. # Number of retries to attempt
  98. #num_retries = 5
  99. # Retry maximum wait time
  100. #retry_max_wait = 60
  101. # Retry minimum wait time
  102. #retry_min_wait = 3
  103. # Temperature for the API
  104. #temperature = 0.0
  105. # Timeout for the API
  106. #timeout = 0
  107. # Top p for the API
  108. #top_p = 0.5
  109. [llm.gpt3]
  110. # API key to use
  111. api_key = "your-api-key"
  112. # Model to use
  113. model = "gpt-3.5"
  114. #################################### Agent ###################################
  115. # Configuration for agents (group name starts with 'agent')
  116. # Use 'agent' for the default agent config
  117. # otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
  118. # agent.CodeActAgent
  119. ##############################################################################
  120. [agent]
  121. # Memory enabled
  122. #memory_enabled = false
  123. # Memory maximum threads
  124. #memory_max_threads = 2
  125. # LLM config group to use
  126. #llm_config = 'llm'
  127. [agent.RepoExplorerAgent]
  128. # Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
  129. # useful when an agent doesn't demand high quality but uses a lot of tokens
  130. llm_config = 'gpt3'
  131. #################################### Sandbox ###################################
  132. # Configuration for the sandbox
  133. ##############################################################################
  134. [sandbox]
  135. # Sandbox timeout in seconds
  136. #timeout = 120
  137. # Sandbox type (ssh, e2b, local)
  138. #box_type = "ssh"
  139. # Sandbox user ID
  140. #user_id = 1000
  141. # Container image to use for the sandbox
  142. #container_image = "ghcr.io/opendevin/sandbox:main"
#################################### Eval ####################################
# Configuration for the evaluation, please refer to the specific evaluation
# plugin for the available options
##############################################################################