# config.template.toml
  1. ###################### OpenDevin Configuration Example ######################
  2. #
  3. # All settings have default values, so you only need to uncomment and
  4. # modify what you want to change
  5. # The fields within each section are sorted in alphabetical order.
  6. #
  7. ##############################################################################
  8. #################################### Core ####################################
  9. # General core configurations
  10. ##############################################################################
  11. [core]
  12. # API key for E2B
  13. #e2b_api_key = ""
  14. # Base path for the workspace
  15. workspace_base = "./workspace"
  16. # Cache directory path
  17. #cache_dir = "/tmp/cache"
  18. # Debugging enabled
  19. #debug = false
  20. # Disable color in terminal output
  21. #disable_color = false
  22. # Enable saving and restoring the session when run from CLI
  23. #enable_cli_session = false
  24. # File store path
  25. #file_store_path = "/tmp/file_store"
  26. # File store type
  27. #file_store = "memory"
  28. # List of allowed file extensions for uploads
  29. #file_uploads_allowed_extensions = [".*"]
  30. # Maximum file size for uploads, in megabytes
  31. #file_uploads_max_file_size_mb = 0
  32. # Maximum budget per task, 0.0 means no limit
  33. #max_budget_per_task = 0.0
  34. # Maximum number of iterations
  35. #max_iterations = 100
  36. # Path to mount the workspace in the sandbox
  37. #workspace_mount_path_in_sandbox = "/workspace"
  38. # Path to mount the workspace
  39. #workspace_mount_path = ""
  40. # Path to rewrite the workspace mount path to
  41. #workspace_mount_rewrite = ""
  42. # Persist the sandbox
  43. persist_sandbox = false
  44. # Run as devin
  45. #run_as_devin = true
  46. # Runtime environment
  47. #runtime = "server"
  48. # SSH hostname for the sandbox
  49. #ssh_hostname = "localhost"
  50. # SSH password for the sandbox
  51. #ssh_password = ""
  52. # SSH port for the sandbox
  53. #ssh_port = 63710
  54. # Name of the default agent
  55. #default_agent = "CodeActAgent"
  56. #################################### LLM #####################################
  57. # Configuration for LLM models (group name starts with 'llm')
  58. # use 'llm' for the default LLM config
  59. ##############################################################################
  60. [llm]
  61. # AWS access key ID
  62. #aws_access_key_id = ""
  63. # AWS region name
  64. #aws_region_name = ""
  65. # AWS secret access key
  66. #aws_secret_access_key = ""
  67. # API key to use
  68. api_key = "your-api-key"
  69. # API base URL
  70. #base_url = ""
  71. # API version
  72. #api_version = ""
  73. # Cost per input token
  74. #input_cost_per_token = 0.0
  75. # Cost per output token
  76. #output_cost_per_token = 0.0
  77. # Custom LLM provider
  78. #custom_llm_provider = ""
  79. # Embedding API base URL
  80. #embedding_base_url = ""
  81. # Embedding deployment name
  82. #embedding_deployment_name = ""
  83. # Embedding model to use
  84. embedding_model = ""
  85. # Maximum number of characters in an observation's content
  86. #max_message_chars = 10000
  87. # Maximum number of input tokens
  88. #max_input_tokens = 0
  89. # Maximum number of output tokens
  90. #max_output_tokens = 0
  91. # Model to use
  92. model = "gpt-4o"
  93. # Number of retries to attempt
  94. #num_retries = 5
  95. # Retry maximum wait time
  96. #retry_max_wait = 60
  97. # Retry minimum wait time
  98. #retry_min_wait = 3
  99. # Temperature for the API
  100. #temperature = 0.0
  101. # Timeout for the API
  102. #timeout = 0
  103. # Top p for the API
  104. #top_p = 0.5
  105. [llm.gpt3]
  106. # API key to use
  107. api_key = "your-api-key"
  108. # Model to use
  109. model = "gpt-3.5"
  110. #################################### Agent ###################################
  111. # Configuration for agents (group name starts with 'agent')
  112. # Use 'agent' for the default agent config
  113. # otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
  114. # agent.CodeActAgent
  115. ##############################################################################
  116. [agent]
  117. # Memory enabled
  118. #memory_enabled = false
  119. # Memory maximum threads
  120. #memory_max_threads = 2
  121. # LLM config group to use
  122. #llm_config = 'llm'
  123. [agent.RepoExplorerAgent]
  124. # Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
  125. # useful when an agent doesn't demand high quality but uses a lot of tokens
  126. llm_config = 'gpt3'
  127. #################################### Sandbox ###################################
  128. # Configuration for the sandbox
  129. ##############################################################################
  130. [sandbox]
  131. # Sandbox timeout in seconds
  132. #timeout = 120
  133. # Sandbox type (ssh, e2b, local)
  134. #box_type = "ssh"
  135. # Sandbox user ID
  136. #user_id = 1000
  137. # Container image to use for the sandbox
  138. #container_image = "ghcr.io/opendevin/sandbox:main"
  139. # Use host network
  140. #use_host_network = false
  141. # Enable auto linting after editing
  142. #enable_auto_lint = false
#################################### Eval ####################################
# Configuration for the evaluation, please refer to the specific evaluation
# plugin for the available options
##############################################################################