###################### OpenHands Configuration Example ######################
#
# All settings have default values, so you only need to uncomment and
# modify what you want to change.
# The fields within each section are sorted in alphabetical order.
#
##############################################################################
  8. #################################### Core ####################################
  9. # General core configurations
  10. ##############################################################################
  11. [core]
  12. # API key for E2B
  13. #e2b_api_key = ""
  14. # API key for Modal
  15. #modal_api_token_id = ""
  16. #modal_api_token_secret = ""
  17. # Base path for the workspace
  18. workspace_base = "./workspace"
  19. # Cache directory path
  20. #cache_dir = "/tmp/cache"
  21. # Debugging enabled
  22. #debug = false
  23. # Disable color in terminal output
  24. #disable_color = false
  25. # Enable saving and restoring the session when run from CLI
  26. #enable_cli_session = false
  27. # Path to store trajectories
  28. #trajectories_path="./trajectories"
  29. # File store path
  30. #file_store_path = "/tmp/file_store"
  31. # File store type
  32. #file_store = "memory"
  33. # List of allowed file extensions for uploads
  34. #file_uploads_allowed_extensions = [".*"]
  35. # Maximum file size for uploads, in megabytes
  36. #file_uploads_max_file_size_mb = 0
  37. # Maximum budget per task, 0.0 means no limit
  38. #max_budget_per_task = 0.0
  39. # Maximum number of iterations
  40. #max_iterations = 100
  41. # Path to mount the workspace in the sandbox
  42. #workspace_mount_path_in_sandbox = "/workspace"
  43. # Path to mount the workspace
  44. #workspace_mount_path = ""
  45. # Path to rewrite the workspace mount path to
  46. #workspace_mount_rewrite = ""
  47. # Run as openhands
  48. #run_as_openhands = true
  49. # Runtime environment
  50. #runtime = "eventstream"
  51. # Name of the default agent
  52. #default_agent = "CodeActAgent"
  53. # JWT secret for authentication
  54. #jwt_secret = ""
  55. # Restrict file types for file uploads
  56. #file_uploads_restrict_file_types = false
  57. # List of allowed file extensions for uploads
  58. #file_uploads_allowed_extensions = [".*"]
  59. #################################### LLM #####################################
  60. # Configuration for LLM models (group name starts with 'llm')
  61. # use 'llm' for the default LLM config
  62. ##############################################################################
  63. [llm]
  64. # AWS access key ID
  65. #aws_access_key_id = ""
  66. # AWS region name
  67. #aws_region_name = ""
  68. # AWS secret access key
  69. #aws_secret_access_key = ""
  70. # API key to use
  71. api_key = "your-api-key"
  72. # API base URL
  73. #base_url = ""
  74. # API version
  75. #api_version = ""
  76. # Cost per input token
  77. #input_cost_per_token = 0.0
  78. # Cost per output token
  79. #output_cost_per_token = 0.0
  80. # Custom LLM provider
  81. #custom_llm_provider = ""
  82. # Embedding API base URL
  83. #embedding_base_url = ""
  84. # Embedding deployment name
  85. #embedding_deployment_name = ""
  86. # Embedding model to use
  87. embedding_model = "local"
  88. # Maximum number of characters in an observation's content
  89. #max_message_chars = 10000
  90. # Maximum number of input tokens
  91. #max_input_tokens = 0
  92. # Maximum number of output tokens
  93. #max_output_tokens = 0
  94. # Model to use
  95. model = "gpt-4o"
  96. # Number of retries to attempt when an operation fails with the LLM.
  97. # Increase this value to allow more attempts before giving up
  98. #num_retries = 8
  99. # Maximum wait time (in seconds) between retry attempts
  100. # This caps the exponential backoff to prevent excessively long
  101. #retry_max_wait = 120
  102. # Minimum wait time (in seconds) between retry attempts
  103. # This sets the initial delay before the first retry
  104. #retry_min_wait = 15
  105. # Multiplier for exponential backoff calculation
  106. # The wait time increases by this factor after each failed attempt
  107. # A value of 2.0 means each retry waits twice as long as the previous one
  108. #retry_multiplier = 2.0
  109. # Drop any unmapped (unsupported) params without causing an exception
  110. #drop_params = false
  111. # Using the prompt caching feature if provided by the LLM and supported
  112. #caching_prompt = true
  113. # Base URL for the OLLAMA API
  114. #ollama_base_url = ""
  115. # Temperature for the API
  116. #temperature = 0.0
  117. # Timeout for the API
  118. #timeout = 0
  119. # Top p for the API
  120. #top_p = 1.0
  121. # If model is vision capable, this option allows to disable image processing (useful for cost reduction).
  122. #disable_vision = true
  123. [llm.gpt4o-mini]
  124. api_key = "your-api-key"
  125. model = "gpt-4o"
  126. #################################### Agent ###################################
  127. # Configuration for agents (group name starts with 'agent')
  128. # Use 'agent' for the default agent config
  129. # otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
  130. # agent.CodeActAgent
  131. ##############################################################################
  132. [agent]
  133. # Name of the micro agent to use for this agent
  134. #micro_agent_name = ""
  135. # Memory enabled
  136. #memory_enabled = false
  137. # Memory maximum threads
  138. #memory_max_threads = 3
  139. # LLM config group to use
  140. #llm_config = 'your-llm-config-group'
  141. [agent.RepoExplorerAgent]
  142. # Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
  143. # useful when an agent doesn't demand high quality but uses a lot of tokens
  144. llm_config = 'gpt3'
  145. #################################### Sandbox ###################################
  146. # Configuration for the sandbox
  147. ##############################################################################
  148. [sandbox]
  149. # Sandbox timeout in seconds
  150. #timeout = 120
  151. # Sandbox user ID
  152. #user_id = 1000
  153. # Container image to use for the sandbox
  154. #base_container_image = "nikolaik/python-nodejs:python3.12-nodejs22"
  155. # Use host network
  156. #use_host_network = false
  157. # Enable auto linting after editing
  158. #enable_auto_lint = false
  159. # Whether to initialize plugins
  160. #initialize_plugins = true
  161. # Extra dependencies to install in the runtime image
  162. #runtime_extra_deps = ""
  163. # Environment variables to set at the launch of the runtime
  164. #runtime_startup_env_vars = {}
  165. # BrowserGym environment to use for evaluation
  166. #browsergym_eval_env = ""
  167. #################################### Security ###################################
  168. # Configuration for security features
  169. ##############################################################################
  170. [security]
  171. # Enable confirmation mode
  172. #confirmation_mode = false
  173. # The security analyzer to use
  174. #security_analyzer = ""
#################################### Eval ####################################
# Configuration for the evaluation, please refer to the specific evaluation
# plugin for the available options
##############################################################################