###################### OpenHands Configuration Example ######################
#
# All settings have default values, so you only need to uncomment and
# modify what you want to change.
# The fields within each section are sorted in alphabetical order.
#
##############################################################################
  8. #################################### Core ####################################
  9. # General core configurations
  10. ##############################################################################
  11. [core]
  12. # API key for E2B
  13. #e2b_api_key = ""
  14. # Base path for the workspace
  15. workspace_base = "./workspace"
  16. # Cache directory path
  17. #cache_dir = "/tmp/cache"
  18. # Debugging enabled
  19. #debug = false
  20. # Disable color in terminal output
  21. #disable_color = false
  22. # Enable saving and restoring the session when run from CLI
  23. #enable_cli_session = false
  24. # File store path
  25. #file_store_path = "/tmp/file_store"
  26. # File store type
  27. #file_store = "memory"
  28. # List of allowed file extensions for uploads
  29. #file_uploads_allowed_extensions = [".*"]
  30. # Maximum file size for uploads, in megabytes
  31. #file_uploads_max_file_size_mb = 0
  32. # Maximum budget per task, 0.0 means no limit
  33. #max_budget_per_task = 0.0
  34. # Maximum number of iterations
  35. #max_iterations = 100
  36. # Path to mount the workspace in the sandbox
  37. #workspace_mount_path_in_sandbox = "/workspace"
  38. # Path to mount the workspace
  39. #workspace_mount_path = ""
  40. # Path to rewrite the workspace mount path to
  41. #workspace_mount_rewrite = ""
  42. # Run as openhands
  43. #run_as_openhands = true
  44. # Runtime environment
  45. #runtime = "eventstream"
  46. # Name of the default agent
  47. #default_agent = "CodeActAgent"
  48. # JWT secret for authentication
  49. #jwt_secret = ""
  50. # Restrict file types for file uploads
  51. #file_uploads_restrict_file_types = false
  52. # List of allowed file extensions for uploads
  53. #file_uploads_allowed_extensions = [".*"]
  54. #################################### LLM #####################################
  55. # Configuration for LLM models (group name starts with 'llm')
  56. # use 'llm' for the default LLM config
  57. ##############################################################################
  58. [llm]
  59. # AWS access key ID
  60. #aws_access_key_id = ""
  61. # AWS region name
  62. #aws_region_name = ""
  63. # AWS secret access key
  64. #aws_secret_access_key = ""
  65. # API key to use
  66. api_key = "your-api-key"
  67. # API base URL
  68. #base_url = ""
  69. # API version
  70. #api_version = ""
  71. # Cost per input token
  72. #input_cost_per_token = 0.0
  73. # Cost per output token
  74. #output_cost_per_token = 0.0
  75. # Custom LLM provider
  76. #custom_llm_provider = ""
  77. # Embedding API base URL
  78. #embedding_base_url = ""
  79. # Embedding deployment name
  80. #embedding_deployment_name = ""
  81. # Embedding model to use
  82. embedding_model = "local"
  83. # Maximum number of characters in an observation's content
  84. #max_message_chars = 10000
  85. # Maximum number of input tokens
  86. #max_input_tokens = 0
  87. # Maximum number of output tokens
  88. #max_output_tokens = 0
  89. # Model to use
  90. model = "gpt-4o"
  91. # Number of retries to attempt when an operation fails with the LLM.
  92. # Increase this value to allow more attempts before giving up
  93. #num_retries = 8
  94. # Maximum wait time (in seconds) between retry attempts
  95. # This caps the exponential backoff to prevent excessively long
  96. #retry_max_wait = 120
  97. # Minimum wait time (in seconds) between retry attempts
  98. # This sets the initial delay before the first retry
  99. #retry_min_wait = 15
  100. # Multiplier for exponential backoff calculation
  101. # The wait time increases by this factor after each failed attempt
  102. # A value of 2.0 means each retry waits twice as long as the previous one
  103. #retry_multiplier = 2.0
  104. # Drop any unmapped (unsupported) params without causing an exception
  105. #drop_params = false
  106. # Using the prompt caching feature if provided by the LLM and supported
  107. #caching_prompt = true
  108. # Base URL for the OLLAMA API
  109. #ollama_base_url = ""
  110. # Temperature for the API
  111. #temperature = 0.0
  112. # Timeout for the API
  113. #timeout = 0
  114. # Top p for the API
  115. #top_p = 1.0
  116. # If model is vision capable, this option allows to disable image processing (useful for cost reduction).
  117. #disable_vision = true
  118. [llm.gpt4o-mini]
  119. # API key to use
  120. api_key = "your-api-key"
  121. # Model to use
  122. model = "gpt-4o-mini"
  123. #################################### Agent ###################################
  124. # Configuration for agents (group name starts with 'agent')
  125. # Use 'agent' for the default agent config
  126. # otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
  127. # agent.CodeActAgent
  128. ##############################################################################
  129. [agent]
  130. # Name of the micro agent to use for this agent
  131. #micro_agent_name = ""
  132. # Memory enabled
  133. #memory_enabled = false
  134. # Memory maximum threads
  135. #memory_max_threads = 2
  136. # LLM config group to use
  137. #llm_config = 'your-llm-config-group'
  138. [agent.RepoExplorerAgent]
  139. # Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
  140. # useful when an agent doesn't demand high quality but uses a lot of tokens
  141. llm_config = 'gpt3'
  142. #################################### Sandbox ###################################
  143. # Configuration for the sandbox
  144. ##############################################################################
  145. [sandbox]
  146. # Sandbox timeout in seconds
  147. #timeout = 120
  148. # Sandbox user ID
  149. #user_id = 1000
  150. # Container image to use for the sandbox
  151. #base_container_image = "nikolaik/python-nodejs:python3.12-nodejs22"
  152. # Use host network
  153. #use_host_network = false
  154. # Enable auto linting after editing
  155. #enable_auto_lint = false
  156. # Whether to initialize plugins
  157. #initialize_plugins = true
  158. # Extra dependencies to install in the runtime image
  159. #runtime_extra_deps = ""
  160. # Environment variables to set at the launch of the runtime
  161. #runtime_startup_env_vars = {}
  162. # BrowserGym environment to use for evaluation
  163. #browsergym_eval_env = ""
  164. #################################### Security ###################################
  165. # Configuration for security features
  166. ##############################################################################
  167. [security]
  168. # Enable confirmation mode
  169. #confirmation_mode = false
  170. # The security analyzer to use
  171. #security_analyzer = ""
#################################### Eval ####################################
# Configuration for the evaluation; please refer to the specific evaluation
# plugin for the available options
##############################################################################