###################### OpenHands Configuration Example ######################
#
# All settings have default values, so you only need to uncomment and
# modify what you want to change.
# The fields within each section are sorted in alphabetical order.
#
##############################################################################

#################################### Core ####################################
# General core configurations
##############################################################################
[core]
# API key for E2B
#e2b_api_key = ""
# Base path for the workspace
workspace_base = "./workspace"
# Cache directory path
#cache_dir = "/tmp/cache"
# Debugging enabled
#debug = false
# Disable color in terminal output
#disable_color = false
# Enable saving and restoring the session when run from CLI
#enable_cli_session = false
# Path to store trajectories
#trajectories_path = "./trajectories"
# File store path
#file_store_path = "/tmp/file_store"
# File store type
#file_store = "memory"
# List of allowed file extensions for uploads
#file_uploads_allowed_extensions = [".*"]
# Maximum file size for uploads, in megabytes
#file_uploads_max_file_size_mb = 0
# Maximum budget per task, 0.0 means no limit
#max_budget_per_task = 0.0
# Maximum number of iterations
#max_iterations = 100
# Path to mount the workspace in the sandbox
#workspace_mount_path_in_sandbox = "/workspace"
# Path to mount the workspace
#workspace_mount_path = ""
# Path to rewrite the workspace mount path to
#workspace_mount_rewrite = ""
# Run as openhands
#run_as_openhands = true
# Runtime environment
#runtime = "eventstream"
# Name of the default agent
#default_agent = "CodeActAgent"
# JWT secret for authentication
#jwt_secret = ""
# Restrict file types for file uploads
#file_uploads_restrict_file_types = false

#################################### LLM #####################################
# Configuration for LLM models (group name starts with 'llm')
# use 'llm' for the default LLM config
##############################################################################
[llm]
# AWS access key ID
#aws_access_key_id = ""
# AWS region name
#aws_region_name = ""
# AWS secret access key
#aws_secret_access_key = ""
# API key to use
api_key = "your-api-key"
# API base URL
#base_url = ""
# API version
#api_version = ""
# Cost per input token
#input_cost_per_token = 0.0
# Cost per output token
#output_cost_per_token = 0.0
# Custom LLM provider
#custom_llm_provider = ""
# Embedding API base URL
#embedding_base_url = ""
# Embedding deployment name
#embedding_deployment_name = ""
# Embedding model to use
embedding_model = "local"
# Maximum number of characters in an observation's content
#max_message_chars = 10000
# Maximum number of input tokens
#max_input_tokens = 0
# Maximum number of output tokens
#max_output_tokens = 0
# Model to use
model = "gpt-4o"
# Number of retries to attempt when an operation fails with the LLM.
# Increase this value to allow more attempts before giving up
#num_retries = 8
# Maximum wait time (in seconds) between retry attempts
# This caps the exponential backoff to prevent excessively long delays
#retry_max_wait = 120
# Minimum wait time (in seconds) between retry attempts
# This sets the initial delay before the first retry
#retry_min_wait = 15
# Multiplier for exponential backoff calculation
# The wait time increases by this factor after each failed attempt
# A value of 2.0 means each retry waits twice as long as the previous one
#retry_multiplier = 2.0
# Drop any unmapped (unsupported) params without causing an exception
#drop_params = false
# Use the prompt caching feature if provided and supported by the LLM
#caching_prompt = true
# Base URL for the OLLAMA API
#ollama_base_url = ""
# Temperature for the API
#temperature = 0.0
# Timeout for the API
#timeout = 0
# Top p for the API
#top_p = 1.0
# If model is vision capable, this option allows to disable image processing (useful for cost reduction).
#disable_vision = true

[llm.gpt4o-mini]
# API key to use
api_key = "your-api-key"
# Model to use
model = "gpt-4o-mini"

#################################### Agent ###################################
# Configuration for agents (group name starts with 'agent')
# Use 'agent' for the default agent config
# otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
# agent.CodeActAgent
##############################################################################
[agent]
# Name of the micro agent to use for this agent
#micro_agent_name = ""
# Memory enabled
#memory_enabled = false
# Memory maximum threads
#memory_max_threads = 3
# LLM config group to use
#llm_config = "your-llm-config-group"

[agent.RepoExplorerAgent]
# Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
# useful when an agent doesn't demand high quality but uses a lot of tokens
llm_config = "gpt3"

#################################### Sandbox ###################################
# Configuration for the sandbox
##############################################################################
[sandbox]
# Sandbox timeout in seconds
#timeout = 120
# Sandbox user ID
#user_id = 1000
# Container image to use for the sandbox
#base_container_image = "nikolaik/python-nodejs:python3.12-nodejs22"
# Use host network
#use_host_network = false
# Enable auto linting after editing
#enable_auto_lint = false
# Whether to initialize plugins
#initialize_plugins = true
# Extra dependencies to install in the runtime image
#runtime_extra_deps = ""
# Environment variables to set at the launch of the runtime
#runtime_startup_env_vars = {}
# BrowserGym environment to use for evaluation
#browsergym_eval_env = ""

#################################### Security ###################################
# Configuration for security features
##############################################################################
[security]
# Enable confirmation mode
#confirmation_mode = false
# The security analyzer to use
#security_analyzer = ""

#################################### Eval ####################################
# Configuration for the evaluation, please refer to the specific evaluation
# plugin for the available options
##############################################################################