llm_config.py

import os
from dataclasses import dataclass, fields
from typing import Optional

from openhands.core.config.config_utils import get_field_info
from openhands.core.logger import LOG_DIR

LLM_SENSITIVE_FIELDS = ['api_key', 'aws_access_key_id', 'aws_secret_access_key']


@dataclass
class LLMConfig:
    """Configuration for the LLM model.

    Attributes:
        model: The model to use.
        api_key: The API key to use.
        base_url: The base URL for the API. This is necessary for local LLMs. It is also used for Azure embeddings.
        api_version: The version of the API.
        embedding_model: The embedding model to use.
        embedding_base_url: The base URL for the embedding API.
        embedding_deployment_name: The name of the deployment for the embedding API. This is used for Azure OpenAI.
        aws_access_key_id: The AWS access key ID.
        aws_secret_access_key: The AWS secret access key.
        aws_region_name: The AWS region name.
        openrouter_site_url: The site URL reported to OpenRouter (exported as OR_SITE_URL for litellm).
        openrouter_app_name: The app name reported to OpenRouter (exported as OR_APP_NAME for litellm).
        num_retries: The number of retries to attempt.
        retry_multiplier: The multiplier for the exponential backoff.
        retry_min_wait: The minimum time to wait between retries, in seconds. This is the exponential backoff minimum. For models with very low rate limits, this can be set to 15-20.
        retry_max_wait: The maximum time to wait between retries, in seconds. This is the exponential backoff maximum.
        timeout: The timeout for the API.
        max_message_chars: The approximate maximum number of characters in the content of an event included in the prompt to the LLM. Larger observations are truncated.
        temperature: The temperature for the API.
        top_p: The top p for the API.
        custom_llm_provider: The custom LLM provider to use. This is undocumented in openhands and normally not used. It is documented on the litellm side.
        max_input_tokens: The maximum number of input tokens. Note that this is currently unused, and the value at runtime is actually the total tokens in OpenAI (e.g. 128,000 tokens for GPT-4).
        max_output_tokens: The maximum number of output tokens. This is sent to the LLM.
        input_cost_per_token: The cost per input token. This will be available in logs for the user to check.
        output_cost_per_token: The cost per output token. This will be available in logs for the user to check.
        ollama_base_url: The base URL for the OLLAMA API.
        drop_params: Drop any unmapped (unsupported) params without causing an exception.
        disable_vision: If the model is vision capable, this option allows image processing to be disabled (useful for cost reduction).
        caching_prompt: Use the prompt caching feature if provided by the LLM and supported by the provider.
        log_completions: Whether to log LLM completions to the state.
        log_completions_folder: The folder to log LLM completions to. Required if log_completions is True.
        draft_editor: A more efficient LLM to use for file editing. Introduced in [PR 3985](https://github.com/All-Hands-AI/OpenHands/pull/3985).
        custom_tokenizer: A custom tokenizer to use for token counting.
    """

    model: str = 'claude-3-5-sonnet-20241022'
    api_key: str | None = None
    base_url: str | None = None
    api_version: str | None = None
    embedding_model: str = 'local'
    embedding_base_url: str | None = None
    embedding_deployment_name: str | None = None
    aws_access_key_id: str | None = None
    aws_secret_access_key: str | None = None
    aws_region_name: str | None = None
    openrouter_site_url: str = 'https://docs.all-hands.dev/'
    openrouter_app_name: str = 'OpenHands'
    num_retries: int = 8
    retry_multiplier: float = 2
    retry_min_wait: int = 15
    retry_max_wait: int = 120
    timeout: int | None = None
    max_message_chars: int = 30_000  # maximum number of characters in an observation's content when sent to the llm
    temperature: float = 0.0
    top_p: float = 1.0
    custom_llm_provider: str | None = None
    max_input_tokens: int | None = None
    max_output_tokens: int | None = None
    input_cost_per_token: float | None = None
    output_cost_per_token: float | None = None
    ollama_base_url: str | None = None
    drop_params: bool = True
    disable_vision: bool | None = None
    caching_prompt: bool = True
    log_completions: bool = False
    log_completions_folder: str = os.path.join(LOG_DIR, 'completions')
    draft_editor: Optional['LLMConfig'] = None
    custom_tokenizer: str | None = None

    def defaults_to_dict(self) -> dict:
        """Serialize fields to a dict for the frontend, including type hints, defaults, and whether it's optional."""
        result = {}
        for f in fields(self):
            result[f.name] = get_field_info(f)
        return result

    def __post_init__(self):
        """Post-initialization hook to assign OpenRouter-related variables to environment variables.

        This ensures that these values are accessible to litellm at runtime.
        """
        # Assign OpenRouter-specific variables to environment variables
        if self.openrouter_site_url:
            os.environ['OR_SITE_URL'] = self.openrouter_site_url
        if self.openrouter_app_name:
            os.environ['OR_APP_NAME'] = self.openrouter_app_name

    def __str__(self):
        # Mask sensitive values so API keys and AWS credentials never appear in logs.
        attr_str = []
        for f in fields(self):
            attr_name = f.name
            attr_value = getattr(self, f.name)
            if attr_name in LLM_SENSITIVE_FIELDS:
                attr_value = '******' if attr_value else None
            attr_str.append(f'{attr_name}={repr(attr_value)}')
        return f"LLMConfig({', '.join(attr_str)})"

    def __repr__(self):
        return self.__str__()

    def to_safe_dict(self):
        """Return a dict with the sensitive fields replaced with ******."""
        ret = self.__dict__.copy()
        for k, v in ret.items():
            if k in LLM_SENSITIVE_FIELDS:
                ret[k] = '******' if v else None
            elif isinstance(v, LLMConfig):
                # Recurse into nested configs such as draft_editor.
                ret[k] = v.to_safe_dict()
        return ret

    @classmethod
    def from_dict(cls, llm_config_dict: dict) -> 'LLMConfig':
        """Create an LLMConfig object from a dictionary.

        Keys map directly to fields, except 'draft_editor', whose dict value is
        converted into a nested LLMConfig object.
        """
        # Take all non-dict values as plain field arguments.
        args = {k: v for k, v in llm_config_dict.items() if not isinstance(v, dict)}
        # Guard with isinstance so a non-dict 'draft_editor' value (already in
        # args above) does not crash the nested construction.
        if isinstance(llm_config_dict.get('draft_editor'), dict):
            args['draft_editor'] = LLMConfig(**llm_config_dict['draft_editor'])
        return cls(**args)
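

# A minimal usage sketch, not part of the module's API: the dict below is a
# hypothetical config (model names and keys are placeholders) showing how
# from_dict builds a config with a nested 'draft_editor' section, and how the
# sensitive fields are masked on output.
if __name__ == '__main__':
    config = LLMConfig.from_dict(
        {
            'model': 'gpt-4o',
            'api_key': 'sk-secret',
            'draft_editor': {'model': 'gpt-4o-mini', 'api_key': 'sk-secret'},
        }
    )
    # __str__ replaces api_key and the AWS credentials with '******'.
    print(config)
    # to_safe_dict masks the same fields, recursing into draft_editor.
    print(config.to_safe_dict())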