|
@@ -44,6 +44,8 @@ class LLMConfig(metaclass=Singleton):
|
|
|
custom_llm_provider: The custom LLM provider to use. This is undocumented in opendevin, and normally not used. It is documented on the litellm side.
|
|
custom_llm_provider: The custom LLM provider to use. This is undocumented in opendevin, and normally not used. It is documented on the litellm side.
|
|
|
max_input_tokens: The maximum number of input tokens. Note that this is currently unused, and the value at runtime is actually the total tokens in OpenAI (e.g. 128,000 tokens for GPT-4).
|
|
max_input_tokens: The maximum number of input tokens. Note that this is currently unused, and the value at runtime is actually the total tokens in OpenAI (e.g. 128,000 tokens for GPT-4).
|
|
|
max_output_tokens: The maximum number of output tokens. This is sent to the LLM.
|
|
max_output_tokens: The maximum number of output tokens. This is sent to the LLM.
|
|
|
|
|
+ input_cost_per_token: The cost per input token. This will be available in logs for the user to check.
|
|
|
|
|
+ output_cost_per_token: The cost per output token. This will be available in logs for the user to check.
|
|
|
"""
|
|
"""
|
|
|
|
|
|
|
|
model: str = 'gpt-3.5-turbo'
|
|
model: str = 'gpt-3.5-turbo'
|
|
@@ -66,6 +68,8 @@ class LLMConfig(metaclass=Singleton):
|
|
|
custom_llm_provider: str | None = None
|
|
custom_llm_provider: str | None = None
|
|
|
max_input_tokens: int | None = None
|
|
max_input_tokens: int | None = None
|
|
|
max_output_tokens: int | None = None
|
|
max_output_tokens: int | None = None
|
|
|
|
|
+ input_cost_per_token: float | None = None
|
|
|
|
|
+ output_cost_per_token: float | None = None
|
|
|
|
|
|
|
|
def defaults_to_dict(self) -> dict:
|
|
def defaults_to_dict(self) -> dict:
|
|
|
"""
|
|
"""
|