@@ -0,0 +1,92 @@
+from unittest.mock import patch
+
+import pytest
+
+from opendevin.core.config import LLMConfig
+from opendevin.core.metrics import Metrics
+from opendevin.llm.llm import LLM
+
+
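+# Shared fixture: a minimal config with a placeholder model and API key.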
+@pytest.fixture
+def default_config():
+    return LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
+
+
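+# Constructor should adopt the config values verbatim and create a fresh Metrics.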
+def test_llm_init_with_default_config(default_config):
+    llm = LLM(default_config)
+    assert llm.config.model == 'gpt-3.5-turbo'
+    assert llm.config.api_key == 'test_key'
+    assert isinstance(llm.metrics, Metrics)
+
+
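+# When litellm reports model info, the token limits should be taken from it.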
+@patch('opendevin.llm.llm.litellm.get_model_info')
+def test_llm_init_with_model_info(mock_get_model_info, default_config):
+    mock_get_model_info.return_value = {
+        'max_input_tokens': 8000,
+        'max_output_tokens': 2000,
+    }
+    llm = LLM(default_config)
+    assert llm.config.max_input_tokens == 8000
+    assert llm.config.max_output_tokens == 2000
+
+
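+# When the model info lookup fails, token limits should fall back to 4096/1024.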
+@patch('opendevin.llm.llm.litellm.get_model_info')
+def test_llm_init_without_model_info(mock_get_model_info, default_config):
+    mock_get_model_info.side_effect = Exception('Model info not available')
+    llm = LLM(default_config)
+    assert llm.config.max_input_tokens == 4096
+    assert llm.config.max_output_tokens == 1024
+
+
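+# Explicitly configured values must pass through to the LLM unchanged.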
+def test_llm_init_with_custom_config():
+    custom_config = LLMConfig(
+        model='custom-model',
+        api_key='custom_key',
+        max_input_tokens=5000,
+        max_output_tokens=1500,
+        temperature=0.8,
+        top_p=0.9,
+    )
+    llm = LLM(custom_config)
+    assert llm.config.model == 'custom-model'
+    assert llm.config.api_key == 'custom_key'
+    assert llm.config.max_input_tokens == 5000
+    assert llm.config.max_output_tokens == 1500
+    assert llm.config.temperature == 0.8
+    assert llm.config.top_p == 0.9
+
+
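+# A caller-supplied Metrics instance should be used as-is, not replaced.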
+def test_llm_init_with_metrics():
+    config = LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
+    metrics = Metrics()
+    llm = LLM(config, metrics=metrics)
+    assert llm.metrics is metrics
+
+
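+# reset() should discard the accumulated metrics and start a fresh Metrics.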
+def test_llm_reset():
+    llm = LLM(LLMConfig(model='gpt-3.5-turbo', api_key='test_key'))
+    initial_metrics = llm.metrics
+    llm.reset()
+    assert llm.metrics is not initial_metrics
+    assert isinstance(llm.metrics, Metrics)
+
+
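+# The lookup should be keyed on the full provider-prefixed model name.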
+@patch('opendevin.llm.llm.litellm.get_model_info')
+def test_llm_init_with_openrouter_model(mock_get_model_info, default_config):
+    default_config.model = 'openrouter:gpt-3.5-turbo'
+    mock_get_model_info.return_value = {
+        'max_input_tokens': 7000,
+        'max_output_tokens': 1500,
+    }
+    llm = LLM(default_config)
+    assert llm.config.max_input_tokens == 7000
+    assert llm.config.max_output_tokens == 1500
+    mock_get_model_info.assert_called_once_with('openrouter:gpt-3.5-turbo')