test_llm.py

from unittest.mock import patch

import pytest

from openhands.core.config import LLMConfig
from openhands.core.metrics import Metrics
from openhands.llm.llm import LLM


@pytest.fixture
def default_config():
    return LLMConfig(model='gpt-3.5-turbo', api_key='test_key')


def test_llm_init_with_default_config(default_config):
    llm = LLM(default_config)
    assert llm.config.model == 'gpt-3.5-turbo'
    assert llm.config.api_key == 'test_key'
    assert isinstance(llm.metrics, Metrics)


@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_with_model_info(mock_get_model_info, default_config):
    # Token limits reported by litellm should be copied onto the config.
    mock_get_model_info.return_value = {
        'max_input_tokens': 8000,
        'max_output_tokens': 2000,
    }
    llm = LLM(default_config)
    assert llm.config.max_input_tokens == 8000
    assert llm.config.max_output_tokens == 2000


@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_without_model_info(mock_get_model_info, default_config):
    # If model info cannot be fetched, the LLM falls back to default limits.
    mock_get_model_info.side_effect = Exception('Model info not available')
    llm = LLM(default_config)
    assert llm.config.max_input_tokens == 4096
    assert llm.config.max_output_tokens == 1024


def test_llm_init_with_custom_config():
    custom_config = LLMConfig(
        model='custom-model',
        api_key='custom_key',
        max_input_tokens=5000,
        max_output_tokens=1500,
        temperature=0.8,
        top_p=0.9,
    )
    llm = LLM(custom_config)
    assert llm.config.model == 'custom-model'
    assert llm.config.api_key == 'custom_key'
    assert llm.config.max_input_tokens == 5000
    assert llm.config.max_output_tokens == 1500
    assert llm.config.temperature == 0.8
    assert llm.config.top_p == 0.9


def test_llm_init_with_metrics():
    config = LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
    metrics = Metrics()
    llm = LLM(config, metrics=metrics)
    assert llm.metrics is metrics


def test_llm_reset():
    llm = LLM(LLMConfig(model='gpt-3.5-turbo', api_key='test_key'))
    initial_metrics = llm.metrics
    llm.reset()
    # reset() should replace the metrics object with a fresh instance.
    assert llm.metrics is not initial_metrics
    assert isinstance(llm.metrics, Metrics)


@patch('openhands.llm.llm.litellm.get_model_info')
def test_llm_init_with_openrouter_model(mock_get_model_info, default_config):
    default_config.model = 'openrouter:gpt-3.5-turbo'
    mock_get_model_info.return_value = {
        'max_input_tokens': 7000,
        'max_output_tokens': 1500,
    }
    llm = LLM(default_config)
    assert llm.config.max_input_tokens == 7000
    assert llm.config.max_output_tokens == 1500
    mock_get_model_info.assert_called_once_with('openrouter:gpt-3.5-turbo')