| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265 |
- from unittest.mock import MagicMock, patch
- import pytest
- import requests
- from litellm.exceptions import RateLimitError
- from openhands.core.config import LLMConfig
- from openhands.events.action.message import MessageAction
- from openhands.llm.llm import LLM
- from openhands.resolver.github_issue import GithubIssue
- from openhands.resolver.issue_definitions import IssueHandler, PRHandler
@pytest.fixture(autouse=True)
def mock_logger(monkeypatch):
    """Silence completion prompt/response file logging for every test here."""
    logger_stub = MagicMock()
    # Both debug loggers write completion data to disk; stub them out.
    for target in (
        'openhands.llm.debug_mixin.llm_prompt_logger',
        'openhands.llm.debug_mixin.llm_response_logger',
    ):
        monkeypatch.setattr(target, logger_stub)
    return logger_stub
@pytest.fixture
def default_config():
    """LLMConfig with small retry counts/waits so retry tests finish quickly."""
    return LLMConfig(
        model='gpt-4o',
        api_key='test_key',
        num_retries=2,
        retry_min_wait=1,
        retry_max_wait=2,
    )
def test_handle_nonexistent_issue_reference():
    """A 404 for a referenced issue is swallowed and contributes no context."""
    config = LLMConfig(model='test', api_key='test')
    pr_handler = PRHandler('test-owner', 'test-repo', 'test-token', config)

    # Simulate GitHub answering 404 for the referenced issue number.
    not_found = MagicMock()
    not_found.raise_for_status.side_effect = requests.exceptions.HTTPError(
        '404 Client Error: Not Found'
    )

    with patch('requests.get', return_value=not_found):
        context = pr_handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #999999',  # Non-existent issue
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

    # Nothing could be fetched, so no context is returned.
    assert context == []
def test_handle_rate_limit_error():
    """A rate-limited GitHub response yields no external-issue context."""
    config = LLMConfig(model='test', api_key='test')
    pr_handler = PRHandler('test-owner', 'test-repo', 'test-token', config)

    # Simulate GitHub rejecting the request with a rate-limit error.
    rate_limited = MagicMock()
    rate_limited.raise_for_status.side_effect = requests.exceptions.HTTPError(
        '403 Client Error: Rate Limit Exceeded'
    )

    with patch('requests.get', return_value=rate_limited):
        context = pr_handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

    # The rate-limited request is treated as best-effort: no context added.
    assert context == []
def test_handle_network_error():
    """A connection failure while fetching a referenced issue yields no context."""
    config = LLMConfig(model='test', api_key='test')
    pr_handler = PRHandler('test-owner', 'test-repo', 'test-token', config)

    # Simulate the network dropping before any response arrives.
    network_failure = requests.exceptions.ConnectionError('Network Error')
    with patch('requests.get', side_effect=network_failure):
        context = pr_handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

    # The failed request is swallowed: no context added.
    assert context == []
def test_successful_issue_reference():
    """The body of a successfully fetched referenced issue becomes context."""
    config = LLMConfig(model='test', api_key='test')
    pr_handler = PRHandler('test-owner', 'test-repo', 'test-token', config)

    # Simulate a healthy GitHub response carrying the referenced issue body.
    ok_response = MagicMock()
    ok_response.raise_for_status.return_value = None
    ok_response.json.return_value = {'body': 'This is the referenced issue body'}

    with patch('requests.get', return_value=ok_response):
        context = pr_handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

    # The referenced issue's body is surfaced verbatim.
    assert context == ['This is the referenced issue body']
class MockLLMResponse:
    """Mimic the ``response.choices[0].message.content`` shape of an LLM reply.

    Only the attribute path used by the code under test is modeled; no other
    litellm response fields exist on this stub.
    """

    class Choice:
        """A single completion choice holding one message."""

        class Message:
            """Carrier for the completion text."""

            def __init__(self, content):
                # The raw completion string.
                self.content = content

        def __init__(self, content):
            # Wrap the text in the nested Message, matching litellm's shape.
            self.message = self.Message(content)

    def __init__(self, content):
        # Real responses may hold several choices; the stub always holds one.
        self.choices = [self.Choice(content)]
class DotDict(dict):
    """
    A dictionary that supports dot notation access.

    Nested dicts — including dicts inside lists — are recursively wrapped so
    chained attribute access works, e.g. ``d.choices[0].message.content``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Recursively wrap nested containers so dot access works at any depth.
        # Iterate over a snapshot since we replace values while walking.
        for key, value in list(self.items()):
            self[key] = self._wrap(value)

    @staticmethod
    def _wrap(value):
        """Return *value* with any nested dicts converted to DotDict."""
        if isinstance(value, dict) and not isinstance(value, DotDict):
            return DotDict(value)
        if isinstance(value, list):
            return [DotDict(item) if isinstance(item, dict) else item for item in value]
        return value

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails, so real dict
        # methods/attributes are unaffected.
        if key in self:
            return self[key]
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{key}'"
        )

    def __setattr__(self, key, value):
        # Bug fix: mirror __init__ by wrapping nested dicts/lists here too.
        # Previously only constructor-supplied values were wrapped, so
        # ``d.x = {'a': 1}; d.x.a`` raised AttributeError.
        self[key] = self._wrap(value)

    def __delattr__(self, key):
        if key in self:
            del self[key]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{key}'"
            )
@patch('openhands.llm.llm.litellm_completion')
def test_guess_success_rate_limit_wait_time(mock_litellm_completion, default_config):
    """Test that the retry mechanism in guess_success respects wait time between retries.

    The first completion call raises RateLimitError and the second succeeds,
    so the retry layer should sleep exactly once, for a duration inside the
    [retry_min_wait, retry_max_wait] window configured by default_config.
    """
    with patch('time.sleep') as mock_sleep:
        # Simulate a rate limit error followed by a successful response
        mock_litellm_completion.side_effect = [
            RateLimitError(
                'Rate limit exceeded', llm_provider='test_provider', model='test_model'
            ),
            # DotDict stands in for the litellm response object: attribute
            # access down to choices[0].message.content must work.
            DotDict(
                {
                    'choices': [
                        {
                            'message': {
                                'content': '--- success\ntrue\n--- explanation\nRetry successful'
                            }
                        }
                    ]
                }
            ),
        ]

        llm = LLM(config=default_config)
        handler = IssueHandler('test-owner', 'test-repo', 'test-token', default_config)
        # Inject the LLM wired to the patched completion function.
        handler.llm = llm

        # Mock issue and history
        issue = GithubIssue(
            owner='test-owner',
            repo='test-repo',
            number=1,
            title='Test Issue',
            body='This is a test issue.',
            thread_comments=['Please improve error handling'],
        )
        history = [MessageAction(content='Fixed error handling.')]

        # Call guess_success
        success, _, explanation = handler.guess_success(issue, history)

        # Assertions
        assert success is True
        assert explanation == 'Retry successful'
        assert mock_litellm_completion.call_count == 2  # Two attempts made
        mock_sleep.assert_called_once()  # Sleep called once between retries

        # Validate wait time: first positional arg of the single sleep call.
        wait_time = mock_sleep.call_args[0][0]
        assert (
            default_config.retry_min_wait <= wait_time <= default_config.retry_max_wait
        ), f'Expected wait time between {default_config.retry_min_wait} and {default_config.retry_max_wait} seconds, but got {wait_time}'
@patch('openhands.llm.llm.litellm_completion')
def test_guess_success_exhausts_retries(mock_completion, default_config):
    """Test the retry mechanism in guess_success exhausts retries and raises an error."""
    # Simulate persistent rate limit errors by always raising RateLimitError
    mock_completion.side_effect = RateLimitError(
        'Rate limit exceeded', llm_provider='test_provider', model='test_model'
    )

    # Initialize LLM and handler
    llm = LLM(config=default_config)
    handler = PRHandler('test-owner', 'test-repo', 'test-token', default_config)
    handler.llm = llm

    # Mock issue and history
    issue = GithubIssue(
        owner='test-owner',
        repo='test-repo',
        number=1,
        title='Test Issue',
        body='This is a test issue.',
        thread_comments=['Please improve error handling'],
    )
    history = [MessageAction(content='Fixed error handling.')]

    # Call guess_success and expect it to raise an error after retries
    with pytest.raises(RateLimitError):
        handler.guess_success(issue, history)

    # Assertions: total attempts equal num_retries.
    # NOTE(review): the assertion counts num_retries as the TOTAL number of
    # completion calls, i.e. the first attempt counts toward num_retries —
    # confirm this matches the LLM retry layer's semantics.
    assert (
        mock_completion.call_count == default_config.num_retries
    )
|