| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881 |
- import os
- import tempfile
- from unittest.mock import AsyncMock, MagicMock, patch
- import pytest
- from openhands.core.config import LLMConfig
- from openhands.events.action import CmdRunAction
- from openhands.events.observation import CmdOutputObservation, NullObservation
- from openhands.resolver.github_issue import GithubIssue, ReviewThread
- from openhands.resolver.issue_definitions import IssueHandler, PRHandler
- from openhands.resolver.resolve_issue import (
- complete_runtime,
- initialize_runtime,
- process_issue,
- )
- from openhands.resolver.resolver_output import ResolverOutput
@pytest.fixture
def mock_output_dir():
    """Yield a temporary output dir containing a git repo under 'repo'.

    The repo is initialized with a single commit adding README.md so tests
    have a valid base commit to work against. The temp dir (and repo) is
    removed automatically when the fixture goes out of scope.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        repo_path = os.path.join(temp_dir, 'repo')
        # Initialize a GitHub repo in "repo" and add a commit with "README.md"
        os.makedirs(repo_path)
        os.system(f'git init {repo_path}')
        readme_path = os.path.join(repo_path, 'README.md')
        with open(readme_path, 'w') as f:
            f.write('hello world')
        os.system(f'git -C {repo_path} add README.md')
        os.system(f"git -C {repo_path} commit -m 'Initial commit'")
        yield temp_dir
@pytest.fixture
def mock_subprocess():
    """Patch subprocess.check_output and yield the mock for call inspection."""
    with patch('subprocess.check_output') as mock_check_output:
        yield mock_check_output
@pytest.fixture
def mock_os():
    """Patch os.system and os.path.join; yield both mocks as a tuple."""
    with patch('os.system') as mock_system, patch('os.path.join') as mock_join:
        yield mock_system, mock_join
@pytest.fixture
def mock_prompt_template():
    """Minimal jinja prompt template for issue-resolution tests."""
    return 'Issue: {{ body }}\n\nPlease fix this issue.'
@pytest.fixture
def mock_followup_prompt_template():
    """Jinja prompt template exercising all PR follow-up context variables."""
    return 'Issue context: {{ issues }}\n\nReview comments: {{ review_comments }}\n\nReview threads: {{ review_threads }}\n\nFiles: {{ files }}\n\nThread comments: {{ thread_context }}\n\nPlease fix this issue.'
def create_cmd_output(exit_code: int, content: str, command_id: int, command: str):
    """Build a CmdOutputObservation used to mock runtime command results."""
    return CmdOutputObservation(
        exit_code=exit_code, content=content, command_id=command_id, command=command
    )
def test_initialize_runtime():
    """initialize_runtime should cd into /workspace and disable the git pager."""
    mock_runtime = MagicMock()
    mock_runtime.run_action.side_effect = [
        create_cmd_output(
            exit_code=0, content='', command_id=1, command='cd /workspace'
        ),
        create_cmd_output(
            exit_code=0,
            content='',
            command_id=2,
            command='git config --global core.pager ""',
        ),
    ]

    initialize_runtime(mock_runtime)

    assert mock_runtime.run_action.call_count == 2
    mock_runtime.run_action.assert_any_call(CmdRunAction(command='cd /workspace'))
    mock_runtime.run_action.assert_any_call(
        CmdRunAction(command='git config --global core.pager ""')
    )
def test_download_issues_from_github():
    """IssueHandler should fetch issues, skipping entries that are pull requests."""
    handler = IssueHandler('owner', 'repo', 'token')

    mock_issues_response = MagicMock()
    # First page has three entries (one of which is a PR); second page is
    # None, which terminates pagination.
    mock_issues_response.json.side_effect = [
        [
            {'number': 1, 'title': 'Issue 1', 'body': 'This is an issue'},
            {
                'number': 2,
                'title': 'PR 1',
                'body': 'This is a pull request',
                'pull_request': {},
            },
            {'number': 3, 'title': 'Issue 2', 'body': 'This is another issue'},
        ],
        None,
    ]
    mock_issues_response.raise_for_status = MagicMock()

    mock_comments_response = MagicMock()
    mock_comments_response.json.return_value = []
    mock_comments_response.raise_for_status = MagicMock()

    def get_mock_response(url, *args, **kwargs):
        # Route comment-endpoint requests to the (empty) comments mock.
        if '/comments' in url:
            return mock_comments_response
        return mock_issues_response

    with patch('requests.get', side_effect=get_mock_response):
        issues = handler.get_converted_issues(issue_numbers=[1, 3])

    assert len(issues) == 2
    assert handler.issue_type == 'issue'
    assert all(isinstance(issue, GithubIssue) for issue in issues)
    assert [issue.number for issue in issues] == [1, 3]
    assert [issue.title for issue in issues] == ['Issue 1', 'Issue 2']
    # Plain issues carry no PR-specific metadata.
    assert [issue.review_comments for issue in issues] == [None, None]
    assert [issue.closing_issues for issue in issues] == [None, None]
    assert [issue.thread_ids for issue in issues] == [None, None]
def test_download_pr_from_github():
    """PRHandler should fetch PRs and attach GraphQL metadata.

    Verifies that closing issues, unresolved review threads (resolved ones
    are dropped), per-thread files, and thread ids are populated on the
    first converted PR.
    """
    handler = PRHandler('owner', 'repo', 'token')
    mock_pr_response = MagicMock()
    # First page lists three PRs; None ends pagination.
    mock_pr_response.json.side_effect = [
        [
            {
                'number': 1,
                'title': 'PR 1',
                'body': 'This is a pull request',
                'head': {'ref': 'b1'},
            },
            {
                'number': 2,
                'title': 'My PR',
                'body': 'This is another pull request',
                'head': {'ref': 'b2'},
            },
            {'number': 3, 'title': 'PR 3', 'body': 'Final PR', 'head': {'ref': 'b3'}},
        ],
        None,
    ]
    mock_pr_response.raise_for_status = MagicMock()

    # Mock for PR comments response
    mock_comments_response = MagicMock()
    mock_comments_response.json.return_value = []  # No PR comments
    mock_comments_response.raise_for_status = MagicMock()

    # Mock for GraphQL request (for download_pr_metadata). A lambda
    # side_effect returns a fresh payload on every call.
    mock_graphql_response = MagicMock()
    mock_graphql_response.json.side_effect = lambda: {
        'data': {
            'repository': {
                'pullRequest': {
                    'closingIssuesReferences': {
                        'edges': [
                            {'node': {'body': 'Issue 1 body', 'number': 1}},
                            {'node': {'body': 'Issue 2 body', 'number': 2}},
                        ]
                    },
                    'reviewThreads': {
                        'edges': [
                            {
                                'node': {
                                    'isResolved': False,
                                    'id': '1',
                                    'comments': {
                                        'nodes': [
                                            {
                                                'body': 'Unresolved comment 1',
                                                'path': '/frontend/header.tsx',
                                            },
                                            {'body': 'Follow up thread'},
                                        ]
                                    },
                                }
                            },
                            {
                                'node': {
                                    'isResolved': True,
                                    'id': '2',
                                    'comments': {
                                        'nodes': [
                                            {
                                                'body': 'Resolved comment 1',
                                                'path': '/some/file.py',
                                            }
                                        ]
                                    },
                                }
                            },
                            {
                                'node': {
                                    'isResolved': False,
                                    'id': '3',
                                    'comments': {
                                        'nodes': [
                                            {
                                                'body': 'Unresolved comment 3',
                                                'path': '/another/file.py',
                                            }
                                        ]
                                    },
                                }
                            },
                        ]
                    },
                }
            }
        }
    }
    mock_graphql_response.raise_for_status = MagicMock()

    def get_mock_response(url, *args, **kwargs):
        if '/comments' in url:
            return mock_comments_response
        return mock_pr_response

    with patch('requests.get', side_effect=get_mock_response):
        with patch('requests.post', return_value=mock_graphql_response):
            issues = handler.get_converted_issues(issue_numbers=[1, 2, 3])

    assert len(issues) == 3
    assert handler.issue_type == 'pr'
    assert all(isinstance(issue, GithubIssue) for issue in issues)
    assert [issue.number for issue in issues] == [1, 2, 3]
    assert [issue.title for issue in issues] == ['PR 1', 'My PR', 'PR 3']
    assert [issue.head_branch for issue in issues] == ['b1', 'b2', 'b3']
    assert len(issues[0].review_threads) == 2  # Only unresolved threads
    assert (
        issues[0].review_threads[0].comment
        == 'Unresolved comment 1\n---\nlatest feedback:\nFollow up thread\n'
    )
    assert issues[0].review_threads[0].files == ['/frontend/header.tsx']
    assert (
        issues[0].review_threads[1].comment
        == 'latest feedback:\nUnresolved comment 3\n'
    )
    assert issues[0].review_threads[1].files == ['/another/file.py']
    assert issues[0].closing_issues == ['Issue 1 body', 'Issue 2 body']
    assert issues[0].thread_ids == ['1', '3']
@pytest.mark.asyncio
async def test_complete_runtime():
    """complete_runtime should run the git diff sequence and return the patch."""
    mock_runtime = MagicMock()
    mock_runtime.run_action.side_effect = [
        create_cmd_output(
            exit_code=0, content='', command_id=1, command='cd /workspace'
        ),
        create_cmd_output(
            exit_code=0,
            content='',
            command_id=2,
            command='git config --global core.pager ""',
        ),
        create_cmd_output(
            exit_code=0,
            content='',
            command_id=3,
            command='git config --global --add safe.directory /workspace',
        ),
        create_cmd_output(
            exit_code=0,
            content='',
            command_id=4,
            command='git diff base_commit_hash fix',
        ),
        create_cmd_output(
            exit_code=0, content='git diff content', command_id=5, command='git apply'
        ),
    ]

    result = await complete_runtime(mock_runtime, 'base_commit_hash')

    assert result == {'git_patch': 'git diff content'}
    assert mock_runtime.run_action.call_count == 5
@pytest.mark.asyncio
async def test_process_issue(mock_output_dir, mock_prompt_template):
    """process_issue should produce a ResolverOutput for success and failure runs.

    Exercises a table of scenarios: a successful agent run, agent crashes
    (ValueError / RuntimeError), and a successful PR run with per-comment
    success flags. guess_success must be consulted only on success.
    """
    # Mock dependencies
    mock_create_runtime = MagicMock()
    mock_initialize_runtime = AsyncMock()
    mock_run_controller = AsyncMock()
    mock_complete_runtime = AsyncMock()
    handler_instance = MagicMock()

    # Set up test data
    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
    )
    base_commit = 'abcdef1234567890'
    repo_instruction = 'Resolve this repo'
    max_iterations = 5
    llm_config = LLMConfig(model='test_model', api_key='test_api_key')
    runtime_container_image = 'test_image:latest'

    # Test cases for different scenarios
    test_cases = [
        {
            'name': 'successful_run',
            'run_controller_return': MagicMock(
                history=[NullObservation(content='')],
                metrics=MagicMock(
                    get=MagicMock(return_value={'test_result': 'passed'})
                ),
                last_error=None,
            ),
            'run_controller_raises': None,
            'expected_success': True,
            'expected_error': None,
            'expected_explanation': 'Issue resolved successfully',
        },
        {
            'name': 'value_error',
            'run_controller_return': None,
            'run_controller_raises': ValueError('Test value error'),
            'expected_success': False,
            'expected_error': 'Agent failed to run or crashed',
            'expected_explanation': 'Agent failed to run',
        },
        {
            'name': 'runtime_error',
            'run_controller_return': None,
            'run_controller_raises': RuntimeError('Test runtime error'),
            'expected_success': False,
            'expected_error': 'Agent failed to run or crashed',
            'expected_explanation': 'Agent failed to run',
        },
        {
            'name': 'json_decode_error',
            'run_controller_return': MagicMock(
                history=[NullObservation(content='')],
                metrics=MagicMock(
                    get=MagicMock(return_value={'test_result': 'passed'})
                ),
                last_error=None,
            ),
            'run_controller_raises': None,
            'expected_success': True,
            'expected_error': None,
            'expected_explanation': 'Non-JSON explanation',
            'is_pr': True,
            'comment_success': [
                True,
                False,
            ],  # To trigger the PR success logging code path
        },
    ]

    for test_case in test_cases:
        # Reset mocks between scenarios so call counts start from zero.
        mock_create_runtime.reset_mock()
        mock_initialize_runtime.reset_mock()
        mock_run_controller.reset_mock()
        mock_complete_runtime.reset_mock()
        handler_instance.reset_mock()

        # Mock return values
        mock_create_runtime.return_value = MagicMock(connect=AsyncMock())
        if test_case['run_controller_raises']:
            mock_run_controller.side_effect = test_case['run_controller_raises']
        else:
            mock_run_controller.return_value = test_case['run_controller_return']
            # Clear any side_effect left over from a previous iteration.
            mock_run_controller.side_effect = None

        mock_complete_runtime.return_value = {'git_patch': 'test patch'}
        handler_instance.guess_success.return_value = (
            test_case['expected_success'],
            test_case.get('comment_success', None),
            test_case['expected_explanation'],
        )
        handler_instance.get_instruction.return_value = ('Test instruction', [])
        handler_instance.issue_type = 'pr' if test_case.get('is_pr', False) else 'issue'

        with patch(
            'openhands.resolver.resolve_issue.create_runtime', mock_create_runtime
        ), patch(
            'openhands.resolver.resolve_issue.initialize_runtime',
            mock_initialize_runtime,
        ), patch(
            'openhands.resolver.resolve_issue.run_controller', mock_run_controller
        ), patch(
            'openhands.resolver.resolve_issue.complete_runtime', mock_complete_runtime
        ), patch('openhands.resolver.resolve_issue.logger'):
            # Call the function
            result = await process_issue(
                issue,
                base_commit,
                max_iterations,
                llm_config,
                mock_output_dir,
                runtime_container_image,
                mock_prompt_template,
                handler_instance,
                repo_instruction,
                reset_logger=False,
            )

        # Assert the result
        expected_issue_type = 'pr' if test_case.get('is_pr', False) else 'issue'
        assert handler_instance.issue_type == expected_issue_type
        assert isinstance(result, ResolverOutput)
        assert result.issue == issue
        assert result.base_commit == base_commit
        assert result.git_patch == 'test patch'
        assert result.success == test_case['expected_success']
        assert result.success_explanation == test_case['expected_explanation']
        assert result.error == test_case['expected_error']

        # Assert that the mocked functions were called
        mock_create_runtime.assert_called_once()
        mock_initialize_runtime.assert_called_once()
        mock_run_controller.assert_called_once()
        mock_complete_runtime.assert_called_once()

        # Assert that guess_success was called only for successful runs
        if test_case['expected_success']:
            handler_instance.guess_success.assert_called_once()
        else:
            handler_instance.guess_success.assert_not_called()
def test_get_instruction(mock_prompt_template, mock_followup_prompt_template):
    """get_instruction should render issue and PR templates with extracted images.

    NOTE(review): the inline image markdown in the issue body and expected
    instruction was reconstructed from the asserted URL
    ('https://sampleimage.com/image1.png') — verify alt text against the
    upstream fixture.
    """
    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=123,
        title='Test Issue',
        body='This is a test issue refer to image ![image](https://sampleimage.com/image1.png)',
    )
    issue_handler = IssueHandler('owner', 'repo', 'token')
    instruction, images_urls = issue_handler.get_instruction(
        issue, mock_prompt_template, None
    )
    expected_instruction = 'Issue: Test Issue\n\nThis is a test issue refer to image ![image](https://sampleimage.com/image1.png)\n\nPlease fix this issue.'

    assert images_urls == ['https://sampleimage.com/image1.png']
    assert issue_handler.issue_type == 'issue'
    assert instruction == expected_instruction

    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=123,
        title='Test Issue',
        body='This is a test issue',
        closing_issues=['Issue 1 fix the type'],
        review_threads=[
            ReviewThread(
                comment="There is still a typo 'pthon' instead of 'python'", files=[]
            )
        ],
        thread_comments=[
            "I've left review comments, please address them",
            'This is a valid concern.',
        ],
    )

    pr_handler = PRHandler('owner', 'repo', 'token')
    instruction, images_urls = pr_handler.get_instruction(
        issue, mock_followup_prompt_template, None
    )
    expected_instruction = "Issue context: [\n    \"Issue 1 fix the type\"\n]\n\nReview comments: None\n\nReview threads: [\n    \"There is still a typo 'pthon' instead of 'python'\"\n]\n\nFiles: []\n\nThread comments: I've left review comments, please address them\n---\nThis is a valid concern.\n\nPlease fix this issue."

    assert images_urls == []
    assert pr_handler.issue_type == 'pr'
    assert instruction == expected_instruction
def test_file_instruction():
    """get_instruction with the basic.jinja prompt should render the full template.

    NOTE(review): the inline image markdown was reconstructed from the
    asserted URL ('https://sampleimage.com/sample.png'); blank-line layout
    inside the expected string mirrors the visible source — confirm against
    the rendered template.
    """
    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=123,
        title='Test Issue',
        body='This is a test issue ![image](https://sampleimage.com/sample.png)',
    )
    # load prompt from openhands/resolver/prompts/resolve/basic.jinja
    with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
        prompt = f.read()
    # Test without thread comments
    issue_handler = IssueHandler('owner', 'repo', 'token')
    instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)
    expected_instruction = """Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.

# Problem Statement
Test Issue

This is a test issue ![image](https://sampleimage.com/sample.png)

IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.
When you think you have fixed the issue through code changes, please finish the interaction."""

    assert instruction == expected_instruction
    assert images_urls == ['https://sampleimage.com/sample.png']
def test_file_instruction_with_repo_instruction():
    """get_instruction should append the repo-specific instruction block.

    NOTE(review): blank-line layout inside the expected string mirrors the
    visible source — confirm against the rendered template.
    """
    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=123,
        title='Test Issue',
        body='This is a test issue',
    )
    # load prompt from openhands/resolver/prompts/resolve/basic.jinja
    with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
        prompt = f.read()
    # load repo instruction from openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
    with open(
        'openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt',
        'r',
    ) as f:
        repo_instruction = f.read()

    issue_handler = IssueHandler('owner', 'repo', 'token')
    instruction, image_urls = issue_handler.get_instruction(
        issue, prompt, repo_instruction
    )
    expected_instruction = """Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.

# Problem Statement
Test Issue

This is a test issue

IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.

Some basic information about this repository:
This is a Python repo for openhands-resolver, a library that attempts to resolve github issues with the AI agent OpenHands.

- Setup: `poetry install --with test --with dev`
- Testing: `poetry run pytest tests/test_*.py`

When you think you have fixed the issue through code changes, please finish the interaction."""
    assert instruction == expected_instruction
    assert issue_handler.issue_type == 'issue'
    assert image_urls == []
def test_guess_success():
    """guess_success should parse a 'success: true' LLM answer."""
    mock_issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
    )
    mock_history = [
        create_cmd_output(
            exit_code=0, content='', command_id=1, command='cd /workspace'
        )
    ]
    mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')

    mock_completion_response = MagicMock()
    mock_completion_response.choices = [
        MagicMock(
            message=MagicMock(
                content='--- success\ntrue\n--- explanation\nIssue resolved successfully'
            )
        )
    ]
    issue_handler = IssueHandler('owner', 'repo', 'token')

    with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
        success, comment_success, explanation = issue_handler.guess_success(
            mock_issue, mock_history, mock_llm_config
        )
        assert issue_handler.issue_type == 'issue'
        assert comment_success is None
        assert success
        assert explanation == 'Issue resolved successfully'
def test_guess_success_with_thread_comments():
    """guess_success should succeed when thread comments are present on the issue."""
    mock_issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
        thread_comments=[
            'First comment',
            'Second comment',
            'latest feedback:\nPlease add tests',
        ],
    )
    mock_history = [MagicMock(message='I have added tests for this case')]
    mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')

    mock_completion_response = MagicMock()
    mock_completion_response.choices = [
        MagicMock(
            message=MagicMock(
                content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling'
            )
        )
    ]
    issue_handler = IssueHandler('owner', 'repo', 'token')

    with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
        success, comment_success, explanation = issue_handler.guess_success(
            mock_issue, mock_history, mock_llm_config
        )
        assert issue_handler.issue_type == 'issue'
        assert comment_success is None
        assert success
        assert 'Tests have been added' in explanation
def test_instruction_with_thread_comments():
    """Thread comments on an issue must appear in the rendered instruction."""
    # Create an issue with thread comments
    issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=123,
        title='Test Issue',
        body='This is a test issue',
        thread_comments=[
            'First comment',
            'Second comment',
            'latest feedback:\nPlease add tests',
        ],
    )

    # Load the basic prompt template
    with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
        prompt = f.read()

    issue_handler = IssueHandler('owner', 'repo', 'token')
    instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)

    # Verify that thread comments are included in the instruction
    assert 'First comment' in instruction
    assert 'Second comment' in instruction
    assert 'Please add tests' in instruction
    assert 'Issue Thread Comments:' in instruction
    assert images_urls == []
def test_guess_success_failure():
    """guess_success parsing with thread comments present.

    NOTE(review): despite the name, this test mocks a 'success: true' LLM
    reply and asserts success — it currently duplicates
    test_guess_success_with_thread_comments. Consider mocking a failing
    reply here instead.
    """
    mock_issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
        thread_comments=[
            'First comment',
            'Second comment',
            'latest feedback:\nPlease add tests',
        ],
    )
    mock_history = [MagicMock(message='I have added tests for this case')]
    mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')

    mock_completion_response = MagicMock()
    mock_completion_response.choices = [
        MagicMock(
            message=MagicMock(
                content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling'
            )
        )
    ]
    issue_handler = IssueHandler('owner', 'repo', 'token')

    with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
        success, comment_success, explanation = issue_handler.guess_success(
            mock_issue, mock_history, mock_llm_config
        )
        assert issue_handler.issue_type == 'issue'
        assert comment_success is None
        assert success
        assert 'Tests have been added' in explanation
def test_guess_success_negative_case():
    """guess_success should parse a 'success: false' LLM answer."""
    mock_issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
    )
    mock_history = [
        create_cmd_output(
            exit_code=0, content='', command_id=1, command='cd /workspace'
        )
    ]
    mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')

    mock_completion_response = MagicMock()
    mock_completion_response.choices = [
        MagicMock(
            message=MagicMock(
                content='--- success\nfalse\n--- explanation\nIssue not resolved'
            )
        )
    ]
    issue_handler = IssueHandler('owner', 'repo', 'token')

    with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
        success, comment_success, explanation = issue_handler.guess_success(
            mock_issue, mock_history, mock_llm_config
        )
        assert issue_handler.issue_type == 'issue'
        assert comment_success is None
        assert not success
        assert explanation == 'Issue not resolved'
def test_guess_success_invalid_output():
    """guess_success should fail gracefully when the LLM answer is unparseable."""
    mock_issue = GithubIssue(
        owner='test_owner',
        repo='test_repo',
        number=1,
        title='Test Issue',
        body='This is a test issue',
    )
    mock_history = [
        create_cmd_output(
            exit_code=0, content='', command_id=1, command='cd /workspace'
        )
    ]
    mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')

    # Response lacks the '--- success' / '--- explanation' structure.
    mock_completion_response = MagicMock()
    mock_completion_response.choices = [
        MagicMock(message=MagicMock(content='This is not a valid output'))
    ]
    issue_handler = IssueHandler('owner', 'repo', 'token')

    with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
        success, comment_success, explanation = issue_handler.guess_success(
            mock_issue, mock_history, mock_llm_config
        )
        assert issue_handler.issue_type == 'issue'
        assert comment_success is None
        assert not success
        assert (
            explanation
            == 'Failed to decode answer from LLM response: This is not a valid output'
        )
def test_download_pr_with_review_comments():
    """PRHandler should populate review_comments when GraphQL returns reviews only."""
    handler = PRHandler('owner', 'repo', 'token')
    mock_pr_response = MagicMock()
    mock_pr_response.json.side_effect = [
        [
            {
                'number': 1,
                'title': 'PR 1',
                'body': 'This is a pull request',
                'head': {'ref': 'b1'},
            },
        ],
        None,
    ]
    mock_pr_response.raise_for_status = MagicMock()

    # Mock for PR comments response
    mock_comments_response = MagicMock()
    mock_comments_response.json.return_value = []  # No PR comments
    mock_comments_response.raise_for_status = MagicMock()

    # Mock for GraphQL request with review comments but no threads
    mock_graphql_response = MagicMock()
    mock_graphql_response.json.side_effect = lambda: {
        'data': {
            'repository': {
                'pullRequest': {
                    'closingIssuesReferences': {'edges': []},
                    'reviews': {
                        'nodes': [
                            {'body': 'Please fix this typo'},
                            {'body': 'Add more tests'},
                        ]
                    },
                }
            }
        }
    }
    mock_graphql_response.raise_for_status = MagicMock()

    def get_mock_response(url, *args, **kwargs):
        if '/comments' in url:
            return mock_comments_response
        return mock_pr_response

    with patch('requests.get', side_effect=get_mock_response):
        with patch('requests.post', return_value=mock_graphql_response):
            issues = handler.get_converted_issues(issue_numbers=[1])

    assert len(issues) == 1
    assert handler.issue_type == 'pr'
    assert isinstance(issues[0], GithubIssue)
    assert issues[0].number == 1
    assert issues[0].title == 'PR 1'
    assert issues[0].head_branch == 'b1'

    # Verify review comments are set but threads are empty
    assert len(issues[0].review_comments) == 2
    assert issues[0].review_comments[0] == 'Please fix this typo'
    assert issues[0].review_comments[1] == 'Add more tests'
    assert not issues[0].review_threads
    assert not issues[0].closing_issues
    assert not issues[0].thread_ids
def test_download_issue_with_specific_comment():
    """Passing comment_id should keep only the matching thread comment."""
    handler = IssueHandler('owner', 'repo', 'token')

    # Define the specific comment_id to filter
    specific_comment_id = 101

    # Mock issue and comment responses
    mock_issue_response = MagicMock()
    mock_issue_response.json.side_effect = [
        [
            {'number': 1, 'title': 'Issue 1', 'body': 'This is an issue'},
        ],
        None,
    ]
    mock_issue_response.raise_for_status = MagicMock()

    mock_comments_response = MagicMock()
    mock_comments_response.json.return_value = [
        {
            'id': specific_comment_id,
            'body': 'Specific comment body',
            'issue_url': 'https://api.github.com/repos/owner/repo/issues/1',
        },
        {
            'id': 102,
            'body': 'Another comment body',
            'issue_url': 'https://api.github.com/repos/owner/repo/issues/2',
        },
    ]
    mock_comments_response.raise_for_status = MagicMock()

    def get_mock_response(url, *args, **kwargs):
        if '/comments' in url:
            return mock_comments_response
        return mock_issue_response

    with patch('requests.get', side_effect=get_mock_response):
        issues = handler.get_converted_issues(
            issue_numbers=[1], comment_id=specific_comment_id
        )

    assert len(issues) == 1
    assert issues[0].number == 1
    assert issues[0].title == 'Issue 1'
    assert issues[0].thread_comments == ['Specific comment body']
# Allow running this test module directly (e.g. `python test_file.py`).
if __name__ == '__main__':
    pytest.main()
|