run_infer.py

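# Run an OpenDevin agent on SWE-bench Lite: for each instance this script
# builds a sandbox, lets the agent edit the repository, extracts the resulting
# git patch, and scores it against the instance's test patch. Results are
# streamed to <eval_output_dir>/output.jsonl.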
import asyncio
import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor

import pandas as pd
import toml
import whatthepatch
from datasets import load_dataset
from tqdm import tqdm

import agenthub
from evaluation.swe_bench.swe_env_box import SWEBenchSSHBox
from opendevin.controller.state.state import State
from opendevin.core.config import args, config, get_llm_config_arg
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import main
from opendevin.events.action import MessageAction
from opendevin.events.serialization.event import event_to_dict

USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false') == 'true'


def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()
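
# The functions below provide canned "user" replies so the evaluation can run
# headlessly: when an agent asks for user input, the entry matching its class in
# AGENT_CLS_TO_FAKE_USER_RESPONSE_FN is called instead of a human.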


def codeact_user_response(state: State) -> str:
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'If you think you have modified the code in a way that fixes the issue, please run the following command: <execute_bash> exit </execute_bash>.\n'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP OR USE THE INTERNET TO SOLVE THIS TASK.\n'
    )
    if state.history:
        user_msgs = [
            action
            for action, _ in state.history
            if isinstance(action, MessageAction) and action.source == 'user'
        ]
        if len(user_msgs) >= 2:
            # let the agent know that it can give up when it has tried 3 times
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )
    return msg


def monologue_user_response(state: State) -> str:
    raise NotImplementedError('MonologueAgent should never ask for user responses.')


AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'CodeActSWEAgent': codeact_user_response,
    'MonologueAgent': monologue_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n',
    'CodeActSWEAgent': 'When you think you have fixed the issue through code changes, please run the following command: <execute_bash> exit </execute_bash>.\n',
}
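
# get_test_result evaluates the agent's edits inside the sandbox: it reverts any
# files touched by the official test patch, applies that patch, runs the
# instance's test command, and parses the per-instance report. Each step records
# a success flag (and error output on failure) under test_result['metadata'].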


def get_test_result(instance, sandbox, workspace_dir_name):
    test_result = {'result': {}, 'metadata': {}}
    # NOTE: if you need to do something in the sandbox to get the correctness metric, modify this function
    try:
        test_patch_parsed = whatthepatch.parse_patch(instance.test_patch)
        # get a list of filepaths that are involved in the patch
        involved_filepaths = set()
        for patch in test_patch_parsed:
            involved_filepaths.add(patch.header.old_path.removeprefix('a/'))
            involved_filepaths.add(patch.header.new_path.removeprefix('b/'))
        involved_filepaths = list(involved_filepaths)
        test_result['metadata']['1_test_patch_parse_success'] = True
        test_result['metadata']['1_test_involved_filepaths'] = involved_filepaths
    except Exception as e:
        logger.error(
            f'Error parsing test patch for instance {instance.instance_id}: {e}'
        )
        test_result['metadata']['1_test_patch_parse_success'] = False
        test_result['metadata']['1_test_patch_parse_error'] = str(e)
        test_result['metadata']['1_test_involved_filepaths'] = None
        involved_filepaths = []

    # Try to revert the changes for involved filepaths
    err_code, output = sandbox.execute(f'cd /workspace/{workspace_dir_name}')
    test_result['metadata']['2_revert_test_involved_filepaths_success'] = []
    for filepath in involved_filepaths:
        err_code, output = sandbox.execute(
            f'git checkout {instance["base_commit"]} -- {filepath}'
        )
        if err_code != 0:
            logger.error(f'Error reverting changes for {filepath}: {output}')
            test_result['metadata']['2_revert_test_involved_filepaths_success'].append(
                False
            )
        else:
            test_result['metadata']['2_revert_test_involved_filepaths_success'].append(
                True
            )

    # Apply the testcase
    err_code, output = sandbox.execute('git apply $SWE_TASK_DIR/test.patch')
    if err_code != 0:
        logger.error(f'Error applying test patch: {output}')
        test_result['metadata']['3_apply_test_patch_success'] = False
        test_result['metadata']['3_apply_test_patch_error'] = output
    else:
        test_result['metadata']['3_apply_test_patch_success'] = True

    # Run the test command
    err_code, output = sandbox.execute(
        '$TEST_CMD > /workspace/$SWE_INSTANCE_ID.log 2>&1'
    )
    if err_code != 0:
        logger.error(f'Error running test command: {output}')
        test_result['metadata']['4_run_test_command_success'] = False
        test_result['metadata']['4_run_test_command_error'] = output
    else:
        test_result['metadata']['4_run_test_command_success'] = True

    # Get the test output
    err_code, output = sandbox.execute('cat /workspace/$SWE_INSTANCE_ID.log')
    if err_code != 0:
        logger.error(f'Error getting test output: {output}')
        test_result['metadata']['4_get_test_output_success'] = False
        test_result['metadata']['4_get_test_output_error'] = output
    else:
        test_result['metadata']['4_get_test_output_success'] = True
        test_result['test_output'] = output

    # Reformat instance.json
    # $SWE_TASK_DIR/instance.json is a dict {"XXX": "YYY"}, add a [ before and a ] after
    err_code, output = sandbox.execute(
        'cat $SWE_TASK_DIR/instance.json | sed "s/^{/[{/" | sed "s/}$/}]/" > /workspace/instance.json'
    )
    if err_code != 0:
        logger.error(f'Error creating instance.json: {output}')
        test_result['metadata']['5_reformat_instance_json_success'] = False
        test_result['metadata']['5_reformat_instance_json_error'] = output
    else:
        test_result['metadata']['5_reformat_instance_json_success'] = True

    # Get the instance report
    err_code, output = sandbox.execute(
        (
            'cd /swe_util/OD-SWE-bench '
            '&& export PYTHONPATH=$(pwd):$PYTHONPATH '
            '&& conda run -n swe-bench-eval python swebench/metrics/get_instance_report.py --swe_bench_task /workspace/instance.json --log_path /workspace/$SWE_INSTANCE_ID.log'
        )
    )
    if err_code != 0:
        logger.error(f'Error getting instance report: {output}')
        test_result['metadata']['6_get_instance_report_success'] = False
        test_result['metadata']['6_get_instance_report_error'] = output
    else:
        test_result['metadata']['6_get_instance_report_success'] = True
        test_result['result_raw'] = output

        # try to parse output
        for line in output.strip().split('\n'):
            line = line.strip('-')
            try:
                key, value = line.split(':')
            except ValueError:
                # skip this line
                print(f'Error parsing result line: {line}')
                continue
            value = value.strip()
            try:
                value = int(value)
            except ValueError:
                pass
            test_result['result'][key.strip()] = value
    return test_result
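
# process_instance runs one SWE-bench instance end to end: it sets up a
# per-process workspace and logger, builds the SWE-bench sandbox, composes the
# task instruction, runs the agent via opendevin.core.main.main, extracts the
# git patch, and evaluates it with get_test_result.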


def process_instance(
    instance: dict,
    agent_class: str,
    metadata: dict,
    skip_workspace_mount: bool,
    eval_output_dir: str,
    reset_logger: bool = True,
):
    workspace_mount_path = os.path.join(config.workspace_mount_path, '_eval_workspace')
    # create process-specific workspace dir
    # if `not skip_workspace_mount` - we will create a workspace directory for EACH process
    # so that different agents don't interfere with each other.
    if not skip_workspace_mount:
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)

    # Set up the logger properly, so you can run multi-processing to parallelize the evaluation
    if reset_logger:
        # Set up logger
        log_file = os.path.join(
            eval_output_dir, 'logs', f'instance_{instance.instance_id}.log'
        )
        # Remove all existing handlers from logger
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        # add back the console handler to print ONE line
        logger.addHandler(get_console_handler())
        logger.info(
            f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
        )
        # Remove all existing handlers from logger
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        )
        logger.addHandler(file_handler)
    else:
        logger.info(f'Starting evaluation for instance {instance.instance_id}.')

    if not skip_workspace_mount:
        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

    # NOTE: this is something special we do for SWE-Bench due to the reason described in the previous section
    # You can omit this if you don't need to set up a specialized sandbox
    workspace_dir_name = f'{instance.repo}__{instance.version}'.replace('/', '__')
    sandbox = SWEBenchSSHBox.get_box_for_instance(
        instance,
        workspace_dir_name,
        skip_workspace_mount=skip_workspace_mount,
        workspace_mount_path=workspace_mount_path,
        sandbox_plugins=agenthub.Agent.get_cls(agent_class).sandbox_plugins,
    )

    # Prepare instruction
    if agent_class == 'CodeActSWEAgent':
        instruction = (
            'We are currently solving the following issue within our repository. Here is the issue text:\n'
            '--- BEGIN ISSUE ---\n'
            f'{instance.problem_statement}\n'
            '--- END ISSUE ---\n\n'
        )
        if USE_HINT_TEXT and instance.hints_text:
            instruction += (
                f'--- BEGIN HINTS ---\n{instance.hints_text}\n--- END HINTS ---\n'
            )
        instruction += f"""Now, you're going to solve this issue on your own. Your terminal session has started and you're in the repository's root directory. You can use any bash commands or the special interface to help you. Edit all the files you need to and run any checks or tests that you want.
Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command.
When you're satisfied with all of the changes you've made, you can run the following command: <execute_bash> exit </execute_bash>.
Note however that you cannot use any interactive session commands (e.g. vim) in this environment, but you can write scripts and run them. E.g. you can write a python script and then run it with `python <script_name>.py`.
NOTE ABOUT THE EDIT COMMAND: Indentation really matters! When editing a file, make sure to insert appropriate indentation before each line!
IMPORTANT TIPS:
1. Always start by trying to replicate the bug that the issue discusses.
If the issue includes code for reproducing the bug, we recommend that you re-implement that in your environment, and run it to make sure you can reproduce the bug.
Then start trying to fix it.
When you think you've fixed the bug, re-run the bug reproduction script to make sure that the bug has indeed been fixed.
If the bug reproduction script does not print anything when it successfully runs, we recommend adding a print("Script completed successfully, no errors.") command at the end of the file,
so that you can be sure that the script indeed ran fine all the way through.
2. If you run a command and it doesn't work, try running a different command. A command that did not work once will not work the second time unless you modify it!
3. If you open a file and need to get to an area around a specific line that is not in the first 100 lines, say line 583, don't just use the scroll_down command multiple times. Instead, use the goto 583 command. It's much quicker.
4. If the bug reproduction script requires inputting/reading a specific file, such as buggy-input.png, and you'd like to understand how to input that file, conduct a search in the existing repo code, to see whether someone else has already done that. Do this by running the command: find_file("buggy-input.png") If that doesn't work, use the linux 'find' command.
5. Always make sure to look at the currently open file and the current working directory (which appears right after the currently open file). The currently open file might be in a different directory than the working directory! Note that some commands, such as 'create', open files, so they might change the current open file.
6. When editing files, it is easy to accidentally specify a wrong line number or to write code with incorrect indentation. Always check the code after you issue an edit to make sure that it reflects what you wanted to accomplish. If it didn't, issue another command to fix it.
[Current directory: /workspace/{workspace_dir_name}]
"""
    else:
        # Testing general agents
        instruction = (
            f'Please fix the following issue for the repository in /workspace/{workspace_dir_name}.\n'
            'Environment has been set up for you to start working. You may assume all necessary tools are installed.\n\n'
            '# Problem Statement\n'
            f'{instance.problem_statement}\n\n'
        )
        if USE_HINT_TEXT and instance.hints_text:
            instruction += f'# Hints\n{instance.hints_text}\n\n'
        instruction += (
            'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
            'You should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\n'
            'You SHOULD INCLUDE PROPER INDENTATION in your edit commands.\n'
        )

    # NOTE: You can actually set slightly different instructions for different agents
    instruction += AGENT_CLS_TO_INST_SUFFIX.get(agent_class, '')

    # Here's how you can run the agent (similar to the `main` function) and get the final task state
    state: State = asyncio.run(
        main(
            instruction,
            fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(agent_class),
            sandbox=sandbox,
        )
    )

    # ======= THIS IS SWE-Bench specific =======
    # Get git patch
    git_patch = sandbox.get_diff_patch()
    logger.info(f'Got git diff for instance {instance.instance_id}')
    # ==========================================

    # ======= Attempt to evaluate the agent's edits =======
    # TODO: if you need to do something in the sandbox to get the correctness metric, modify this function
    test_result = get_test_result(instance, sandbox, workspace_dir_name)

    # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
    # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
    if state is None:
        raise ValueError('State should not be None.')
    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = {
        'instance_id': instance.instance_id,
        'swe_instance': instance.to_dict(),  # SWE Bench specific
        'instruction': instruction,
        'git_patch': git_patch,  # SWE Bench specific
        'metadata': metadata,
        'history': [
            (event_to_dict(action), event_to_dict(obs)) for action, obs in state.history
        ],
        'metrics': metrics,
        'error': state.error if state and state.error else None,
        'test_result': test_result,
    }

    # Close the sandbox
    sandbox.close()
    return output
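
# filter_dataset optionally restricts the evaluation to the instance IDs listed
# under `selected_ids` in a config.toml next to this script; otherwise the full
# dataset is returned unchanged.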


def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.toml')
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            data = toml.load(file)
            if 'selected_ids' in data:
                selected_ids = data['selected_ids']
                logger.info(
                    f'Filtering {len(selected_ids)} tasks from "selected_ids"...'
                )
                subset = dataset[dataset[filter_column].isin(selected_ids)]
                logger.info(f'Retained {subset.shape[0]} tasks after filtering')
                return subset
    return dataset
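
# The driver below loads SWE-bench Lite, prepares the output directory and
# run metadata, skips instances already present in output.jsonl, and dispatches
# the remaining ones to worker processes.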


if __name__ == '__main__':
    # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
    # so we don't need to manage file uploading to OpenDevin's repo
    dataset = load_dataset('princeton-nlp/SWE-bench_Lite')
    swe_bench_tests = filter_dataset(dataset['test'].to_pandas(), 'instance_id')

    # Check https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/README.md#configure-opendevin-and-your-llm
    # for details of how to set `llm_config`
    if args.llm_config:
        specified_llm_config = get_llm_config_arg(args.llm_config)
        if specified_llm_config:
            config.llm = specified_llm_config
    logger.info(f'Config for evaluation: {config}')

    # TEST METADATA
    agent_class = args.agent_cls
    assert (
        agent_class in AGENT_CLS_TO_FAKE_USER_RESPONSE_FN
    ), f'Unsupported agent class: {agent_class}'
    model_name = config.llm.model.split('/')[-1]
    max_iterations = args.max_iterations
    eval_note = ''
    if args.eval_note is not None:
        eval_note += '_N_' + args.eval_note
    eval_output_dir = os.path.join(
        args.eval_output_dir,
        'swe_bench_lite',
        agent_class,
        model_name + '_maxiter_' + str(max_iterations) + eval_note,
    )
    pathlib.Path(eval_output_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_dir, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_dir}')

    metadata = {
        'agent_class': agent_class,
        'model_name': model_name,
        'max_iterations': max_iterations,
        'eval_output_dir': eval_output_dir,
        'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
        # get the commit id of current repo for reproducibility
        'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
    }
    _agent_cls = agenthub.Agent.get_cls(agent_class)
    if hasattr(_agent_cls, 'system_message'):
        metadata['system_message'] = _agent_cls.system_message
    if hasattr(_agent_cls, 'in_context_example'):
        metadata['in_context_example'] = _agent_cls.in_context_example
    logger.info(f'Metadata: {metadata}')
    with open(os.path.join(eval_output_dir, 'metadata.json'), 'w') as f:
        json.dump(metadata, f)

    # LIMIT EVALUATION
    eval_n_limit = args.eval_n_limit
    if eval_n_limit:
        swe_bench_tests = swe_bench_tests.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    # OUTPUT FILE
    output_file = os.path.join(eval_output_dir, 'output.jsonl')
    logger.info(f'Writing evaluation output to {output_file}')
    finished_instance_ids = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_instance_ids.add(data['instance_id'])
        logger.warning(
            f'Output file {output_file} already exists. Loaded {len(finished_instance_ids)} finished instances.'
        )
    output_fp = open(output_file, 'a')

    logger.info(
        f'Evaluation started with Agent {agent_class}, model {model_name}, max iterations {max_iterations}.'
    )

    # =============================================
    # filter out finished instances
    new_swe_bench_tests = []
    for idx, instance in swe_bench_tests.iterrows():
        if instance.instance_id in finished_instance_ids:
            logger.info(
                f'Skipping instance {instance.instance_id} as it is already finished.'
            )
            continue
        new_swe_bench_tests.append(instance)
    swe_bench_tests = pd.DataFrame(new_swe_bench_tests)
    logger.info(
        f'Finished instances: {len(finished_instance_ids)}, Remaining instances: {len(swe_bench_tests)}'
    )
    # =============================================

    pbar = tqdm(total=len(swe_bench_tests))

    # This function tracks the progress AND writes the output to a JSONL file
    def update_progress(future):
        pbar.update(1)
        output = future.result()
        pbar.set_description(f'Instance {output["instance_id"][:10]}')
        pbar.set_postfix_str(f'Test Result: {output["test_result"]["result"]}')
        logger.info(
            f'Finished evaluation for instance {output["instance_id"]}: {output["test_result"]["result"]}'
        )
        output_fp.write(json.dumps(output) + '\n')
        output_fp.flush()

    # This sets the multi-processing
    num_workers = args.eval_num_workers
    logger.info(f'Using {num_workers} workers for evaluation.')

    # This is SWE-Bench specific - CodeActAgent doesn't require mounted workspace to work
    skip_workspace_mount = agent_class == 'CodeActAgent'
    logger.info(f'Skipping workspace mount: {skip_workspace_mount}')

    try:
        with ProcessPoolExecutor(num_workers) as executor:
            futures = []
            # This is how we perform multi-processing
            for row_idx, instance in swe_bench_tests.iterrows():
                future = executor.submit(
                    process_instance,
                    instance,
                    agent_class,
                    metadata,
                    skip_workspace_mount,
                    eval_output_dir,
                    reset_logger=bool(num_workers > 1),
                )
                future.add_done_callback(update_progress)
                futures.append(future)
            # Wait for all futures to complete
            for future in futures:
                future.result()
    except KeyboardInterrupt:
        print('KeyboardInterrupt received. Cleaning up...')
        cleanup()

    output_fp.close()
    logger.info('Evaluation finished.')
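
# Each finished instance is appended to output.jsonl as a single JSON record,
# so an interrupted run can be resumed by re-running with the same output
# directory. A minimal sketch for inspecting results offline (assumes pandas is
# available; not part of this script):
#
#     import json
#     import pandas as pd
#     records = [json.loads(line) for line in open('output.jsonl')]
#     df = pd.DataFrame(records)[['instance_id', 'test_result']]
#     print(df.head())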