run_infer.py
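
# Brief module docstring added for orientation; it summarizes what the code
# below actually does and introduces no new behavior.
"""Commit0 inference driver for OpenHands.

For each Commit0 repo instance, this script builds a per-instance sandbox
config, clones the repo into the runtime, lets the agent implement the
function stubs, then collects the git diff, the pytest JSON report, and a
zip of the workspace as evaluation artifacts.
"""
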
import asyncio
import json
import os
from collections import Counter
from typing import Any

import pandas as pd
from commit0.harness.constants import SPLIT
from datasets import load_dataset

import openhands.agenthub
from evaluation.utils.shared import (
    EvalException,
    EvalMetadata,
    EvalOutput,
    assert_and_raise,
    codeact_user_response,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
    update_llm_config_for_completions_logging,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AgentConfig,
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    get_parser,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import CmdOutputObservation, ErrorObservation
from openhands.events.serialization.event import event_to_dict
from openhands.runtime.base import Runtime
from openhands.utils.async_utils import call_async_from_sync
from openhands.utils.shutdown_listener import sleep_if_should_continue

USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false').lower() == 'true'
USE_INSTANCE_IMAGE = os.environ.get('USE_INSTANCE_IMAGE', 'false').lower() == 'true'
RUN_WITH_BROWSING = os.environ.get('RUN_WITH_BROWSING', 'false').lower() == 'true'

AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'CodeActCommit0Agent': codeact_user_response,
}


def _get_commit0_workspace_dir_name(instance: pd.Series) -> str:
    # 'owner/repo' -> 'repo'
    return instance['repo'].split('/')[1]


def get_instruction(instance: pd.Series, metadata: EvalMetadata):
    workspace_dir_name = _get_commit0_workspace_dir_name(instance)
    # Prepare instruction
    test_cmd = instance['test']['test_cmd']
    test_dir = instance['test']['test_dir']

    # Instruction based on Anthropic's official trajectory
    # https://github.com/eschluntz/swe-bench-experiments/tree/main/evaluation/verified/20241022_tools_claude-3-5-sonnet-updated/trajs
    instruction = (
        '<uploaded_files>\n'
        f'/workspace/{workspace_dir_name}\n'
        '</uploaded_files>\n'
        f"I've uploaded a python code repository in the directory {workspace_dir_name}. Here is your task:\n\n"
        ' You need to complete the implementations for all functions (i.e., those with pass\n'
        ' statements) and pass the unit tests.\n\n'
        ' Do not change the names of existing functions or classes, as they may be referenced\n'
        ' from other code like unit tests, etc.\n\n'
        ' When you generate code, you must maintain the original formatting of the function\n'
        ' stubs (such as whitespaces), otherwise we will not be able to search/replace blocks\n'
        ' for code modifications, and therefore you will receive a score of 0 for your generated\n'
        ' code.'
        '\n\n'
        'Here is the command to run the unit tests:\n'
        '<test_command>\n'
        f'{test_cmd} {test_dir}\n'
        '</test_command>\n\n'
        'Make a local git commit for each agent step for all code changes. '
        'If there is no change in the current step, do not make a commit.'
    )

    if RUN_WITH_BROWSING:
        instruction += (
            '<IMPORTANT!>\n'
            'You SHOULD NEVER attempt to browse the web. '
            '</IMPORTANT!>\n'
        )
    return instruction


# TODO: migrate all swe-bench docker images to ghcr.io/openhands
DOCKER_IMAGE_PREFIX = os.environ.get(
    'EVAL_DOCKER_IMAGE_PREFIX', 'docker.io/wentingzhao/'
)
logger.info(f'Using docker image prefix: {DOCKER_IMAGE_PREFIX}')


def get_instance_docker_image(repo_name: str) -> str:
    return (DOCKER_IMAGE_PREFIX.rstrip('/') + '/' + repo_name).lower() + ':v0'
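
# For illustration (hypothetical repo name): with the default prefix above,
# get_instance_docker_image('minitorch') yields
# 'docker.io/wentingzhao/minitorch:v0'.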


def get_config(
    instance: pd.Series,
    metadata: EvalMetadata,
) -> AppConfig:
    assert USE_INSTANCE_IMAGE
    # We use a different instance image for each instance of the commit0 eval
    repo_name = instance['repo'].split('/')[1]
    base_container_image = get_instance_docker_image(repo_name)
    logger.info(
        f'Using instance container image: {base_container_image}. '
        f'Please make sure this image exists. '
        f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
    )
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        max_iterations=metadata.max_iterations,
        runtime=os.environ.get('RUNTIME', 'eventstream'),
        sandbox=SandboxConfig(
            base_container_image=base_container_image,
            enable_auto_lint=True,
            use_host_network=False,
            # large enough timeout, since some testcases take very long to run
            timeout=300,
            api_key=os.environ.get('ALLHANDS_API_KEY', None),
            remote_runtime_api_url=os.environ.get('SANDBOX_REMOTE_RUNTIME_API_URL'),
            keep_runtime_alive=False,
            remote_runtime_init_timeout=3600,
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(
        update_llm_config_for_completions_logging(
            metadata.llm_config, metadata.eval_output_dir, instance['instance_id']
        )
    )
    agent_config = AgentConfig(
        codeact_enable_jupyter=False,
        codeact_enable_browsing=RUN_WITH_BROWSING,
        codeact_enable_llm_editor=False,
    )
    config.set_agent_config(agent_config)
    return config


def initialize_runtime(
    runtime: Runtime,
    instance: pd.Series,
):
    """Initialize the runtime for the agent.

    This function is called before the runtime is used to run the agent.
    """
    logger.info('-' * 30)
    logger.info('BEGIN Runtime Initialization Fn')
    logger.info('-' * 30)
    workspace_dir_name = _get_commit0_workspace_dir_name(instance)
    obs: CmdOutputObservation

    # Clone the repo's commit0_combined branch into the workspace
    action = CmdRunAction(
        command=f'git clone -b commit0_combined https://github.com/{instance["repo"]}.git'
    )
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        obs.exit_code == 0,
        f'Failed to git clone -b commit0_combined https://github.com/{instance["repo"]}.git: {str(obs)}',
    )

    action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        obs.exit_code == 0,
        f'Failed to cd to /workspace/{workspace_dir_name}: {str(obs)}',
    )

    # Work on a dedicated branch so the agent's edits are easy to diff later
    action = CmdRunAction(command='git checkout -b openhands')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        obs.exit_code == 0, f'Failed to git checkout new branch openhands: {str(obs)}'
    )

    # Install commit0 (used later to enumerate the instance's test IDs)
    action = CmdRunAction(command='/root/.cargo/bin/uv pip install commit0')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert_and_raise(
        obs.exit_code == 0,
        f'Failed to install commit0: {str(obs)}',
    )

    logger.info('-' * 30)
    logger.info('END Runtime Initialization Fn')
    logger.info('-' * 30)


def complete_runtime(
    runtime: Runtime,
    instance: pd.Series,
) -> dict[str, Any]:
    """Complete the runtime for the agent.

    This function is called after the agent has run. If you need to do
    something in the sandbox to get the correctness metric, modify this
    function.
    """
    logger.info('-' * 30)
    logger.info('BEGIN Runtime Completion Fn')
    logger.info('-' * 30)
    obs: CmdOutputObservation
    workspace_dir_name = _get_commit0_workspace_dir_name(instance)

    # Stage any remaining uncommitted edits
    action = CmdRunAction(command='git add .')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
        f'Failed to git add .: {str(obs)}',
    )

    # Exit code 1 is tolerated: git commit returns 1 when there is nothing to commit
    action = CmdRunAction(command='git commit -m "openhands edits"')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation)
        and (obs.exit_code == 0 or obs.exit_code == 1),
        f'Failed to git commit -m "openhands edits": {str(obs)}',
    )

    # Generate diff patch compared to base commit, excluding spec.pdf.bz2 files
    n_retries = 0
    git_patch = None
    while n_retries < 5:
        action = CmdRunAction(
            command=f"git diff {instance['base_commit']} HEAD -- . ':(exclude)spec.pdf.bz2'"
        )
        action.timeout = 600 + 100 * n_retries
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        n_retries += 1
        if isinstance(obs, CmdOutputObservation):
            if obs.exit_code == 0:
                git_patch = obs.content.strip()
                break
            else:
                logger.info('Failed to get git diff, retrying...')
                sleep_if_should_continue(10)
        elif isinstance(obs, ErrorObservation):
            logger.error(f'Error occurred: {obs.content}. Retrying...')
            sleep_if_should_continue(10)
        else:
            assert_and_raise(False, f'Unexpected observation type: {str(obs)}')
    assert_and_raise(git_patch is not None, 'Failed to get git diff (None)')

    # Run the unit tests, redirecting all output to test_output.txt
    test_dir = instance['test']['test_dir']
    action = CmdRunAction(
        command=f"{instance['test']['test_cmd']} --json-report --json-report-file=report.json --continue-on-collection-errors {test_dir} > test_output.txt 2>&1"
    )
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation),
        f'Failed to run test command: {str(obs)}',
    )

    # Save the pytest exit code immediately, before any other command clobbers $?
    # (this relies on the runtime keeping a persistent shell session between actions)
    action = CmdRunAction(command='echo $?')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert_and_raise(
        isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
        f'Failed to save pytest exit code: {str(obs)}',
    )
    pytest_exit_code = obs.content.strip()

    # Read test output
    action = CmdRunAction(command='cat test_output.txt')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert_and_raise(
        isinstance(obs, CmdOutputObservation),
        f'Failed to read test output: {str(obs)}',
    )
    test_output = obs.content.strip()

    # Read the pytest JSON report
    action = CmdRunAction(command='cat report.json')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert_and_raise(
        isinstance(obs, CmdOutputObservation),
        f'Failed to read test report: {str(obs)}',
    )
    # Keep the report content: `obs` is overwritten by the next command below
    report_content = obs.content

    # Get the instance's test IDs from commit0
    repo_name = instance['repo'].split('/')[1]
    repo_name = repo_name.replace('.', '-')
    action = CmdRunAction(command=f'commit0 get-tests {repo_name}')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    test_ids = obs.content.strip().split('\n')

    try:
        report = json.loads(report_content)
        tests = {x['nodeid']: x['call'] for x in report['tests'] if 'call' in x}
        # Calculate test statistics; a test ID missing from the report counts as failed
        status = []
        runtimes = []
        no_runs = 0
        for test_id in test_ids:
            if test_id in tests and tests[test_id] is not None:
                status.append(tests[test_id]['outcome'])
                runtimes.append(tests[test_id]['duration'])
                no_runs += 1
            else:
                status.append('failed')
                runtimes.append(0)
        status_counts = Counter(status)
        total_runtime = sum(runtimes) if no_runs > 0 else 0
        num_passed = status_counts.get('passed', 0) + status_counts.get('xfail', 0)
        passed_ratio = num_passed / len(status) if status else 0
        eval_result = {
            'name': workspace_dir_name,
            'sum': total_runtime,
            'passed': passed_ratio,
            'num_passed': num_passed,
            'num_tests': len(test_ids),
        }
    except json.JSONDecodeError:
        logger.error('Failed to parse test report JSON')
        eval_result = {
            'name': workspace_dir_name,
            'sum': 0,
            'passed': 0,
            'num_passed': 0,
            'num_tests': len(test_ids),
        }

    # Copy the workspace out of the sandbox and persist it as a zip next to this script
    temp_zip = runtime.copy_from(f'/workspace/{workspace_dir_name}')
    commit0_dir = os.path.dirname(__file__)
    persistent_zip = os.path.join(commit0_dir, f'{workspace_dir_name}.zip')
    with open(temp_zip, 'rb') as src, open(persistent_zip, 'wb') as dst:
        dst.write(src.read())
    zip_file = persistent_zip

    return {
        'eval_result': eval_result,
        'git_patch': git_patch,
        'test_output': test_output,
        'pytest_exit_code': pytest_exit_code,
        'zip_file': zip_file,
    }
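
# Worked example of the scoring above (hypothetical numbers): if commit0
# get-tests lists 10 test IDs and the pytest report marks 7 of them 'passed'
# and 1 'xfail', then num_passed = 8 and passed_ratio = 8 / 10 = 0.8.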


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
) -> EvalOutput:
    config = get_config(instance, metadata)

    # Set up the logger properly, so you can run multiprocessing to parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)
    else:
        logger.info(f'Starting evaluation for instance {instance.instance_id}.')

    runtime = create_runtime(config)
    call_async_from_sync(runtime.connect)
    try:
        initialize_runtime(runtime, instance)
        instruction = get_instruction(instance, metadata)

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_controller(
                config=config,
                initial_user_action=MessageAction(content=instruction),
                runtime=runtime,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[
                    metadata.agent_class
                ],
            )
        )

        # if fatal error, raise EvalException to trigger a re-run
        if (
            state.last_error
            and 'fatal error during agent execution' in state.last_error
            and 'stuck in a loop' not in state.last_error
        ):
            raise EvalException('Fatal error detected: ' + state.last_error)

        # ======= THIS IS Commit0 specific =======
        # Get git patch and other evaluation artifacts
        return_val = complete_runtime(runtime, instance)
        eval_result = return_val['eval_result']
        git_patch = return_val['git_patch']
        test_output = return_val['test_output']
        pytest_exit_code = return_val['pytest_exit_code']
        zip_file = return_val['zip_file']

        repo_name = instance['repo'].split('/')[1]
        zip_dest = os.path.join(
            metadata.eval_output_dir, 'repos', repo_name, f'{repo_name}.zip'
        )
        patch_file = os.path.join(
            metadata.eval_output_dir, 'repos', repo_name, f'{repo_name}_patch.diff'
        )
        test_output_file = os.path.join(
            metadata.eval_output_dir, 'repos', repo_name, f'{repo_name}_test_output.txt'
        )
        pytest_exit_code_file = os.path.join(
            metadata.eval_output_dir,
            'repos',
            repo_name,
            f'{repo_name}_pytest_exit_code.txt',
        )
        os.makedirs(os.path.dirname(zip_dest), exist_ok=True)
        os.rename(zip_file, zip_dest)

        write_targets = [
            (patch_file, git_patch),
            (test_output_file, test_output),
            (pytest_exit_code_file, pytest_exit_code),
        ]
        for write_target in write_targets:
            with open(write_target[0], 'w') as f:
                f.write(write_target[1])

        logger.info(
            f'Got evaluation result for repo {instance.instance_id}:\n--------\n{eval_result}\n--------'
        )
    finally:
        runtime.close()
    # ==========================================

    # ======= Attempt to evaluate the agent's edits =======
    # we use eval_infer.sh to evaluate the agent's edits, not here,
    # because the agent may alter the environment / testcases
    test_result = {
        'eval_result': eval_result,
    }

    # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
    # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
    if state is None:
        raise ValueError('State should not be None.')

    # NOTE: this is NO LONGER the event stream, but an agent history that includes delegate agent's events
    histories = [event_to_dict(event) for event in state.history]
    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = EvalOutput(
        instance_id=instance.instance_id,
        instruction=instruction,
        instance=instance.to_dict(),
        test_result=test_result,
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
    )
    return output


def commit0_setup(dataset: pd.DataFrame, repo_split: str) -> pd.DataFrame:
    """Set up the Commit0 dataset based on split type.

    Args:
        dataset: Full Commit0 dataset
        repo_split: Split type ('all', 'lite', or a specific repo name)

    Returns:
        Filtered dataset based on split type
    """
    filtered_dataset = pd.concat(
        [
            dataset[dataset['repo'].str.split('/').str[1] == repo]
            for repo in SPLIT.get(repo_split, [])
        ]
    )

    # Drop setup column if it exists
    if 'setup' in filtered_dataset.columns:
        filtered_dataset = filtered_dataset.drop('setup', axis=1)

    # Use the bare repo name (no owner prefix, no slashes) as the instance_id
    filtered_dataset['instance_id'] = filtered_dataset['repo'].str.split('/').str[1]
    return filtered_dataset
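
# Minimal usage sketch (hypothetical repo names; the real split lists come
# from commit0.harness.constants.SPLIT):
#
#     df = pd.DataFrame({'repo': ['commit0/minitorch', 'commit0/tinydb']})
#     lite = commit0_setup(df, 'lite')  # keeps only repos listed in SPLIT['lite']
#     lite['instance_id']               # bare repo names, e.g. ['minitorch', ...]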


if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--dataset',
        type=str,
        default='wentingzhao/commit0_combined',
        help='dataset to evaluate on; only the test split exists for this HF dataset',
    )
    parser.add_argument(
        '--split',
        type=str,
        default='test',
        help='this is the HF dataset split',
    )
    parser.add_argument(
        '--repo-split',
        type=str,
        default='lite',
        help='all, lite, or a specific repo name',
    )
    args, _ = parser.parse_known_args()

    # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
    # so we don't need to manage file uploading to OpenHands's repo
    dataset = load_dataset(args.dataset, split=args.split)
    commit0_datasets = commit0_setup(dataset.to_pandas(), args.repo_split)
    logger.info(f'Loaded dataset {args.dataset} with repo split {args.repo_split}')

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
    # Only enable completions logging once we know the config actually resolved
    llm_config.log_completions = True

    details = {}
    _agent_cls = openhands.agenthub.Agent.get_cls(args.agent_cls)

    dataset_description = (
        args.dataset.replace('/', '__') + '-' + args.repo_split.replace('/', '__')
    )
    metadata = make_metadata(
        llm_config,
        dataset_description,
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
        details=details,
    )

    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(commit0_datasets, output_file, args.eval_n_limit)

    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
        timeout_seconds=120 * 60,  # 2 hours PER instance should be more than enough
    )
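
# Example invocation (a sketch: --dataset/--split/--repo-split are defined above;
# the remaining flag spellings come from openhands' get_parser, and
# 'my_llm_config' is a hypothetical config name):
#
#     python run_infer.py --llm_config my_llm_config --repo-split lite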