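"""Run SWE-bench inference with an OpenHands agent.

For each SWE-bench instance, this script builds an AppConfig, connects a sandboxed
runtime (optionally a per-instance Docker image), lets the agent work on the issue,
and records the resulting `git diff` as `git_patch` in <eval_output_dir>/output.jsonl.
Patch correctness is scored separately by eval_infer.sh.

Example invocation (illustrative; --dataset and --split are defined below, while the
remaining flags come from openhands.core.config.get_parser, so adjust to your setup):

    python run_infer.py \
        --dataset princeton-nlp/SWE-bench_Lite \
        --split test \
        --llm_config <your_llm_config>
"""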
import asyncio
import json
import os
import tempfile
from typing import Any

import pandas as pd
import toml
from datasets import load_dataset

import openhands.agenthub
from evaluation.swe_bench.prompt import CODEACT_SWE_PROMPT
from evaluation.utils.shared import (
    EvalException,
    EvalMetadata,
    EvalOutput,
    assert_and_raise,
    codeact_user_response,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
    update_llm_config_for_completions_logging,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AgentConfig,
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    get_parser,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import CmdOutputObservation, ErrorObservation
from openhands.events.serialization.event import event_to_dict
from openhands.runtime.base import Runtime
from openhands.utils.async_utils import call_async_from_sync
from openhands.utils.shutdown_listener import sleep_if_should_continue

USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false').lower() == 'true'
USE_INSTANCE_IMAGE = os.environ.get('USE_INSTANCE_IMAGE', 'false').lower() == 'true'
RUN_WITH_BROWSING = os.environ.get('RUN_WITH_BROWSING', 'false').lower() == 'true'
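# These toggles are read from the environment once, at import time, e.g.
#   USE_INSTANCE_IMAGE=true python run_infer.py ...   (illustrative invocation)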
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'CodeActSWEAgent': codeact_user_response,
}
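# codeact_user_response (see evaluation.utils.shared) stands in for a human user:
# if the agent pauses to ask a question, it receives an automatic "keep going" style
# reply so the evaluation run does not stall waiting for input.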
def _get_swebench_workspace_dir_name(instance: pd.Series) -> str:
    return f'{instance.repo}__{instance.version}'.replace('/', '__')
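# e.g. repo='astropy/astropy', version='4.3' -> 'astropy__astropy__4.3' (illustrative values)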
def get_instruction(instance: pd.Series, metadata: EvalMetadata):
    workspace_dir_name = _get_swebench_workspace_dir_name(instance)
    # Prepare instruction
    if metadata.agent_class == 'CodeActSWEAgent':
        instruction = (
            'We are currently solving the following issue within our repository. Here is the issue text:\n'
            '--- BEGIN ISSUE ---\n'
            f'{instance.problem_statement}\n'
            '--- END ISSUE ---\n\n'
        )
        if USE_HINT_TEXT and instance.hints_text:
            instruction += (
                f'--- BEGIN HINTS ---\n{instance.hints_text}\n--- END HINTS ---\n'
            )
        instruction += CODEACT_SWE_PROMPT.format(workspace_dir_name=workspace_dir_name)
    else:
        # Instruction based on Anthropic's official trajectory
        # https://github.com/eschluntz/swe-bench-experiments/tree/main/evaluation/verified/20241022_tools_claude-3-5-sonnet-updated/trajs
        instruction = (
            '<uploaded_files>\n'
            f'/workspace/{workspace_dir_name}\n'
            '</uploaded_files>\n'
            f"I've uploaded a python code repository in the directory {workspace_dir_name}. Consider the following PR description:\n\n"
            f'<pr_description>\n'
            f'{instance.problem_statement}\n'
            '</pr_description>\n\n'
            'Can you help me implement the necessary changes to the repository so that the requirements specified in the <pr_description> are met?\n'
            "I've already taken care of all changes to any of the test files described in the <pr_description>. This means you DON'T have to modify the testing logic or any of the tests in any way!\n"
            'Your task is to make the minimal changes to non-tests files in the /workspace directory to ensure the <pr_description> is satisfied.\n'
            'Follow these steps to resolve the issue:\n'
            '1. As a first step, it might be a good idea to explore the repo to familiarize yourself with its structure.\n'
            '2. Create a script to reproduce the error and execute it with `python <filename.py>` using the BashTool, to confirm the error\n'
            '3. Edit the sourcecode of the repo to resolve the issue\n'
            '4. Rerun your reproduce script and confirm that the error is fixed!\n'
            '5. Think about edgecases and make sure your fix handles them as well\n'
            "Your thinking should be thorough and so it's fine if it's very long.\n"
        )

    if RUN_WITH_BROWSING:
        instruction += (
            '<IMPORTANT!>\n'
            'You SHOULD NEVER attempt to browse the web. '
            '</IMPORTANT!>\n'
        )
    return instruction
# TODO: migrate all swe-bench docker images to ghcr.io/openhands
DOCKER_IMAGE_PREFIX = os.environ.get('EVAL_DOCKER_IMAGE_PREFIX', 'docker.io/xingyaoww/')
logger.info(f'Using docker image prefix: {DOCKER_IMAGE_PREFIX}')


def get_instance_docker_image(instance_id: str) -> str:
    image_name = 'sweb.eval.x86_64.' + instance_id
    image_name = image_name.replace(
        '__', '_s_'
    )  # to comply with docker image naming convention
    return (DOCKER_IMAGE_PREFIX.rstrip('/') + '/' + image_name).lower()
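# e.g. instance_id 'astropy__astropy-12907' maps to
# 'docker.io/xingyaoww/sweb.eval.x86_64.astropy_s_astropy-12907' with the default
# prefix (illustrative; '__' is rewritten to '_s_' as noted above).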
def get_config(
    instance: pd.Series,
    metadata: EvalMetadata,
) -> AppConfig:
    SWE_BENCH_CONTAINER_IMAGE = 'ghcr.io/opendevin/eval-swe-bench:full-v1.2.1'
    if USE_INSTANCE_IMAGE:
        # We use a different instance image for each instance of the swe-bench eval
        base_container_image = get_instance_docker_image(instance['instance_id'])
        logger.info(
            f'Using instance container image: {base_container_image}. '
            f'Please make sure this image exists. '
            f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
        )
    else:
        base_container_image = SWE_BENCH_CONTAINER_IMAGE
        logger.info(f'Using swe-bench container image: {base_container_image}')
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        max_iterations=metadata.max_iterations,
        runtime=os.environ.get('RUNTIME', 'eventstream'),
        sandbox=SandboxConfig(
            base_container_image=base_container_image,
            enable_auto_lint=True,
            use_host_network=False,
            # large enough timeout, since some testcases take very long to run
            timeout=300,
            # Add platform to the sandbox config to solve issue 4401
            platform='linux/amd64',
            api_key=os.environ.get('ALLHANDS_API_KEY', None),
            remote_runtime_api_url=os.environ.get('SANDBOX_REMOTE_RUNTIME_API_URL'),
            keep_remote_runtime_alive=False,
            remote_runtime_init_timeout=3600,
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(
        update_llm_config_for_completions_logging(
            metadata.llm_config, metadata.eval_output_dir, instance['instance_id']
        )
    )
    agent_config = AgentConfig(
        codeact_enable_jupyter=False,
        codeact_enable_browsing=RUN_WITH_BROWSING,
        codeact_enable_llm_editor=False,
    )
    config.set_agent_config(agent_config)
    return config
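# Note: the api_key / remote_runtime_* fields above only take effect when RUNTIME
# selects a remote runtime (e.g. RUNTIME=remote); with the default 'eventstream'
# runtime the sandbox runs in a local Docker container. Check which RUNTIME values
# your OpenHands version supports.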
def initialize_runtime(
    runtime: Runtime,
    instance: pd.Series,  # this argument is not required
):
    """Initialize the runtime for the agent.

    This function is called before the runtime is used to run the agent.
    """
    logger.info('-' * 30)
    logger.info('BEGIN Runtime Initialization Fn')
    logger.info('-' * 30)
    workspace_dir_name = _get_swebench_workspace_dir_name(instance)
    obs: CmdOutputObservation

    # Set instance id
    action = CmdRunAction(
        command=f"""echo 'export SWE_INSTANCE_ID={instance['instance_id']}' >> ~/.bashrc && echo 'export PIP_CACHE_DIR=~/.cache/pip' >> ~/.bashrc && echo "alias git='git --no-pager'" >> ~/.bashrc"""
    )
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        obs.exit_code == 0, f'Failed to export SWE_INSTANCE_ID: {str(obs)}'
    )

    action = CmdRunAction(command="""export USER=$(whoami); echo USER=${USER} """)
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(obs.exit_code == 0, f'Failed to export USER: {str(obs)}')

    if USE_INSTANCE_IMAGE:
        # inject the init script
        script_dir = os.path.dirname(__file__)

        # inject the instance info
        action = CmdRunAction(command='mkdir -p /swe_util/eval_data/instances')
        action.timeout = 600
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        assert_and_raise(
            obs.exit_code == 0,
            f'Failed to create /swe_util/eval_data/instances: {str(obs)}',
        )

        swe_instance_json_name = 'swe-bench-instance.json'
        with tempfile.TemporaryDirectory() as temp_dir:
            # Construct the full path for the desired file name within the temporary directory
            temp_file_path = os.path.join(temp_dir, swe_instance_json_name)
            # Write to the file with the desired name within the temporary directory
            with open(temp_file_path, 'w') as f:
                if not isinstance(instance, dict):
                    json.dump([instance.to_dict()], f)
                else:
                    json.dump([instance], f)

            # Copy the file to the desired location
            runtime.copy_to(temp_file_path, '/swe_util/eval_data/instances/')

        # inject the instance swe entry
        runtime.copy_to(
            str(os.path.join(script_dir, 'scripts/setup/instance_swe_entry.sh')),
            '/swe_util/',
        )
        action = CmdRunAction(command='cat ~/.bashrc')
        action.timeout = 600
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        assert_and_raise(obs.exit_code == 0, f'Failed to cat ~/.bashrc: {str(obs)}')

        action = CmdRunAction(command='source ~/.bashrc')
        action.timeout = 600
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        if isinstance(obs, ErrorObservation):
            logger.error(f'Failed to source ~/.bashrc: {str(obs)}')
        assert_and_raise(obs.exit_code == 0, f'Failed to source ~/.bashrc: {str(obs)}')

        action = CmdRunAction(command='source /swe_util/instance_swe_entry.sh')
        action.timeout = 3600
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        assert_and_raise(
            obs.exit_code == 0,
            f'Failed to source /swe_util/instance_swe_entry.sh: {str(obs)}',
        )
    else:
        action = CmdRunAction(command='source /swe_util/swe_entry.sh')
        action.timeout = 1800
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        assert_and_raise(
            obs.exit_code == 0,
            f'Failed to source /swe_util/swe_entry.sh: {str(obs)}',
        )

    action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        obs.exit_code == 0,
        f'Failed to cd to /workspace/{workspace_dir_name}: {str(obs)}',
    )

    action = CmdRunAction(command='git reset --hard')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(obs.exit_code == 0, f'Failed to git reset --hard: {str(obs)}')

    action = CmdRunAction(
        command='for remote_name in $(git remote); do git remote remove "${remote_name}"; done'
    )
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(obs.exit_code == 0, f'Failed to remove git remotes: {str(obs)}')

    logger.info('-' * 30)
    logger.info('END Runtime Initialization Fn')
    logger.info('-' * 30)
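# NOTE: the swe_entry.sh / instance_swe_entry.sh scripts sourced above are expected to
# prepare the per-instance testbed environment inside the container; initialize_runtime
# itself only wires them up and fails fast via assert_and_raise on any non-zero exit.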
def complete_runtime(
    runtime: Runtime,
    instance: pd.Series,  # this argument is not required, but it is used to get the workspace_dir_name
) -> dict[str, Any]:
    """Complete the runtime for the agent.

    This function is called after the agent has run.
    If you need to do something in the sandbox to get the correctness metric after
    the agent has run, modify this function.
    """
    logger.info('-' * 30)
    logger.info('BEGIN Runtime Completion Fn')
    logger.info('-' * 30)
    obs: CmdOutputObservation
    workspace_dir_name = _get_swebench_workspace_dir_name(instance)

    action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
        f'Failed to cd to /workspace/{workspace_dir_name}: {str(obs)}',
    )

    action = CmdRunAction(command='git config --global core.pager ""')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
        f'Failed to git config --global core.pager "": {str(obs)}',
    )

    action = CmdRunAction(command='git add -A')
    action.timeout = 600
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    logger.info(obs, extra={'msg_type': 'OBSERVATION'})
    assert_and_raise(
        isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
        f'Failed to git add -A: {str(obs)}',
    )

    n_retries = 0
    git_patch = None
    while n_retries < 5:
        action = CmdRunAction(
            command=f'git diff --no-color --cached {instance["base_commit"]}',
            keep_prompt=False,  # do not include the shell prompt in the observation output
        )
        action.timeout = 600 + 100 * n_retries
        logger.info(action, extra={'msg_type': 'ACTION'})
        obs = runtime.run_action(action)
        logger.info(obs, extra={'msg_type': 'OBSERVATION'})
        n_retries += 1
        if isinstance(obs, CmdOutputObservation):
            if obs.exit_code == 0:
                git_patch = obs.content.strip()
                break
            else:
                logger.info('Failed to get git diff, retrying...')
                sleep_if_should_continue(10)
        elif isinstance(obs, ErrorObservation):
            logger.error(f'Error occurred: {obs.content}. Retrying...')
            sleep_if_should_continue(10)
        else:
            assert_and_raise(False, f'Unexpected observation type: {str(obs)}')

    assert_and_raise(git_patch is not None, 'Failed to get git diff (None)')

    logger.info('-' * 30)
    logger.info('END Runtime Completion Fn')
    logger.info('-' * 30)
    return {'git_patch': git_patch}
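# The patch returned here is not scored in this process: eval_infer.sh consumes the
# saved output.jsonl and runs the SWE-bench test harness separately (see the comments
# in process_instance below).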
def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
) -> EvalOutput:
    config = get_config(instance, metadata)

    # Setup the logger properly, so you can run multi-processing to parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)
    else:
        logger.info(f'Starting evaluation for instance {instance.instance_id}.')

    runtime = create_runtime(config)
    call_async_from_sync(runtime.connect)

    try:
        initialize_runtime(runtime, instance)

        instruction = get_instruction(instance, metadata)

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_controller(
                config=config,
                initial_user_action=MessageAction(content=instruction),
                runtime=runtime,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[
                    metadata.agent_class
                ],
            )
        )

        # if fatal error, throw EvalException to trigger a re-run
        if (
            state.last_error
            and 'fatal error during agent execution' in state.last_error
            and 'stuck in a loop' not in state.last_error
        ):
            raise EvalException('Fatal error detected: ' + state.last_error)

        # ======= THIS IS SWE-Bench specific =======
        # Get git patch
        return_val = complete_runtime(runtime, instance)
        git_patch = return_val['git_patch']
        logger.info(
            f'Got git diff for instance {instance.instance_id}:\n--------\n{git_patch}\n--------'
        )
    finally:
        runtime.close()
    # ==========================================

    # ======= Attempt to evaluate the agent's edits =======
    # we use eval_infer.sh to evaluate the agent's edits, not here,
    # because the agent may alter the environment / testcases
    test_result = {
        'git_patch': git_patch,
    }

    # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
    # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
    if state is None:
        raise ValueError('State should not be None.')

    # NOTE: this is NO LONGER the event stream, but an agent history that includes delegate agent's events
    histories = [event_to_dict(event) for event in state.history]
    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = EvalOutput(
        instance_id=instance.instance_id,
        instruction=instruction,
        instance=instance.to_dict(),  # SWE Bench specific
        test_result=test_result,
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
    )
    return output
def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.toml')
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            data = toml.load(file)
            if 'selected_ids' in data:
                selected_ids = data['selected_ids']
                logger.info(
                    f'Filtering {len(selected_ids)} tasks from "selected_ids"...'
                )
                subset = dataset[dataset[filter_column].isin(selected_ids)]
                logger.info(f'Retained {subset.shape[0]} tasks after filtering')
                return subset
    return dataset
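# An optional config.toml next to this script can pin specific instances, e.g.:
#   selected_ids = ["astropy__astropy-12907", "django__django-11099"]
# (illustrative ids; the values must match the dataset's instance_id column)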
if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--dataset',
        type=str,
        default='princeton-nlp/SWE-bench',
        help='dataset to evaluate on, either full-test or lite-test',
    )
    parser.add_argument(
        '--split',
        type=str,
        default='test',
        help='split to evaluate on',
    )
    args, _ = parser.parse_known_args()

    # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
    # so we don't need to manage file uploading to OpenHands's repo
    dataset = load_dataset(args.dataset, split=args.split)
    logger.info(f'Loaded dataset {args.dataset} with split {args.split}')
    swe_bench_tests = filter_dataset(dataset.to_pandas(), 'instance_id')

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
    llm_config.log_completions = True

    details = {}
    _agent_cls = openhands.agenthub.Agent.get_cls(args.agent_cls)

    dataset_description = (
        args.dataset.replace('/', '__') + '-' + args.split.replace('/', '__')
    )
    metadata = make_metadata(
        llm_config,
        dataset_description,
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
        details=details,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(swe_bench_tests, output_file, args.eval_n_limit)

    # PASS_TO_PASS / FAIL_TO_PASS may be loaded as lists; stringify them so they
    # serialize consistently in the saved output
    if len(instances) > 0 and not isinstance(
        instances['PASS_TO_PASS'][instances['PASS_TO_PASS'].index[0]], str
    ):
        for col in ['PASS_TO_PASS', 'FAIL_TO_PASS']:
            instances[col] = instances[col].apply(lambda x: str(x))

    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
        timeout_seconds=120 * 60,  # 2 hours PER instance should be more than enough
    )