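"""Run MINT benchmark evaluation with OpenDevin agents (run_infer.py).

Example invocation (a sketch only, not a verified command line): `--subset`
and `--max-propose-solution` are defined in this file, the spellings of the
remaining flags are assumed from the `args.*` attributes used below (they are
provided by `get_parser()` from opendevin.core.config), and `eval_gpt4` is a
hypothetical LLM config name:

    python run_infer.py \
        --agent-cls CodeActAgent \
        --llm-config eval_gpt4 \
        --subset math \
        --max-iterations 5 \
        --max-propose-solution 2 \
        --eval-num-workers 2
"""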
import asyncio
import functools
import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor
from typing import Dict

import tasks
from config_variables import TASK_INFO_MAP
from datasets import load_dataset
from datatypes import TaskState
from env import SimplifiedEnv
from prompts import ToolPromptTemplate
from tasks import Task
from tqdm import tqdm

from evaluation.swe_bench.swe_env_box import DockerSSHBox
from opendevin.controller.state.state import State
from opendevin.core.config import config, get_llm_config_arg, get_parser
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import main
from opendevin.events.serialization.event import event_to_dict


def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def codeact_user_response(state: State, task: Task, task_config: Dict[str, int]):
    """Simulate the user's side of the conversation for CodeActAgent.

    Feeds the agent's latest message to SimplifiedEnv and returns the
    environment's feedback as the next user message, or '/exit' once the
    environment reports no further output (i.e. the task is finished).
    """
    logger.info(f'Gold reference: {task.reference}')
    logger.info(f'Task config: {task_config}')

    env = SimplifiedEnv(
        agent_state=state,
        task=task,
        task_config=task_config,
    )
    last_action, _ = state.history[-1]
    result_state: TaskState = env.step(last_action.message)

    state.task_state = result_state

    if not result_state.latest_output:
        # Task is finished
        msg = '/exit'
    else:
        msg = result_state.latest_output['content']
    logger.info('User response: ' + msg)
    return msg


def monologue_user_response(state: State) -> str:
    raise NotImplementedError('MonologueAgent should never ask for user responses.')


AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'MonologueAgent': monologue_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': '\nIMPORTANT: When your answer is confirmed by the user to be correct, you can exit using the following command: <execute_bash> exit </execute_bash>.\n'
}


def process_instance(
    instance: Task,
    agent_class,
    metadata,
    skip_workspace_mount,
    eval_output_dir,
    reset_logger: bool = True,
):
    workspace_mount_path = os.path.join(config.workspace_mount_path, '_eval_workspace')
    # create process-specific workspace dir
    # if `not skip_workspace_mount` - we will create a workspace directory for EACH process
    # so that different agents don't interfere with each other.
    if not skip_workspace_mount:
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)

    # Set up the logger properly, so you can run multiprocessing to parallelize the evaluation
    if reset_logger:
        # Set up logger
        log_file = os.path.join(
            eval_output_dir, 'logs', f'instance_{instance.task_id}.log'
        )
        # Remove all existing handlers from logger
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        # add back the console handler to print ONE line
        logger.addHandler(get_console_handler())
        logger.info(
            f'Starting evaluation for instance {instance.task_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
        )
        # Remove all existing handlers from logger
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        )
        logger.addHandler(file_handler)

    if not skip_workspace_mount:
        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

    sandbox = DockerSSHBox()

    requirements_host_src = 'evaluation/mint/requirements.txt'
    requirements_sandbox_dest = '/opendevin/plugins/mint/requirements.txt'
    sandbox.copy_to(
        host_src=requirements_host_src,
        sandbox_dest=requirements_sandbox_dest,
        recursive=False,
    )
    logger.info(
        f'Copied files from [{requirements_host_src}] to [{requirements_sandbox_dest}] inside sandbox.'
    )
    exit_code, output = sandbox.execute(f'pip install -r {requirements_sandbox_dest}')

    # Prepare instruction
    instruction = ToolPromptTemplate(use_tool=True)(
        max_total_steps=metadata['max_iterations'],
        max_propose_solution=metadata['max_propose_solution'],
        in_context_example=instance.in_context_example(
            use_tool=True, with_feedback=False
        ),
        task_prompt='Task:\n' + instance.prompt,
    )
    instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you or provide the concise RESULT inside <solution> tag AND NEVER ASK FOR HUMAN HELP.\n'
    # NOTE: You can actually set slightly different instructions for different agents
    instruction += AGENT_CLS_TO_INST_SUFFIX.get(agent_class, '')

    # Here's how you can run the agent (similar to the `main` function) and get the final task state
    fake_user_response_fn = functools.partial(
        AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(agent_class),
        task=instance,
        task_config={
            'max_iterations': metadata['max_iterations'],
            'max_propose_solution': metadata['max_propose_solution'],
        },
    )
    state: State = asyncio.run(
        main(
            instruction,
            fake_user_response_fn=fake_user_response_fn,
            sandbox=sandbox,
        )
    )

    if state is None:
        raise ValueError('State should not be None.')

    task_state = None
    if hasattr(state, 'task_state'):
        task_state = state.task_state
        logger.info('Task state: ' + str(task_state.to_dict()))

    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = {
        'id': instance.task_id,
        'instance': instance.to_dict(),
        'instruction': instruction,
        'metadata': metadata,
        'history': [
            (event_to_dict(action), event_to_dict(obs)) for action, obs in state.history
        ],
        'metrics': metrics,
        'error': state.error if state and state.error else None,
        'test_result': task_state.success if task_state else False,
    }

    # Close the sandbox
    sandbox.close()

    return output
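

# Each record returned by `process_instance` is appended to output.jsonl by the
# main loop below. A minimal post-processing sketch (the path placeholder is
# hypothetical; 'test_result' is the boolean field written above):
#
#   import json
#   with open('<eval_output_dir>/output.jsonl') as f:
#       records = [json.loads(line) for line in f]
#   accuracy = sum(r['test_result'] for r in records) / len(records)
#   print(f'Solved {accuracy:.2%} of {len(records)} instances')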


if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--subset',
        default='math',
        choices=['math', 'gsm8k', 'mmlu', 'theoremqa', 'mbpp', 'humaneval'],
        type=str,
        help='subset of the dataset to be used',
    )
    parser.add_argument(
        '--max-propose-solution',
        default=2,
        type=int,
        help='maximum number of times the agent can propose a solution',
    )
    args, _ = parser.parse_known_args()

    # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
    # so we don't need to manage file uploading to OpenDevin's repo
    mint_dataset = load_dataset(
        'ryanhoangt/xingyaoww-mint-bench', name=args.subset, split='test'
    )
    logger.info(f'Evaluating MINT - {args.subset} subset')

    # Check https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/README.md#configure-opendevin-and-your-llm
    # for details of how to set `llm_config`
    if args.llm_config:
        specified_llm_config = get_llm_config_arg(args.llm_config)
        if specified_llm_config:
            config.llm = specified_llm_config
    logger.info(f'Config for evaluation: {config}')

    # TEST METADATA
    agent_class = args.agent_cls
    assert (
        agent_class in AGENT_CLS_TO_FAKE_USER_RESPONSE_FN
    ), f'Unsupported agent class: {agent_class}'
    model_name = config.llm.model.split('/')[-1]
    max_iterations = args.max_iterations
    eval_note = ''
    if args.eval_note is not None:
        eval_note += '_N_' + args.eval_note
    eval_output_dir = os.path.join(
        args.eval_output_dir,
        'mint',
        agent_class,
        model_name + '_maxiter_' + str(max_iterations) + eval_note,
        args.subset,
    )
    pathlib.Path(eval_output_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_dir, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_dir}')

    metadata = {
        'agent_class': agent_class,
        'model_name': model_name,
        'max_iterations': max_iterations,
        'max_propose_solution': args.max_propose_solution,
        'eval_output_dir': eval_output_dir,
        'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
        # get the commit id of current repo for reproducibility
        'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
    }
    logger.info(f'Metadata: {metadata}')
    with open(os.path.join(eval_output_dir, 'metadata.json'), 'w') as f:
        json.dump(metadata, f)

    # LIMIT EVALUATION
    eval_n_limit = args.eval_n_limit
    if eval_n_limit:
        mint_dataset = mint_dataset.select(range(eval_n_limit))
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    # OUTPUT FILE
    output_file = os.path.join(eval_output_dir, 'output.jsonl')
    logger.info(f'Writing evaluation output to {output_file}')
    finished_instance_ids = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_instance_ids.add(data['id'])
        logger.warning(
            f'Output file {output_file} already exists. Loaded {len(finished_instance_ids)} finished instances.'
        )
    output_fp = open(output_file, 'a')

    logger.info(
        f'Evaluation started with Agent {agent_class}, model {model_name}, max iterations {max_iterations}, max propose solution {args.max_propose_solution}.'
    )

    # =============================================
    # filter out finished instances
    task_class: Task = getattr(tasks, TASK_INFO_MAP[args.subset]['class'])
    new_mint_tests: list[Task] = []
    for instance in mint_dataset:
        if instance['id'] in finished_instance_ids:
            logger.info(
                f'Skipping instance {instance["id"]} as it is already finished.'
            )
            continue
        # convert to Task object
        instance = task_class(**instance)
        new_mint_tests.append(instance)
    mint_dataset = new_mint_tests
    logger.info(
        f'Finished instances: {len(finished_instance_ids)}, Remaining instances: {len(mint_dataset)}'
    )
    # =============================================

    pbar = tqdm(total=len(mint_dataset))

    # This function tracks the progress AND writes the output to a JSONL file
    def update_progress(future):
        pbar.update(1)
        output = future.result()
        # logger.info('Output: ', output)
        # pbar.set_description(f'Instance {output["instance_id"]}')
        # pbar.set_postfix_str(f'Test Result: {output["test_result"]["result"]}')
        # logger.info(
        #     f'Finished evaluation for instance {output["instance_id"]}: {output["test_result"]["result"]}'
        # )
        output_fp.write(json.dumps(output) + '\n')
        output_fp.flush()

    # This sets up multiprocessing
    num_workers = args.eval_num_workers
    logger.info(f'Using {num_workers} workers for evaluation.')

    # This is SWE-Bench specific - CodeActAgent doesn't require a mounted workspace to work
    skip_workspace_mount = agent_class == 'CodeActAgent'
    logger.info(f'Skipping workspace mount: {skip_workspace_mount}')

    try:
        with ProcessPoolExecutor(num_workers) as executor:
            futures = []
            # This is how we perform multiprocessing
            for instance in mint_dataset:
                future = executor.submit(
                    process_instance,
                    instance,
                    agent_class,
                    metadata,
                    skip_workspace_mount,
                    eval_output_dir,
                    reset_logger=bool(num_workers > 1),
                )
                future.add_done_callback(update_progress)
                futures.append(future)

            # Wait for all futures to complete
            for future in futures:
                future.result()
    except KeyboardInterrupt:
        print('KeyboardInterrupt received. Cleaning up...')
        cleanup()

    output_fp.close()
    logger.info('Evaluation finished.')