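"""Run agent inference on the Gorilla APIBench benchmark.

For each selected hub (hf, torch, tf), this script prompts the agent with every
question, scores the final answer with the hub's AST evaluator, and appends one
JSON line per instance to the output file. Instances already present in the
output file are skipped, so interrupted runs can be resumed.

Example invocation (flag names assumed from the shared evaluation parser and an
`eval_gpt4` LLM config name used for illustration; adjust to your setup):

    python -m evaluation.gorilla.run_infer --agent-cls CodeActAgent \
        --llm-config eval_gpt4 --hubs hf --eval-n-limit 30 --eval-num-workers 2
"""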

import asyncio
import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor
from functools import partial

from tqdm import tqdm

from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.events.action import MessageAction
from opendevin.llm.llm import LLM

from .utils import encode_question, get_data

config = load_app_config()
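

# Terminate any worker processes that are still alive; called when a
# KeyboardInterrupt aborts the evaluation below.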
def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def codeact_user_response(state: State) -> str:
    msg = (
        # 'Please continue working on the task on whatever approach you think is suitable.\n'
        'Please run the following command: <execute_bash> exit </execute_bash>.\n'
        # 'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP OR USE THE INTERNET TO SOLVE THIS TASK.\n'
    )
    # Check if the agent has tried to talk to the user 3 times; if so, let the agent know it can give up
    if state.history:
        user_msgs = [
            event
            for event in state.history.get_events()
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) > 2:
            # Let the agent know that it can give up when it has tried 3 times
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )
    return msg
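

# Per-agent hooks: a canned "user" reply for when the agent asks for input, and
# a suffix appended to every instruction telling the agent how to signal completion.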
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': 'When you think you have completed the request, please run the following command: <execute_bash> exit </execute_bash>.\n'
}
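

# Runs in a worker process (submitted to the ProcessPoolExecutor below); the
# returned dict must be JSON-serializable, since the parent writes it to a JSONL file.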
def process_instance(agent, question_id, question, metadata, reset_logger: bool = True):
    # Create a workspace directory for EACH process,
    # so that different agents don't interfere with each other.
    old_workspace_mount_path = config.workspace_mount_path
    try:
        workspace_mount_path = os.path.join(
            config.workspace_mount_path, '_eval_workspace'
        )
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)
        config.workspace_mount_path = workspace_mount_path

        # Set up the logger properly, so you can run multiprocessing to parallelize the evaluation
        eval_output_dir = metadata['eval_output_dir']
        if reset_logger:
            # Set up logger
            log_file = os.path.join(
                eval_output_dir, 'logs', f'instance_{question_id}.log'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            # Add back the console handler to print ONE line
            logger.addHandler(get_console_handler())
            logger.info(
                f'Starting evaluation for instance {question_id}.\nLOG: tail -f {log_file}'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            )
            logger.addHandler(file_handler)

        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

        # Prepare instruction
        instruction = encode_question(question, metadata['hub'])
        instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
        # NOTE: You can actually set slightly different instructions for different agents
        instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]
        # logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'})

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_agent_controller(
                agent,
                instruction,
                max_iterations=metadata['max_iterations'],  # metadata is a plain dict
                max_budget_per_task=config.max_budget_per_task,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                    agent.__class__.__name__
                ),
                sid=question_id,
            )
        )

        # ======= Attempt to evaluate the agent's edits =======
        # If you are working on a simpler benchmark that only evaluates the final model output
        # (e.g., in a MessageAction), you can simply get the LAST `MessageAction` from the
        # returned `state.history` and parse it for evaluation.
        if state is None:
            raise ValueError('State should not be None.')

        # Retrieve the last message from the agent
        model_answer_raw = state.history.get_last_agent_message()

        # Attempt to parse model_answer
        _, _, ast_eval = get_data(metadata['hub'])
        correct, hallucination = ast_eval(question_id, model_answer_raw)
        metrics = state.metrics.get() if state.metrics else None
        logger.info(
            f'Final message: {model_answer_raw} | Correctness: {correct} | Hallucination: {hallucination}'
        )

        # History is now available as a stream of events, rather than a list of (Action, Observation) pairs.
        # For compatibility with the existing output format, we remake the pairs here.
        # Remove when it becomes unnecessary.
        histories = state.history.compatibility_for_eval_history_pairs()

        # Save the output
        output = {
            'question_id': question_id,
            'text': model_answer_raw,
            'correct': correct,
            'hallucination': hallucination,
            'answer_id': 'None',
            'model_id': metadata['model_name'],
            'metadata': metadata,  # metadata is a plain dict, not a pydantic model
            'history': histories,
            'metrics': metrics,
            'error': state.last_error if state and state.last_error else None,
        }
    except Exception:
        logger.error('Process instance failed')
        raise
    finally:
        config.workspace_mount_path = old_workspace_mount_path
    return output


if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--hubs',
        type=str,
        help='Which hubs to evaluate from APIBench. APIBench contains 3 hubs, namely huggingface, torch, and tensorflow. You can choose one or more from hf, torch, or tf, separated by commas. For example, the default is --hubs hf,torch,tf.',
        default='hf,torch,tf',
    )
    args, _ = parser.parse_known_args()
    if args.directory:
        config.workspace_base = os.path.abspath(args.directory)
        print(f'Setting workspace base to {config.workspace_base}')

    # Check https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/README.md#configure-opendevin-and-your-llm
    # for details on how to set `llm_config`
    if args.llm_config:
        specified_llm_config = get_llm_config_arg(args.llm_config)
        if specified_llm_config:
            config.llm = specified_llm_config
    logger.info(f'Config for evaluation: {config}')

    agent_class = args.agent_cls
    assert (
        agent_class in AGENT_CLS_TO_FAKE_USER_RESPONSE_FN
    ), f'Unsupported agent class: {agent_class}'
    model_name = config.llm.model.split('/')[-1]
    max_iterations = args.max_iterations
    eval_note = ''
    if args.eval_note is not None:
        eval_note += '_N_' + args.eval_note
    eval_output_dir = os.path.join(
        args.eval_output_dir,
        'gorilla',
        agent_class,
        model_name + '_maxiter_' + str(max_iterations) + eval_note,
    )
    pathlib.Path(eval_output_dir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_dir, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_dir}')

    hubs = []
    if 'hf' in args.hubs:
        hubs.append('hf')
    if 'torch' in args.hubs or 'th' in args.hubs:
        hubs.append('torch')
    if 'tf' in args.hubs:
        hubs.append('tf')
    if hubs == []:
        raise ValueError('Please choose at least one from hf, torch, and tf for hubs.')

    for hub in hubs:
        logger.info(f'Evaluating APIBench {hub} test')
        questions, question_ids, ast_eval = get_data(hub)

        # TEST METADATA
        metadata = {
            'hub': hub,
            'agent_class': agent_class,
            'model_name': model_name,
            'max_iterations': max_iterations,
            'eval_output_dir': eval_output_dir,
            'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
            # Record the commit id of the current repo for reproducibility
            'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
            .decode('utf-8')
            .strip(),
        }
        logger.info(f'Metadata: {metadata}')
        with open(os.path.join(eval_output_dir, f'metadata_{hub}.json'), 'w') as f:
            json.dump(metadata, f)

        # LIMIT EVALUATION
        eval_n_limit = args.eval_n_limit
        if eval_n_limit:
            questions = questions[: (eval_n_limit // len(hubs))]
            question_ids = question_ids[: (eval_n_limit // len(hubs))]
            logger.info(
                f'Limiting evaluation to a total of the first {eval_n_limit} instances -> first {eval_n_limit // len(hubs)} instances per hub.'
            )

        output_file = os.path.join(eval_output_dir, f'output_{model_name}_{hub}.jsonl')
        logger.info(f'Writing evaluation output to {output_file}')
        finished_task_ids = set()
        if os.path.exists(output_file):
            with open(output_file, 'r') as f:
                for line in f:
                    data = json.loads(line)
                    for i in range(len(question_ids)):
                        if question_ids[i] == int(data['question_id']):
                            finished_task_ids.add(data['question_id'])
            logger.warning(
                f'Output file {output_file} already exists. Loaded {len(finished_task_ids)} finished instances.'
            )
        output_fp = open(output_file, 'a')

        logger.info(
            f'Evaluation started with Agent {agent_class}, model {model_name}, max iterations {max_iterations}.'
        )

        # =============================================
        # Filter out finished instances
        new_questions = []
        new_question_ids = []
        for i in range(len(question_ids)):
            if question_ids[i] in finished_task_ids:
                logger.info(
                    f'Skipping instance {question_ids[i]} as it is already finished.'
                )
                continue
            new_questions.append(questions[i])
            new_question_ids.append(question_ids[i])
        finished_task_number = len(finished_task_ids)
        questions = new_questions
        question_ids = new_question_ids
        logger.info(
            f'Finished instances: {finished_task_number}, Remaining instances: {len(question_ids)}'
        )
        # =============================================

        pbar = tqdm(total=len(question_ids))

        # This function tracks the progress AND writes the output to a JSONL file
        def update_progress(future, pbar, output_fp, finished_task_ids):
            pbar.update(1)
            output = future.result()
            pbar.set_description(f'Instance {output["question_id"]}')
            pbar.set_postfix_str(f'Test Result: {output["correct"]}')
            logger.info(
                f'Finished evaluation for instance {output["question_id"]}: {output["correct"]}'
            )
            output_fp.write(json.dumps(output) + '\n')
            output_fp.flush()
            finished_task_ids.add(output['question_id'])
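
        # NOTE: done-callbacks run in a thread of the parent process, so it is
        # safe for update_progress to touch pbar and output_fp here.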

        # Create the agent
        agent = Agent.get_cls(agent_class)(llm=LLM(config.llm))

        # Set up multiprocessing
        num_workers = args.eval_num_workers
        logger.info(f'Using {num_workers} workers for evaluation.')
        try:
            with ProcessPoolExecutor(num_workers) as executor:
                futures = []
                # This is how we perform multiprocessing
                for i in range(len(question_ids)):
                    try:
                        question_id = question_ids[i]
                        question = questions[i]
                        future = executor.submit(
                            process_instance,
                            agent,
                            question_id,
                            question,
                            metadata,
                            reset_logger=bool(num_workers > 1),
                        )
                        # add_done_callback takes a single callable that receives the
                        # future, so bind the remaining arguments with functools.partial
                        future.add_done_callback(
                            partial(
                                update_progress,
                                pbar=pbar,
                                output_fp=output_fp,
                                finished_task_ids=finished_task_ids,
                            )
                        )
                        futures.append(future)
                    except Exception:
                        continue
                # Wait for all futures to complete
                for future in futures:
                    try:
                        future.result()
                    except Exception:
                        continue
        except KeyboardInterrupt:
            logger.info('KeyboardInterrupt received. Cleaning up...')
            cleanup()
        output_fp.close()

        total_correct = 0
        total_hallucination = 0
        output = []
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                output.append(data)
                if int(data['question_id']) in finished_task_ids:
                    if str(data['correct']).lower() == 'true':
                        total_correct += 1
                    if str(data['hallucination']).lower() == 'true':
                        total_hallucination += 1
        # Sort all output by question_id
        output = sorted(output, key=lambda x: x['question_id'])
        with open(output_file, 'w') as f:
            for dat in output:
                f.write(json.dumps(dat) + '\n')
                f.flush()
        logger.info(
            f'Evaluation finished for {hub}. Total: {len(question_ids) + finished_task_number}; '
            f'Correct: {total_correct}; Hallucination: {total_hallucination}. '
            f'Accuracy: {total_correct / (len(question_ids) + finished_task_number)}'
        )