run_infer.py

  1. """Overview:
  2. This code implements the evaluation of agents on the GPQA Benchmark with Open Book setting.
  3. - The benchmark consists of 448 high-quality and extremely difficult multiple-choice questions in the domains of biology, physics, and chemistry. The questions are intentionally designed to be "Google-proof," meaning that even highly skilled non-expert validators achieve only 34% accuracy despite unrestricted access to the web.
  4. - Even experts in the corresponding domains achieve only 65% accuracy.
  5. - State-of-the-art AI systems achieve only 39% accuracy on this challenging dataset.
  6. Accurate solving of above graduate level questions would require both tool use (e.g., python for calculations) and web-search for finding related facts as information required for the questions might not be part of the LLM knowledge / training data.
  7. Further references:
  8. - https://arxiv.org/pdf/2311.12022
  9. - https://paperswithcode.com/dataset/gpqa
  10. - https://github.com/idavidrein/gpqa
  11. TODOs:
  12. - Add evaluation on other Agent classes (e.g., MonologueAgent)
  13. - Batch inference and evaluation of agents on the GPQA Benchmark.
  14. """

import asyncio
import logging
import os
import pathlib
import random
import re

import pandas as pd
from datasets import load_dataset

from evaluation.utils.shared import (
    EvalMetadata,
    codeact_user_response,
    make_metadata,
    monologue_user_response,
    prepare_dataset,
    run_evaluation,
)
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import config, get_llm_config_arg, get_parser
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.llm.llm import LLM

AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'MonologueAgent': monologue_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
}
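
# Note: the suffix above is appended verbatim to each task instruction inside
# process_instance (instruction += AGENT_CLS_TO_INST_SUFFIX[...]), nudging the
# CodeActAgent to report its answer in the requested format before exiting.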


def parse_final_answer(final_answer: str) -> str:
    """Parse the final message generated by the agent and extract the final answer.

    The final answer is usually enclosed in the format:
    <<FINAL_ANSWER||
    <insert correct answer here>
    ||FINAL_ANSWER>>
    """
    pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
    match = pattern.search(final_answer)
    if match:
        return match.group(1).strip()
    else:
        return 'No final answer found in the provided string.'
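
# Illustrative behavior of the parser (assumed example strings, not taken from
# any real agent transcript):
#
#     parse_final_answer('Reasoning...\n<<FINAL_ANSWER||\nB\n||FINAL_ANSWER>>')
#     # -> 'B'
#     parse_final_answer('The agent never emitted the marker.')
#     # -> 'No final answer found in the provided string.'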


def compare_answers(predicted_answer, ground_truth):
    """Compare the predicted answer with the ground truth answer."""
    return predicted_answer == ground_truth


def get_test_result(model_output, ground_truth):
    """Implements the evaluation logic for GPQA.

    Checks if the output of a given instance is correct (as per the ground truth).
    """
    # parse the final answer from the model output
    predicted_answer = parse_final_answer(model_output)
    # check if the model output matches the ground truth
    result = compare_answers(predicted_answer, ground_truth)
    return result
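
# End-to-end sketch (hypothetical model output; the ground truth is the shuffled
# correct letter produced by convert_instance_dict below):
#
#     get_test_result('... <<FINAL_ANSWER||\nC\n||FINAL_ANSWER>>', 'C')  # -> True
#     get_test_result('... <<FINAL_ANSWER||\nA\n||FINAL_ANSWER>>', 'C')  # -> False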


def convert_instance_dict(instance):
    """Used for preprocessing the HF dataset into a format that can be used by the agent.

    Reads and extracts relevant information from the dataset instance.
    """
    out_instance_dict = {}
    out_instance_dict['question'] = instance['Question']
    correct_answer = instance['Correct Answer']
    out_instance_dict['choices'] = [
        correct_answer,
        instance['Incorrect Answer 1'],
        instance['Incorrect Answer 2'],
        instance['Incorrect Answer 3'],
    ]
    # Randomize the order of choices
    random.shuffle(out_instance_dict['choices'])
    # Find the index of the correct answer after shuffling and store it as a letter (A/B/C/D)
    correct_index = out_instance_dict['choices'].index(correct_answer)
    correct_letter = chr(
        65 + correct_index
    )  # Convert index (0-3) to corresponding letter (A-D)
    out_instance_dict['correct_solution'] = correct_letter
    return out_instance_dict
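
# Shape of the transformation (hypothetical row; the capitalized column names are
# the actual GPQA dataset columns). If the correct answer lands at index 2 after
# shuffling, chr(65 + 2) == 'C', so 'correct_solution' becomes 'C':
#
#     {'Question': 'Q?', 'Correct Answer': 'x', 'Incorrect Answer 1': 'y', ...}
#     -> {'question': 'Q?', 'choices': ['y', 'z', 'x', 'w'], 'correct_solution': 'C'}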


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
):
    # Create the agent
    agent = Agent.get_cls(metadata.agent_class)(llm=LLM(llm_config=metadata.llm_config))

    old_workspace_mount_path = config.workspace_mount_path
    old_workspace_base = config.workspace_base

    try:
        workspace_mount_path = os.path.join(
            config.workspace_mount_path, '_eval_workspace'
        )
        # create process-specific workspace dir
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)

        # reset workspace to config
        config.workspace_base = workspace_mount_path
        config.workspace_mount_path = workspace_mount_path

        # Set up the logger properly, so you can run multiprocessing to parallelize the evaluation
        if reset_logger:
            # Set up logger
            log_file = os.path.join(
                metadata.eval_output_dir, 'logs', f'instance_{instance.instance_id}.log'
            )
            # Remove all existing handlers from logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            # add back the console handler to print ONE line
            logger.addHandler(get_console_handler())
            logger.info(
                f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
            )
            # Remove all existing handlers from logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            )
            logger.addHandler(file_handler)
        else:
            logger.info(f'Starting evaluation for instance {instance.instance_id}.')

        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

        # ======= Run the agent on the instance =======
        # Prepare the instruction for the agent using the suggested format in the GPQA codebase
        instruction = f"""
What is the correct answer to this question:\n
{instance['question']}\n
Choices:\n
(A) {instance['choices'][0]}\n
(B) {instance['choices'][1]}\n
(C) {instance['choices'][2]}\n
(D) {instance['choices'][3]}\n
\n\n
MOST IMPORTANT: Format your response as follows:
<<FINAL_ANSWER||
<insert correct answer here, must be one of A, B, C, D> (Please don't use any additional characters. Just the letter of the correct answer (A/B/C/D).)
||FINAL_ANSWER>>
Additional Instructions:
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
"""
        # NOTE: You can actually set slightly different instructions for different agents
        instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_agent_controller(
                agent,
                instruction,
                max_iterations=metadata.max_iterations,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                    agent.__class__.__name__
                ),
                sid=instance.instance_id,
            )
        )
        assert state is not None, 'State should not be None.'

        # ======= Attempt to evaluate the agent's answer =======
        # get the final message from the state history (default to empty if not found)
        final_message = state.history.get_last_agent_message()
        logger.info(f'Final message generated by the agent: {final_message}')

        test_result = get_test_result(final_message, instance.correct_solution)

        # If you are working on a simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
        # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
        metrics = state.metrics.get() if state.metrics else None

        # history is now available as a stream of events, rather than a list of pairs of (Action, Observation)
        # for compatibility with the existing output format, we can remake the pairs here
        # remove when it becomes unnecessary
        histories = state.history.compatibility_for_eval_history_pairs()

        # Save the output
        output = {
            'task_id': instance.task_id,
            'instance_id': instance.instance_id,
            'instruction': instruction,
            'metadata': metadata.model_dump(),
            'history': histories,
            'metrics': metrics,
            'error': state.last_error if state and state.last_error else None,
            'test_result': test_result,
        }
    except Exception:
        logger.error('Process instance failed')
        raise
    finally:
        config.workspace_mount_path = old_workspace_mount_path
        config.workspace_base = old_workspace_base
    return output


if __name__ == '__main__':
    parser = get_parser()
    # data split must be one of 'gpqa_main', 'gpqa_diamond', 'gpqa_experts', 'gpqa_extended'
    parser.add_argument(
        '--data-split',
        type=str,
        choices=['gpqa_main', 'gpqa_diamond', 'gpqa_experts', 'gpqa_extended'],
        default='gpqa_diamond',
        help='data split to evaluate, e.g. gpqa_diamond',
    )
    args, _ = parser.parse_known_args()

    llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm
    logger.info(f'Config for evaluation: {config}')

    # NOTE: It is preferable to load datasets from Hugging Face and perform post-processing,
    # so we don't need to manage file uploading to OpenDevin's repo
    dataset = load_dataset('Idavidrein/gpqa', args.data_split)
    gpqa_dataset = dataset['train']
    # preprocess the dataset
    gpqa_dataset = gpqa_dataset.map(convert_instance_dict)
    gpqa_dataset = gpqa_dataset.to_pandas()
    # Add new columns 'instance_id' and 'task_id' from the index
    gpqa_dataset['instance_id'] = gpqa_dataset.index
    gpqa_dataset['task_id'] = gpqa_dataset.index
    # gpqa_dataset = dataset['train'].to_pandas().sort_values(by='id').reset_index(drop=True)

    metadata = make_metadata(
        llm_config=llm_config,
        dataset_name='gpqa',
        agent_class=args.agent_cls,
        max_iterations=args.max_iterations,
        eval_note=args.eval_note,
        eval_output_dir=args.eval_output_dir,
        data_split=args.data_split,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    prepared_dataset = prepare_dataset(
        gpqa_dataset, output_file, args.eval_n_limit, 'task_id'
    )

    run_evaluation(
        dataset=prepared_dataset,
        metadata=metadata,
        output_file=output_file,
        num_workers=args.eval_num_workers,
        process_instance_func=process_instance,
        id_column='task_id',
    )
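
# Example invocation (a sketch: the flag spellings below assume the shared
# OpenDevin eval parser returned by `get_parser`, and the script path assumes the
# standard evaluation layout; adjust both to your checkout):
#
#     python evaluation/gpqa/run_infer.py \
#         --agent-cls CodeActAgent \
#         --llm-config <your_llm_config> \
#         --data-split gpqa_diamond \
#         --eval-n-limit 10 \
#         --eval-num-workers 1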