run_infer.py

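"""Run inference for the logic-reasoning evaluation (ProntoQA / ProofWriter).

For each dataset instance, the script creates a per-process workspace, copies
logic_inference.py into it, starts a Docker SSH sandbox, runs an OpenDevin agent
on the filled-in instruction template, and scores the agent's final
multiple-choice answer against the ground truth.
"""
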
import asyncio
import logging
import os
import pathlib
import shutil

import pandas as pd
from datasets import load_dataset

from evaluation.swe_bench.swe_env_box import DockerSSHBox
from evaluation.utils.shared import (
    EvalMetadata,
    codeact_user_response,
    make_metadata,
    prepare_dataset,
    run_evaluation,
)
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import config, get_llm_config_arg, get_parser
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.llm.llm import LLM
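
# Per-agent hooks keyed by agent class name: a canned "fake user" reply that keeps the
# agent running without a human in the loop, and an instruction suffix telling the agent
# to message its final answer and then exit.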
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': 'When you think you have solved the question, please first send your answer to user through message and then exit.\n'
}


def get_choice(answer_str):
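    """Extract the leading multiple-choice letter (A-H) from a model answer string, if present."""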
    choices = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
        'A)', 'B)', 'C)', 'D)', 'E)', 'F)', 'G)', 'H)',
        'A.', 'B.', 'C.', 'D.', 'E.', 'F.', 'G.', 'H.',
    ]
    for c in choices:
        if answer_str.startswith(c):
            return c.replace(')', '')
    if answer_str.startswith(':'):
        return answer_str.replace(':', '').replace('.', '').strip()
    return None


def get_test_result(
    model_answer: str,
    ground_truth: str,
) -> dict[str, bool]:
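    """Compare the choice extracted from the model's answer with the gold answer letter.

    Returns a dict with a single boolean 'result' key.
    """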
    gold_answer = ground_truth.replace('(', '').replace(')', '').strip()
    answer_str = model_answer if model_answer is not None else ''
    prediction = get_choice(answer_str)

    indicators = [
        'the correct option is',
        'the correct answer is',
        'The correct answer is',
        'The correct option is',
        'Thus, the answer is',
    ]
    if prediction is None:
        for indicator in indicators:
            if answer_str.find(indicator) >= 0:
                answer_str = answer_str.split(indicator)[1].strip()
                prediction = get_choice(answer_str)
                break

    isTrue = prediction == gold_answer
    test_result = {'result': isTrue}
    return test_result


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
):
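    """Evaluate the agent on one dataset row: set up a per-process workspace and sandbox,
    run the agent on the filled-in instruction template, and score its final answer.
    """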
    # Create the agent
    agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config))
    old_workspace_mount_path = config.workspace_mount_path
    old_workspace_base = config.workspace_base
    try:
        workspace_mount_path = os.path.join(
            config.workspace_mount_path, '_eval_workspace'
        )
        # create a process-specific workspace dir
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)

        # point the config at the per-process workspace
        config.workspace_base = workspace_mount_path
        config.workspace_mount_path = workspace_mount_path

        # Set up the logger properly, so multiprocessing can be used to parallelize the evaluation
        if reset_logger:
            # Set up logger
            log_file = os.path.join(
                metadata.eval_output_dir, 'logs', f'instance_{instance["id"]}.log'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            # add back the console handler to print ONE line
            logger.addHandler(get_console_handler())
            logger.info(
                f'Starting evaluation for instance {instance["id"]}.\nLOG: tail -f {log_file}'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            )
            logger.addHandler(file_handler)

        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

        # sandbox = DockerSSHBox()
        logic_inference_path = os.path.join(workspace_mount_path, 'logic_inference.py')
        if not os.path.exists(logic_inference_path):
            shutil.copyfile(
                './evaluation/logic_reasoning/logic_inference.py', logic_inference_path
            )
        logger.info(f'logic_inference.py copied to {workspace_mount_path}')

        cache_dir = os.path.join(workspace_mount_path, '.cache_program')
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        # Prepare the instruction
        with open('./evaluation/logic_reasoning/instruction.txt', 'r') as f:
            instruction = f.read()

        instance_logic_programs = instance['raw_logic_programs'][0].strip()
        instruction = instruction.replace('[[dataset_name]]', dataset_name)
        instruction = instruction.replace('[[logic_programs]]', instance_logic_programs)
        instruction = instruction.replace(
            '[[logic_inference_path.py]]', logic_inference_path
        )

        # NOTE: You can actually set slightly different instructions for different agents
        instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]

        # use a session id for concurrent evaluation
        sid = instance['id'] + '_' + str(os.getpid())
        sandbox = DockerSSHBox(sid=sid)
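        # the copied logic_inference.py needs Pyke (scitools-pyke), so install it in the sandbox before the agent runs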
        exit_code, command_output = sandbox.execute('pip install scitools-pyke')

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_agent_controller(
                agent,
                instruction,
                max_iterations=metadata.max_iterations,
                max_budget_per_task=config.max_budget_per_task,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                    agent.__class__.__name__
                ),
                sandbox=sandbox,
                sid=sid,
            )
        )

        # ======= Attempt to evaluate the agent's edits =======
        # If you are working on a simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
        # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
        if state is None:
            raise ValueError('State should not be None.')

        final_message = ''
        messages = []
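        # Walk the event history from newest to oldest; the first event whose content is a
        # quoted single letter ("'A'", "'B'" or "'C'") is taken as the agent's final answer.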
        for event in state.history.get_events(reverse=True):
            # will this be a MessageAction?
            # TODO we can filter for types of events if we know what to expect
            messages.append(event.content)
            if str(event.content) in ["'A'", "'B'", "'C'"]:
                final_message = event.content
                break

        final_message = final_message.strip("'")
        logger.info(
            f'Predicted answer: {final_message}, Ground truth: {instance["answer"]}'
        )
        test_result = get_test_result(
            model_answer=final_message, ground_truth=instance['answer']
        )
        metrics = state.metrics.get() if state.metrics else None

        # history is now available as a stream of events, rather than a list of (Action, Observation) pairs;
        # for compatibility with the existing output format, we remake the pairs here
        # (remove when it becomes unnecessary)
        histories = state.history.compatibility_for_eval_history_pairs()

        # Save the output
        output = {
            'id': instance['id'],
            'instance': instance,
            'instruction': instruction,
            # 'metadata': metadata.model_dump(),
            'history': histories,
            'metrics': metrics,
            'final_message': final_message,
            'messages': messages,
            'error': state.last_error if state and state.last_error else None,
            'test_result': test_result,
        }
    except Exception:
        logger.error('Process instance failed')
        raise
    finally:
        config.workspace_mount_path = old_workspace_mount_path
        config.workspace_base = old_workspace_base

    # Close the sandbox
    sandbox.close()
    return output


if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--dataset',
        type=str,
        help='the logic reasoning dataset to evaluate on {ProntoQA, ProofWriter}',
        default='ProntoQA',
    )
    parser.add_argument(
        '--data_split',
        type=str,
        help='data split to evaluate on {validation}',  # right now we only support the validation split
        default='validation',
    )
    args, _ = parser.parse_known_args()
    if args.directory:
        config.workspace_base = os.path.abspath(args.directory)
        print(f'Setting workspace base to {config.workspace_base}')

    dataset_name = args.dataset
    data_split = args.data_split
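
    # The dataset splits are pulled from the Hugging Face hub (renma/ProntoQA or renma/ProofWriter);
    # each row carries its pre-generated logic programs in the 'raw_logic_programs' column.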
    dataset = load_dataset(f'renma/{dataset_name}')
    logic_reasoning_tests = dataset[data_split]
    id_column = 'id'

    llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm
    logger.info(f'Config for evaluation: {config}')

    metadata = make_metadata(
        llm_config,
        dataset_name,
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(
        logic_reasoning_tests.to_pandas(), output_file, args.eval_n_limit, id_column
    )
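
    # run_evaluation fans the prepared instances out over `eval_num_workers` worker processes,
    # calling process_instance on each row and writing one JSON line per instance to output.jsonl.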
    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
        id_column,
    )