run_infer.py

import asyncio
import logging
import os
import pathlib
import shutil

import pandas as pd
from datasets import load_dataset

from evaluation.swe_bench.swe_env_box import DockerSSHBox
from evaluation.utils.shared import (
    EvalMetadata,
    codeact_user_response,
    make_metadata,
    monologue_user_response,
    prepare_dataset,
    run_evaluation,
)
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import config, get_llm_config_arg, get_parser
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.llm.llm import LLM

AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'MonologueAgent': monologue_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': 'When you think you have solved the question, please first send your answer to user through message and then exit.\n'
}


def get_choice(answer_str):
    # Bare letters are matched first, then letters followed by ')' or '.'
    choices = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
        'A)', 'B)', 'C)', 'D)', 'E)', 'F)', 'G)', 'H)',
        'A.', 'B.', 'C.', 'D.', 'E.', 'F.', 'G.', 'H.',
    ]
    for c in choices:
        if answer_str.startswith(c):
            return c.replace(')', '')
    if answer_str.startswith(':'):
        return answer_str.replace(':', '').replace('.', '').strip()
    return None
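
# A quick illustration of what `get_choice` extracts (hypothetical inputs, not part
# of the original script):
#   get_choice('A) The statement is true.')  -> 'A'
#   get_choice('B. Uncertain')  -> 'B'
#   get_choice(': C.')  -> 'C'
#   get_choice('The answer is A')  -> None (no leading choice marker)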


def get_test_result(
    model_answer: str,
    ground_truth: str,
) -> dict[str, bool]:
    gold_answer = ground_truth.replace('(', '').replace(')', '').strip()
    answer_str = model_answer if model_answer is not None else ''
    prediction = get_choice(answer_str)
    indicators = [
        'the correct option is',
        'the correct answer is',
        'The correct answer is',
        'The correct option is',
        'Thus, the answer is',
    ]
    if prediction is None:
        # Fall back to scanning for an indicator phrase and parsing the text after it
        for indicator in indicators:
            if answer_str.find(indicator) >= 0:
                answer_str = answer_str.split(indicator)[1].strip()
                prediction = get_choice(answer_str)
                break
    is_correct = prediction == gold_answer
    test_result = {'result': is_correct}
    return test_result
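
# A hedged, illustrative check (example values, not drawn from the dataset); the
# ground truth may arrive as '(B)' or 'B', and parentheses are stripped either way:
#   get_test_result(model_answer='The correct answer is B.', ground_truth='(B)')
#   -> {'result': True}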


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
):
    # Create the agent
    agent = Agent.get_cls(metadata.agent_class)(llm=LLM(llm_config=metadata.llm_config))
    old_workspace_mount_path = config.workspace_mount_path
    old_workspace_base = config.workspace_base
    sandbox = None
    try:
        workspace_mount_path = os.path.join(
            config.workspace_mount_path, '_eval_workspace'
        )
        # create a process-specific workspace dir
        workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid()))
        pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True)
        # point the config at the process-specific workspace
        config.workspace_base = workspace_mount_path
        config.workspace_mount_path = workspace_mount_path

        # Set up the logger properly, so that multi-processing can be used to parallelize the evaluation
        if reset_logger:
            log_file = os.path.join(
                metadata.eval_output_dir, 'logs', f'instance_{instance["id"]}.log'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            # add back the console handler to print ONE line
            logger.addHandler(get_console_handler())
            logger.info(
                f'Starting evaluation for instance {instance["id"]}.\nLOG: tail -f {log_file}'
            )
            # Remove all existing handlers from the logger
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            )
            logger.addHandler(file_handler)

        logger.info(f'Process-specific workspace mounted at {workspace_mount_path}')

        logic_inference_path = os.path.join(workspace_mount_path, 'logic_inference.py')
        if not os.path.exists(logic_inference_path):
            shutil.copyfile(
                './evaluation/logic_reasoning/logic_inference.py', logic_inference_path
            )
        logger.info(f'logic_inference.py copied to {workspace_mount_path}')

        cache_dir = os.path.join(workspace_mount_path, '.cache_program')
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        # Prepare the instruction
        with open('./evaluation/logic_reasoning/instruction.txt', 'r') as f:
            instruction = f.read()
        instance_logic_programs = instance['raw_logic_programs'][0].strip()
        instruction = instruction.replace('[[dataset_name]]', dataset_name)
        instruction = instruction.replace('[[logic_programs]]', instance_logic_programs)
        instruction = instruction.replace(
            '[[logic_inference_path.py]]', logic_inference_path
        )

        # NOTE: You can actually set slightly different instructions for different agents
        instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]

        # use a session id for concurrent evaluation
        sid = instance['id'] + '_' + str(os.getpid())
        sandbox = DockerSSHBox(sid=sid)
        exit_code, command_output = sandbox.execute('pip install scitools-pyke')

        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        state: State | None = asyncio.run(
            run_agent_controller(
                agent,
                instruction,
                max_iterations=metadata.max_iterations,
                fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                    agent.__class__.__name__
                ),
                sandbox=sandbox,
                sid=sid,
            )
        )

        # ======= Attempt to evaluate the agent's edits =======
        # If you are working on a simpler benchmark that only evaluates the final model output
        # (e.g., in a MessageAction), you can simply get the LAST `MessageAction` from the
        # returned `state.history` and parse it for evaluation.
        if state is None:
            raise ValueError('State should not be None.')

        final_message = ''
        messages = []
        for event in state.history.get_events(reverse=True):
            # will this be a MessageAction?
            # TODO: we can filter for types of events if we know what to expect
            messages.append(event.content)
            if str(event.content) in ["'A'", "'B'", "'C'"]:
                final_message = event.content
                break

        final_message = final_message.strip("'")
        logger.info(
            f'Predicted answer: {final_message}, Ground truth: {instance["answer"]}'
        )
        test_result = get_test_result(
            model_answer=final_message, ground_truth=instance['answer']
        )
        metrics = state.metrics.get() if state.metrics else None

        # history is now available as a stream of events, rather than a list of pairs of (Action, Observation)
        # for compatibility with the existing output format, we can remake the pairs here
        # remove when it becomes unnecessary
        histories = state.history.compatibility_for_eval_history_pairs()

        # Save the output
        output = {
            'id': instance['id'],
            'instance': instance,
            'instruction': instruction,
            # 'metadata': metadata.model_dump(),
            'history': histories,
            'metrics': metrics,
            'final_message': final_message,
            'messages': messages,
            'error': state.last_error if state and state.last_error else None,
            'test_result': test_result,
        }
    except Exception:
        logger.error('Process instance failed')
        raise
    finally:
        config.workspace_mount_path = old_workspace_mount_path
        config.workspace_base = old_workspace_base
        # Close the sandbox only if it was created, so a setup failure isn't masked by a NameError
        if sandbox is not None:
            sandbox.close()
    return output


if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--dataset',
        type=str,
        help='the logic reasoning dataset to evaluate on {ProntoQA, ProofWriter}',
        default='ProntoQA',
    )
    parser.add_argument(
        '--data_split',
        type=str,
        help='data split to evaluate on {validation}',  # right now we only support the validation split
        default='validation',
    )
    args, _ = parser.parse_known_args()
    if args.directory:
        config.workspace_base = os.path.abspath(args.directory)
        print(f'Setting workspace base to {config.workspace_base}')

    dataset_name = args.dataset
    data_split = args.data_split
    dataset = load_dataset(f'renma/{dataset_name}')
    logic_reasoning_tests = dataset[data_split]
    id_column = 'id'

    llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm
    logger.info(f'Config for evaluation: {config}')

    metadata = make_metadata(
        llm_config,
        dataset_name,
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    # evaluate only the selected split, passed to prepare_dataset as a pandas DataFrame
    instances = prepare_dataset(
        logic_reasoning_tests.to_pandas(), output_file, args.eval_n_limit, id_column
    )
    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
        id_column,
    )
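
# Example invocation (a sketch, assuming the script is run from the repository root so
# the relative './evaluation/logic_reasoning/...' paths resolve; agent, iteration, and
# LLM flags come from `get_parser()` and are omitted here):
#   python evaluation/logic_reasoning/run_infer.py --dataset ProntoQA --data_split validation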