# run_infer.py

import asyncio
import os
from typing import Any

import pandas as pd

from evaluation.benchmarks.toolqa.utils import encode_question, eval_answer, get_data
from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    codeact_user_response,
    compatibility_for_eval_history_pairs,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    get_parser,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import CmdOutputObservation
from openhands.runtime.base import Runtime
from openhands.utils.async_utils import call_async_from_sync

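# Per-agent-class hooks: a canned "user" reply used in place of a real user whenever
# the agent asks for input during evaluation, and a suffix appended to every
# instruction telling the agent to call the "finish" tool when it is done.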
AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': 'When you think you have completed the request, please finish the interaction using the "finish" tool.\n'
}


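# Build the per-run AppConfig: a plain python:3.12 sandbox container with auto-lint
# enabled, no host workspace mounted, and the iteration cap taken from the metadata.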
def get_config(
    metadata: EvalMetadata,
) -> AppConfig:
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        runtime='eventstream',
        max_iterations=metadata.max_iterations,
        sandbox=SandboxConfig(
            base_container_image='python:3.12-bookworm',
            enable_auto_lint=True,
            use_host_network=False,
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(metadata.llm_config)
    return config


def initialize_runtime(runtime: Runtime):
    """Initialize the runtime for the agent.

    This function is called before the runtime is used to run the agent.
    """
    logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}")
    obs: CmdOutputObservation

    # Set up the working directory inside the sandbox
    action = CmdRunAction(command='mkdir -p /workspace')
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert obs.exit_code == 0

    action = CmdRunAction(command='cd /workspace')
    logger.info(action, extra={'msg_type': 'ACTION'})
    obs = runtime.run_action(action)
    assert obs.exit_code == 0

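    # NOTE: `args` is the module-level namespace parsed in the __main__ block below;
    # the Wolfram Alpha app id is exported into the sandbox, presumably so the agent
    # can call the Wolfram Alpha API for ToolQA questions that need it.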
    runtime.add_env_vars({'WOLFRAM_ALPHA_APPID': args.wolfram_alpha_appid})
    logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}")


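# Per-instance pipeline: build a fresh config and sandbox, send the encoded question
# to the agent, then grade the agent's final message against the gold answer with
# `eval_answer` and package everything into an `EvalOutput` record.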
def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = True):
    config = get_config(metadata)
    # the dataset's `qid` column is renamed to `instance_id` in the __main__ block below
    qid = instance.instance_id
    question = instance.question
    answer = instance.answer

    # Set up the logger properly so you can run multiprocessing to parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, qid, log_dir)
    else:
        logger.info(f'Starting evaluation for instance {qid}.')

    # Prepare the instruction
    instruction = encode_question(question)
    instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
    # NOTE: you can set slightly different instructions for different agents
    instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class]
    logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'})

    runtime = create_runtime(config)
    call_async_from_sync(runtime.connect)
    initialize_runtime(runtime)

    # Run the agent (similar to the `main` function) and get the final task state
    state: State | None = asyncio.run(
        run_controller(
            config=config,
            initial_user_action=MessageAction(content=instruction),
            runtime=runtime,
            fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[
                metadata.agent_class
            ],
        )
    )

    # ======= Evaluate the agent's answer =======
    # If you are working on a simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
    # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
    if state is None:
        raise ValueError('State should not be None.')

    # retrieve the last message from the agent
    last_agent_message = state.get_last_agent_message()
    model_answer_raw = last_agent_message.content if last_agent_message else ''

    # attempt to parse the model answer and compare it against the gold answer
    correct = eval_answer(str(model_answer_raw), str(answer))
    logger.info(f'Final message: {model_answer_raw} | Correctness: {correct}')

    metrics = state.metrics.get() if state.metrics else None

    # history is now available as a stream of events, rather than a list of (Action, Observation) pairs;
    # for compatibility with the existing output format, we remake the pairs here.
    # Remove this when it becomes unnecessary.
    histories = compatibility_for_eval_history_pairs(state.history)

    # Save the output
    output = EvalOutput(
        instance_id=qid,
        test_result={
            'model_answer_raw': model_answer_raw,
            'correct': correct,
        },
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
    )
    return output


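# A minimal invocation sketch (illustrative, not authoritative). --dataset, --hardness,
# and --wolfram_alpha_appid are defined below; the remaining flag spellings are assumed
# from the attributes referenced in this block and from `get_parser()`, and the LLM
# config name is a placeholder:
#
#   python -m evaluation.benchmarks.toolqa.run_infer \
#       --llm_config eval_gpt4 \
#       --agent_cls CodeActAgent \
#       --dataset flight \
#       --hardness easy \
#       --eval_n_limit 10 \
#       --eval_num_workers 1 \
#       --wolfram_alpha_appid YOUR_WOLFRAMALPHA_APPID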
if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--dataset',
        type=str,
        help='Which dataset to evaluate from ToolQA. ToolQA contains 8 datasets, namely agenda, airbnb, coffee, dblp, flight, gsm8k, scirex, yelp. The default is --dataset flight.',
        default='flight',
    )
    parser.add_argument(
        '--hardness',
        type=str,
        help='Which level of difficulty to evaluate from ToolQA. ToolQA contains 2 levels of hardness, namely easy and hard. The default is --hardness easy.',
        default='easy',
    )
    parser.add_argument(
        '--wolfram_alpha_appid',
        type=str,
        help='Wolfram Alpha app ID to use for Wolfram Alpha-related questions.',
        default='YOUR_WOLFRAMALPHA_APPID',
    )

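    # The remaining flags used below (--llm_config, --agent_cls, --eval_note,
    # --eval_output_dir, --eval_n_limit, --eval_num_workers) are assumed to be
    # defined by `get_parser()` upstream.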
    args, _ = parser.parse_known_args()

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    dataset_choices = [
        'agenda',
        'airbnb',
        'coffee',
        'dblp',
        'flight',
        'gsm8k',
        'scirex',
        'yelp',
    ]
    if args.dataset not in dataset_choices:
        raise ValueError(
            'Please choose from agenda, airbnb, coffee, dblp, flight, gsm8k, scirex, yelp for dataset.'
        )
    if args.hardness not in ['easy', 'hard']:
        raise ValueError('Please choose from easy and hard for hardness.')

    toolqa_test = pd.DataFrame(get_data(args.dataset, args.hardness))
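    # `prepare_dataset`/`run_evaluation` key instances by an `instance_id` column,
    # which is presumably why `qid` is renamed below and read back as
    # `instance.instance_id` in `process_instance`.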
    toolqa_test.rename(columns={'qid': 'instance_id'}, inplace=True)

    metadata = make_metadata(
        llm_config,
        f'toolqa-{args.dataset}-{args.hardness}',
        args.agent_cls,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(toolqa_test, output_file, args.eval_n_limit)

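    # Run `process_instance` over every remaining instance with `eval_num_workers`
    # parallel workers; results presumably end up as one EvalOutput per line in
    # output.jsonl, judging by the file extension.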
    run_evaluation(
        instances, metadata, output_file, args.eval_num_workers, process_instance
    )