run_infer.py

import asyncio
import json
import logging
import os

import browsergym.webarena  # noqa: F401 register webarena tasks as gym environments
import gymnasium as gym
import pandas as pd

from evaluation.utils.shared import (
    EvalMetadata,
    make_metadata,
    prepare_dataset,
    run_evaluation,
)
from opendevin.controller.agent import Agent
from opendevin.controller.state.state import State
from opendevin.core.config import config, get_llm_config_arg, parse_arguments
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.llm.llm import LLM
from opendevin.runtime.docker.ssh_box import DockerSSHBox
from opendevin.runtime.tools import RuntimeTool

SUPPORTED_AGENT_CLS = {'BrowsingAgent'}

docker_ssh_box: DockerSSHBox | None = None


def get_sandbox():
    # Lazily create a single DockerSSHBox and reuse it across all evaluated instances
    global docker_ssh_box
    if docker_ssh_box is None:
        docker_ssh_box = DockerSSHBox()
    return docker_ssh_box


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
):
    # Create the agent
    agent = Agent.get_cls(metadata.agent_class)(llm=LLM(llm_config=metadata.llm_config))
    env_id = instance.id
    # Set up the logger properly, so you can run multiprocessing to parallelize the evaluation
    if reset_logger:
        # Set up logger
        log_file = os.path.join(
            metadata.eval_output_dir, 'logs', f'instance_{env_id}.log'
        )
        # Remove all existing handlers from logger
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        # add back the console handler to print ONE line
        logger.addHandler(get_console_handler())
        logger.info(
            f'Starting evaluation for instance {env_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
        )
        # Remove all existing handlers again, so the rest of the run logs only to the per-instance file
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        )
        logger.addHandler(file_handler)
    else:
        logger.info(f'Starting evaluation for instance {env_id}.')

    # Here's how you can run the agent (similar to the `main` function) and get the final task state
    runtime_tools_config = {
        RuntimeTool.BROWSER: {
            'browsergym_eval': env_id,
            'browsergym_eval_save_dir': metadata.eval_output_dir,
        }
    }
    state: State | None = asyncio.run(
        run_agent_controller(
            agent,
            'PLACEHOLDER_GOAL',  # the real goal is supplied by the browsergym env and read back from goal.txt below
            max_iterations=metadata.max_iterations,
            runtime_tools_config=runtime_tools_config,
            sandbox=get_sandbox(),
            sid=env_id,
        )
    )

    # ======= Attempt to evaluate the agent's environment impact =======
    # If you are working on a simpler benchmark that only evaluates the final model output (e.g., in a MessageAction),
    # you can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
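    # A minimal sketch of that simpler pattern (hypothetical; WebArena does not use it,
    # since the reward is read from the browsergym evaluation files below):
    #
    #     pairs = state.history.compatibility_for_eval_history_pairs()
    #     final_action, _final_obs = pairs[-1] if pairs else (None, None)
    #     # ...then parse the model's answer out of `final_action` for scoring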
    if state is None:
        raise ValueError('State should not be None.')

    metrics = state.metrics.get() if state.metrics else None

    browsergym_eval_dir = os.path.join(metadata.eval_output_dir, env_id.split('/')[1])
    # read goal
    with open(
        os.path.join(browsergym_eval_dir, 'goal.txt'), 'r', encoding='utf-8'
    ) as f:
        instruction = f.read()
    # read reward
    with open(
        os.path.join(browsergym_eval_dir, 'rewards.json'), 'r', encoding='utf-8'
    ) as f:
        rewards = json.load(f)
        reward = max(rewards)

    # history is now available as a stream of events, rather than a list of pairs of (Action, Observation)
    # for compatibility with the existing output format, we can remake the pairs here
    # remove when it becomes unnecessary
    histories = state.history.compatibility_for_eval_history_pairs()

    # Save the output
    output = {
        'instance_id': env_id,
        'instruction': instruction,
        'metadata': metadata.model_dump(),
        'history': histories,
        'metrics': metrics,
        'error': state.last_error if state and state.last_error else None,
        'test_result': reward,
    }
    return output


if __name__ == '__main__':
    args = parse_arguments()

    # Evaluate every WebArena task registered by the `browsergym.webarena` import above
    env_ids = [
        id for id in gym.envs.registry.keys() if id.startswith('browsergym/webarena')
    ]
    dataset = pd.DataFrame({'id': env_ids})
    id_column = 'id'

    llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm
    logger.info(f'Config for evaluation: {config}')

    metadata = make_metadata(
        llm_config,
        args.dataset_name,
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column)

    _ = get_sandbox()  # Initialize the sandbox

    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
        id_column,
    )
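
# Example invocation (illustrative only -- the script path and flag names are
# defined by the repository layout and `parse_arguments`, and may differ):
#
#   poetry run python evaluation/webarena/run_infer.py \
#       --agent-cls BrowsingAgent \
#       --llm-config <llm-config-name> \
#       --max-iterations 15 \
#       --eval-num-workers 2 \
#       --eval-n-limit 10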