# run_infer.py
import asyncio
import os
import re

import nltk
import pandas as pd
from datasets import load_dataset

from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    compatibility_for_eval_history_pairs,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    parse_arguments,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import MessageAction

# Only CodeActAgent can delegate to BrowsingAgent
SUPPORTED_AGENT_CLS = {'CodeActAgent'}
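

# With max_iterations=1 the agent gets exactly one step, so its only viable
# move is to delegate the query to the browsing agent; the sandbox is a plain
# python:3.12-bookworm container with host networking disabled.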
def get_config(
    metadata: EvalMetadata,
) -> AppConfig:
    assert (
        metadata.max_iterations == 1
    ), 'max_iterations must be 1 for browsing delegation evaluation.'
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        runtime='eventstream',
        max_iterations=metadata.max_iterations,
        sandbox=SandboxConfig(
            base_container_image='python:3.12-bookworm',
            enable_auto_lint=False,
            use_host_network=False,
        ),
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(metadata.llm_config)
    return config
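

# Each instance is scored on how faithfully the agent copies the query into
# its delegate action: exact string match plus character-level edit distance.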
def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
) -> EvalOutput:
    config = get_config(metadata)

    # Set up the logger properly, so multi-processing can parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)
    else:
        logger.info(f'Starting evaluation for instance {instance.instance_id}.')

    instruction = (
        f'You can delegate browsing tasks to a browser agent. '
        f"For example, for query 'Who is the president of the United States?', you can delegate the task to a browser agent via <execute_browse> Who is the president of the United States? </execute_browse>.\n"
        f'Now, solve the following query: "{instance.instruction}"\n'
        f'NOTE: You should copy the "query" as is into the <execute_browse> tag. DO NOT change ANYTHING in the query.'
    )
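    # The prompt above asks the agent to copy the query verbatim into the
    # <execute_browse> tag; the fidelity of that copy is what gets scored below.
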
    runtime = create_runtime(config)

    state: State | None = asyncio.run(
        run_controller(
            config=config,
            initial_user_action=MessageAction(content=instruction),
            runtime=runtime,
        )
    )

    if state is None:
        raise ValueError('State should not be None.')

    metrics = state.metrics.get() if state.metrics else None
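
    # Everything below is pure post-processing of the finished run: rebuild the
    # (action, observation) pairs, then score the delegate action.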
    # history is now available as a stream of events, rather than a list of
    # (Action, Observation) pairs; for compatibility with the existing output
    # format, we remake the pairs here (remove when it becomes unnecessary)
    histories = compatibility_for_eval_history_pairs(state.history)

    # find the last delegate action
    last_delegate_action = None
    result = {}
    for action, _ in histories:
        if action['action'] == 'delegate':
            last_delegate_action = action
            instruction_for_delegate = action['args']['inputs']['task']
            # parse `browse_actions` out of `instruction_for_delegate`, which is
            # built as: task = f'{thought}. I should start with: {browse_actions}'
            match = re.search(r'I should start with: (.*)', instruction_for_delegate)
            if match is None:
                # the delegated task does not follow the expected template; skip it
                continue
            instruction_for_delegate = match.group(1)
            # edit distance between instance.instruction and instruction_for_delegate
            edit_distance = nltk.edit_distance(
                instance.instruction, instruction_for_delegate
            )
            is_exact_match = (
                instance.instruction.strip() == instruction_for_delegate.strip()
            )
            result['edit_distance'] = edit_distance
            result['is_exact_match'] = is_exact_match
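
    # Note: only the last delegate action is scored; any earlier one is
    # overwritten on each pass through the loop.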
    # Save the output
    output = EvalOutput(
        instance_id=instance.instance_id,
        instruction=instruction,
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
        test_result={
            'query': instance.instruction,
            'action': last_delegate_action,
            'result': result,
        },
    )
    return output
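

# A minimal sketch of how the per-instance scores could be aggregated once the
# run finishes. Assumptions: run_evaluation writes one EvalOutput as JSON per
# line of output.jsonl, and `summarize_results` is a hypothetical helper that
# is not called anywhere in this script.
def summarize_results(output_file: str) -> dict:
    import json

    n = 0
    exact = 0
    total_distance = 0
    with open(output_file) as f:
        for line in f:
            scores = json.loads(line).get('test_result', {}).get('result', {})
            if 'is_exact_match' in scores:
                n += 1
                exact += int(scores['is_exact_match'])
                total_distance += scores['edit_distance']
    return {
        'n_scored': n,
        'exact_match_rate': exact / n if n else 0.0,
        'mean_edit_distance': total_distance / n if n else 0.0,
    }
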
if __name__ == '__main__':
    args = parse_arguments()

    dataset = load_dataset('OpenHands/eval-browsing-instructions')
    dataset = dataset['train'].to_pandas()
    assert dataset.columns.tolist() == ['instance_id', 'instruction']

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        if llm_config is None:
            raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    metadata = make_metadata(
        llm_config,
        'browsing_delegation',
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )

    if metadata.agent_class not in SUPPORTED_AGENT_CLS:
        raise ValueError(
            f'Agent class {metadata.agent_class} not supported with AgentDelegation.'
        )

    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    instances = prepare_dataset(dataset, output_file, args.eval_n_limit)

    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
    )
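
# Example invocation (the flag names below come from parse_arguments and are
# an assumption; check `python run_infer.py --help` for the exact spelling):
#   python evaluation/browsing_delegation/run_infer.py \
#       --llm-config eval_gpt4 --max-iterations 1 --eval-num-workers 2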