run_infer.py

import asyncio
import importlib.util
import os

import pandas as pd

from evaluation.integration_tests.tests.base import BaseIntegrationTest, TestResult
from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    codeact_user_response,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    parse_arguments,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import MessageAction
from openhands.runtime.base import Runtime

FAKE_RESPONSES = {
    'CodeActAgent': codeact_user_response,
}
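# FAKE_RESPONSES maps an agent class name to a scripted "user" reply. There is no human in
# the loop during evaluation, so run_controller falls back to this function whenever the
# agent asks the user something; codeact_user_response (from evaluation.utils.shared) nudges
# the agent to keep working or wrap up on its own.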


def get_config(
    metadata: EvalMetadata,
    instance_id: str,
) -> AppConfig:
    """Build the per-instance app config used to run one integration test."""
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        runtime='eventstream',
        max_iterations=metadata.max_iterations,
        sandbox=SandboxConfig(
            # use default base_container_image
            enable_auto_lint=True,
            use_host_network=False,
            timeout=100,
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    if metadata.llm_config.log_completions:
        metadata.llm_config.log_completions_folder = os.path.join(
            metadata.eval_output_dir, 'llm_completions', instance_id
        )
        logger.info(
            f'Logging LLM completions for instance {instance_id} to '
            f'{metadata.llm_config.log_completions_folder}'
        )
    config.set_llm_config(metadata.llm_config)
    return config
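
# Note: get_config() is rebuilt for every test instance so each run gets its own
# llm_completions folder. The 'eventstream' runtime executes the agent inside a sandboxed
# container (Docker-backed in a typical OpenHands setup), and SandboxConfig.timeout is
# assumed here to be the per-command timeout in seconds.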


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
) -> EvalOutput:
    """Run one integration test end to end and return its EvalOutput."""
    config = get_config(metadata, instance.instance_id)

    # Set up the logger properly so multi-processing can parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, str(instance.instance_id), log_dir)
    else:
        logger.info(
            f'\nStarting evaluation for instance {str(instance.instance_id)}.\n'
        )

    # =============================================
    # import test instance
    # =============================================
    instance_id = instance.instance_id
    spec = importlib.util.spec_from_file_location(instance_id, instance.file_path)
    test_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(test_module)

    assert hasattr(
        test_module, 'Test'
    ), f'Test module {instance_id} does not have a Test class'
    test_class: type[BaseIntegrationTest] = test_module.Test
    assert issubclass(
        test_class, BaseIntegrationTest
    ), f'Test class {instance_id} does not inherit from BaseIntegrationTest'

    instruction = test_class.INSTRUCTION

    # =============================================
    # create sandbox and run the agent
    # =============================================
    runtime: Runtime = create_runtime(config)
    test_class.initialize_runtime(runtime)

    # Run the agent (similar to the `main` function) and get the final task state
    state: State | None = asyncio.run(
        run_controller(
            config=config,
            initial_user_action=MessageAction(content=instruction),
            runtime=runtime,
            fake_user_response_fn=FAKE_RESPONSES[metadata.agent_class],
        )
    )
    if state is None:
        raise ValueError('State should not be None.')

    # =============================================
    # result evaluation
    # =============================================
    histories = state.history.get_events()
    test_result: TestResult = test_class.verify_result(runtime, histories)
    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = EvalOutput(
        instance_id=str(instance.instance_id),
        instance=instance.to_dict(),
        instruction=instruction,
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
        test_result=test_result.model_dump(),
    )
    return output
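
# Each file under ./tests must expose a `Test` class implementing BaseIntegrationTest
# (INSTRUCTION, initialize_runtime, verify_result), as asserted in process_instance above.
# A minimal sketch of what such a module might look like (illustrative only: the file name,
# the CmdRunAction-based setup/check, and the exact base-class signatures are assumptions;
# see evaluation/integration_tests/tests/base.py for the real interface):
#
#     # tests/t99_example.py  (hypothetical)
#     from evaluation.integration_tests.tests.base import BaseIntegrationTest, TestResult
#     from openhands.events.action import CmdRunAction
#     from openhands.runtime.base import Runtime
#
#     class Test(BaseIntegrationTest):
#         INSTRUCTION = 'Create a file named hello.txt containing the text "hello world".'
#
#         @classmethod
#         def initialize_runtime(cls, runtime: Runtime) -> None:
#             # Seed the sandbox before the agent starts.
#             runtime.run_action(CmdRunAction(command='mkdir -p /workspace'))
#
#         @classmethod
#         def verify_result(cls, runtime: Runtime, histories: list) -> TestResult:
#             # Inspect the sandbox (and/or the event history) to decide success.
#             obs = runtime.run_action(CmdRunAction(command='cat /workspace/hello.txt'))
#             ok = 'hello world' in obs.content
#             return TestResult(success=ok, reason=None if ok else 'hello.txt missing or wrong')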


def load_integration_tests() -> pd.DataFrame:
    """Load tests from python files under ./tests."""
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    test_dir = os.path.join(cur_dir, 'tests')
    test_files = [
        os.path.join(test_dir, f)
        for f in os.listdir(test_dir)
        if f.startswith('t') and f.endswith('.py')
    ]
    df = pd.DataFrame(test_files, columns=['file_path'])
    # Use splitext rather than rstrip('.py'): rstrip strips *characters*, which would
    # mangle instance IDs whose names end in 'p' or 'y'.
    df['instance_id'] = df['file_path'].apply(
        lambda x: os.path.splitext(os.path.basename(x))[0]
    )
    return df
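
# The LLM config named on the command line (see the --llm_config error message below) is
# resolved by get_llm_config_arg(). In a standard OpenHands setup that name refers to an
# [llm.<name>] section in config.toml, e.g. (assumed layout):
#
#     [llm.eval_gpt4o]
#     model = "gpt-4o"
#     api_key = "..."
#     temperature = 0.0
#
# which would then be selected with --llm_config eval_gpt4o (or the equivalent dashed flag).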


if __name__ == '__main__':
    args = parse_arguments()
    integration_tests = load_integration_tests()

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    metadata = make_metadata(
        llm_config,
        'integration_tests',
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')

    # Parse dataset IDs if provided
    eval_ids = None
    if args.eval_ids:
        eval_ids = str(args.eval_ids).split(',')
        logger.info(f'\nUsing specific dataset IDs: {eval_ids}\n')

    instances = prepare_dataset(
        integration_tests,
        output_file,
        args.eval_n_limit,
        eval_ids=eval_ids,
    )
    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
    )

    df = pd.read_json(output_file, lines=True, orient='records')
    df['success'] = df['test_result'].apply(lambda x: x['success'])
    df['reason'] = df['test_result'].apply(lambda x: x['reason'])
    logger.info('-' * 100)
    logger.info(
        f'Success rate: {df["success"].mean():.2%} ({df["success"].sum()}/{len(df)})'
    )
    logger.info(
        '\nEvaluation Results:'
        + '\n'
        + df[['instance_id', 'success', 'reason']].to_string(index=False)
    )
    logger.info('-' * 100)
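
# Example invocation (illustrative; the exact option strings come from parse_arguments(),
# and are assumed here to mirror the attribute names used above):
#
#     poetry run python evaluation/integration_tests/run_infer.py \
#         --llm_config eval_gpt4o \
#         --agent_cls CodeActAgent \
#         --max_iterations 30 \
#         --eval_num_workers 2 \
#         --eval_ids t99_example
#
# Results are appended to <eval_output_dir>/output.jsonl and summarized above from each
# test's TestResult (success flag plus reason).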