run_infer.py

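"""Run OpenHands integration tests.

Discovers test modules under ./tests, runs the agent against each test's
INSTRUCTION inside a sandboxed runtime, verifies the outcome via the test's
verify_result hook, and writes output.jsonl plus a report.md summary to the
evaluation output directory.
"""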
import asyncio
import importlib.util
import os

import pandas as pd

from evaluation.integration_tests.tests.base import BaseIntegrationTest, TestResult
from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    codeact_user_response,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
    update_llm_config_for_completions_logging,
)
from openhands.controller.state.state import State
from openhands.core.config import (
    AgentConfig,
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    parse_arguments,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import MessageAction
from openhands.events.serialization.event import event_to_dict
from openhands.runtime.base import Runtime
from openhands.utils.async_utils import call_async_from_sync

FAKE_RESPONSES = {
    'CodeActAgent': codeact_user_response,
}


def get_config(
    metadata: EvalMetadata,
    instance_id: str,
) -> AppConfig:
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_openhands=False,
        runtime=os.environ.get('RUNTIME', 'eventstream'),
        max_iterations=metadata.max_iterations,
        sandbox=SandboxConfig(
            # use default base_container_image
            enable_auto_lint=True,
            use_host_network=False,
            timeout=100,
            api_key=os.environ.get('ALLHANDS_API_KEY', None),
            remote_runtime_api_url=os.environ.get('SANDBOX_REMOTE_RUNTIME_API_URL'),
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(
        update_llm_config_for_completions_logging(
            metadata.llm_config, metadata.eval_output_dir, instance_id
        )
    )
    agent_config = AgentConfig(
        codeact_enable_jupyter=True,
        codeact_enable_browsing=True,
        codeact_enable_llm_editor=False,
    )
    config.set_agent_config(agent_config)
    return config


def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
) -> EvalOutput:
    config = get_config(metadata, instance.instance_id)

    # Set up the logger properly so multiprocessing can be used to parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, str(instance.instance_id), log_dir)
    else:
        logger.info(
            f'\nStarting evaluation for instance {str(instance.instance_id)}.\n'
        )

    # =============================================
    # import test instance
    # =============================================
    instance_id = instance.instance_id
    spec = importlib.util.spec_from_file_location(instance_id, instance.file_path)
    test_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(test_module)

    assert hasattr(
        test_module, 'Test'
    ), f'Test module {instance_id} does not have a Test class'
    test_class: type[BaseIntegrationTest] = test_module.Test
    assert issubclass(
        test_class, BaseIntegrationTest
    ), f'Test class {instance_id} does not inherit from BaseIntegrationTest'
    instruction = test_class.INSTRUCTION

    # =============================================
    # create sandbox and run the agent
    # =============================================
    runtime: Runtime = create_runtime(config)
    call_async_from_sync(runtime.connect)
    test_class.initialize_runtime(runtime)

    # Run the agent (similar to the `main` function) and get the final task state
    state: State | None = asyncio.run(
        run_controller(
            config=config,
            initial_user_action=MessageAction(content=instruction),
            runtime=runtime,
            fake_user_response_fn=FAKE_RESPONSES[metadata.agent_class],
        )
    )
    if state is None:
        raise ValueError('State should not be None.')

    # =============================================
    # result evaluation
    # =============================================
    histories = [event_to_dict(event) for event in state.history]
    test_result: TestResult = test_class.verify_result(runtime, histories)
    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = EvalOutput(
        instance_id=str(instance.instance_id),
        instance=instance.to_dict(),
        instruction=instruction,
        metadata=metadata,
        history=histories,
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
        test_result=test_result.model_dump(),
    )
    return output


def load_integration_tests() -> pd.DataFrame:
    """Load tests from python files under ./tests"""
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    test_dir = os.path.join(cur_dir, 'tests')
    test_files = [
        os.path.join(test_dir, f)
        for f in os.listdir(test_dir)
        if f.startswith('t') and f.endswith('.py')
    ]
    df = pd.DataFrame(test_files, columns=['file_path'])
    df['instance_id'] = df['file_path'].apply(
        # Use splitext to drop only the '.py' extension; rstrip('.py') would also
        # strip trailing 'p'/'y' characters from the test name itself.
        lambda x: os.path.splitext(os.path.basename(x))[0]
    )
    return df


if __name__ == '__main__':
    args = parse_arguments()
    integration_tests = load_integration_tests()

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    metadata = make_metadata(
        llm_config,
        'integration_tests',
        args.agent_cls,
        args.max_iterations,
        args.eval_note,
        args.eval_output_dir,
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')

    # Parse dataset IDs if provided
    eval_ids = None
    if args.eval_ids:
        eval_ids = str(args.eval_ids).split(',')
        logger.info(f'\nUsing specific dataset IDs: {eval_ids}\n')

    instances = prepare_dataset(
        integration_tests,
        output_file,
        args.eval_n_limit,
        eval_ids=eval_ids,
    )
    run_evaluation(
        instances,
        metadata,
        output_file,
        args.eval_num_workers,
        process_instance,
    )

    df = pd.read_json(output_file, lines=True, orient='records')
    df['success'] = df['test_result'].apply(lambda x: x['success'])
    df['reason'] = df['test_result'].apply(lambda x: x['reason'])
    logger.info('-' * 100)
    logger.info(
        f'Success rate: {df["success"].mean():.2%} ({df["success"].sum()}/{len(df)})'
    )
    logger.info(
        '\nEvaluation Results:'
        + '\n'
        + df[['instance_id', 'success', 'reason']].to_string(index=False)
    )
    logger.info('-' * 100)

    report_file = os.path.join(metadata.eval_output_dir, 'report.md')
    with open(report_file, 'w') as f:
        f.write(
            f'Success rate: {df["success"].mean():.2%} ({df["success"].sum()}/{len(df)})\n'
        )
        f.write(df[['instance_id', 'success', 'reason']].to_markdown(index=False))
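
For context, every file picked up by load_integration_tests is expected to expose a Test class implementing the BaseIntegrationTest interface that process_instance relies on: an INSTRUCTION string, an initialize_runtime hook, and a verify_result hook that returns a TestResult with success and reason fields. The sketch below is a minimal, hypothetical illustration of that contract (the real base class lives in evaluation/integration_tests/tests/base.py); the decorators, the example instruction, the /workspace path, and the CmdRunAction-based check are assumptions made for illustration, not the actual tests.

# tests/t00_example.py -- hypothetical sketch of the test-module contract
from evaluation.integration_tests.tests.base import BaseIntegrationTest, TestResult
from openhands.events.action import CmdRunAction
from openhands.runtime.base import Runtime


class Test(BaseIntegrationTest):
    INSTRUCTION = 'Create a file named hello.txt containing the word "hello".'

    @classmethod
    def initialize_runtime(cls, runtime: Runtime) -> None:
        # Prepare the sandbox before the agent starts; nothing to set up in this sketch.
        pass

    @classmethod
    def verify_result(cls, runtime: Runtime, histories: list[dict]) -> TestResult:
        # Illustrative check: ask the sandbox whether the expected file exists.
        obs = runtime.run_action(CmdRunAction(command='cat /workspace/hello.txt'))
        if getattr(obs, 'exit_code', 1) == 0 and 'hello' in obs.content:
            return TestResult(success=True, reason='hello.txt has the expected content')
        return TestResult(success=False, reason='hello.txt missing or content mismatch')

run_evaluation then calls process_instance once per discovered test, and the success/reason values produced by verify_result are exactly what the report block at the end of the script aggregates into output.jsonl and report.md.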