
import asyncio
import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor
from typing import Any, Awaitable, Callable

import pandas as pd
from pydantic import BaseModel
from tqdm import tqdm

from openhands.controller.state.state import State
from openhands.core.config import LLMConfig
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction


class EvalMetadata(BaseModel):
    """Metadata describing a single evaluation run."""

    agent_class: str
    llm_config: LLMConfig
    max_iterations: int
    eval_output_dir: str
    start_time: str
    git_commit: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        logger.debug(f'Dumped metadata: {dumped_dict}')
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return json.dumps(dumped_dict)


class EvalOutput(BaseModel):
    """Evaluation result for a single instance."""

    # NOTE: User-specified
    instance_id: str
    instruction: str

    # output of the evaluation
    # store anything that is needed for the score calculation
    test_result: dict[str, Any]

    # Interaction info
    metadata: EvalMetadata
    history: list[tuple[dict[str, Any], dict[str, Any]]]
    metrics: dict[str, Any]
    error: str | None = None

    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)
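
# Usage sketch (illustrative only, not part of the upstream code): serializing an EvalOutput
# goes through the overridden model_dump(), so the embedded llm_config is replaced by its
# "safe" form before anything is written to disk. The values below are hypothetical.
#
#     output = EvalOutput(
#         instance_id='demo-1',
#         instruction='Fix the failing test.',
#         test_result={'passed': True},
#         metadata=metadata,  # an EvalMetadata, e.g. built by make_metadata()
#         history=[],
#         metrics={},
#     )
#     line = json.dumps(output.model_dump())  # safe to append to a .jsonl output file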


def codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    """Generate a fake user reply that keeps a CodeAct-style agent working on the task.

    Returns '/exit' as soon as `try_parse` finds an answer in the agent's last action;
    otherwise nudges the agent to continue, and after repeated exchanges tells it how
    to give up.
    """
    encaps_str = (
        (
            'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        if encapsulate_solution
        else ''
    )
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'If you think you have solved the task, please first send your answer to user through message and then <execute_bash> exit </execute_bash>.\n'
        f'{encaps_str}'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
    )
    if state.history:
        # if the last action already contains a parsable answer, exit early
        if try_parse is not None:
            last_action = state.history.get_last_action()
            ans = try_parse(last_action)
            if ans is not None:
                return '/exit'

        # check how many times the agent has tried to talk to the user; from the third
        # exchange onward, let the agent know that it is allowed to give up
        user_msgs = [
            event
            for event in state.history.get_events()
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) >= 2:
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )

    return msg
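
# Usage sketch (illustrative, not part of this module): evaluation scripts typically bind
# this function as the fake user response for a CodeAct-style agent, e.g. with
# functools.partial to fix `encapsulate_solution`/`try_parse`. The mapping name below is a
# hypothetical example.
#
#     AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
#         'CodeActAgent': functools.partial(codeact_user_response, encapsulate_solution=True),
#     }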


def cleanup():
    """Terminate any child processes that are still running."""
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def make_metadata(
    llm_config: LLMConfig,
    dataset_name: str,
    agent_class: str,
    max_iterations: int,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
) -> EvalMetadata:
    """Create the evaluation output directory, build an EvalMetadata, and write it to metadata.json."""
    model_name = llm_config.model.split('/')[-1]
    eval_note = f'_N_{eval_note}' if eval_note else ''

    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_class,
        f'{model_name}_maxiter_{max_iterations}{eval_note}',
    )

    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_path}')

    metadata = EvalMetadata(
        agent_class=agent_class,
        llm_config=llm_config,
        max_iterations=max_iterations,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit=subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
    )
    metadata_json = metadata.model_dump_json()
    logger.info(f'Metadata: {metadata_json}')
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)

    return metadata
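
# For reference (derived from the code above), the resulting directory layout is roughly:
#
#     <eval_output_dir>/<dataset_name>/<agent_class>/<model>_maxiter_<max_iterations>[_N_<eval_note>]/
#         metadata.json
#         logs/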


def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
):
    """Filter the dataset down to the instances that still need to be evaluated.

    Instances whose 'instance_id' already appears in `output_file` are skipped, so an
    interrupted run can be resumed. `eval_ids` selects specific instances; otherwise
    `eval_n_limit` caps the run to the first N rows.
    """
    assert (
        'instance_id' in dataset.columns
    ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')

    # Collect the ids of instances that already have results in the output file
    finished_ids = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(data[id_column])
        logger.warning(
            f'Output file {output_file} already exists. Loaded {len(finished_ids)} finished instances.'
        )

    if eval_ids:
        # Cast the requested ids to the dtype of the id column before filtering
        eval_ids_converted = [dataset[id_column].dtype.type(id) for id in eval_ids]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif eval_n_limit:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if instance[id_column] not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )

    return pd.DataFrame(new_dataset)
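
# Usage sketch (illustrative): because finished instance ids are read back from the output
# file, pointing prepare_dataset at the same output file lets an interrupted run resume.
# The file name below is a hypothetical example.
#
#     output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
#     instances = prepare_dataset(dataset, output_file, eval_n_limit=50)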


async def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[
        [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput]
    ],
):
    """Run `process_instance_func` over every row of `dataset` and append results to `output_file`.

    With `num_workers > 1`, instances are dispatched to a process pool; with a single
    worker, a plain loop is used for easier debugging.
    """
    use_multiprocessing = num_workers > 1
    logger.info(
        f'Evaluation started with Agent {metadata.agent_class}, '
        f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.'
    )
    pbar = tqdm(total=len(dataset))
    output_fp = open(output_file, 'a')

    async def update_progress(future):
        pbar.update(1)
        output: EvalOutput = await future if use_multiprocessing else future
        pbar.set_description(f'Instance {output.instance_id}')
        pbar.set_postfix_str(f'Test Result: {output.test_result}')
        logger.info(
            f'Finished evaluation for instance {output.instance_id}: {output.test_result}'
        )
        output_fp.write(json.dumps(output.model_dump()) + '\n')
        output_fp.flush()

    try:
        if use_multiprocessing:
            with ProcessPoolExecutor(num_workers) as executor:
                loop = asyncio.get_event_loop()
                futures = []
                for _, instance in dataset.iterrows():
                    future = loop.run_in_executor(
                        executor,
                        process_instance_func,
                        instance,
                        metadata,
                        bool(num_workers > 1),
                    )
                    futures.append(update_progress(future))
                await asyncio.gather(*futures)
        else:
            # Use a plain for loop for single process for easier debugging
            assert num_workers == 1
            for _, instance in dataset.iterrows():
                output = await process_instance_func(instance, metadata, False)
                await update_progress(output)
    except KeyboardInterrupt:
        print('KeyboardInterrupt received. Cleaning up...')
        cleanup()

    output_fp.close()
    logger.info('Evaluation finished.')
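
# End-to-end sketch (illustrative, assuming a user-defined async
# `process_instance(instance, metadata, reset_logger)` that returns an EvalOutput;
# names and argument values are hypothetical):
#
#     metadata = make_metadata(llm_config, 'my-dataset', 'CodeActAgent', 30, None, 'evaluation/outputs')
#     output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
#     instances = prepare_dataset(dataset, output_file, eval_n_limit=50)
#     asyncio.run(
#         run_evaluation(instances, metadata, output_file, num_workers=4, process_instance_func=process_instance)
#     )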


def reset_logger_for_multiprocessing(
    logger: logging.Logger, instance_id: str, log_dir: str
):
    """Reset the logger for multiprocessing.

    Save logs to a separate file for each process, instead of trying to write to the
    same file/console from multiple processes.
    """
    # Set up the per-instance log file
    log_file = os.path.join(
        log_dir,
        f'instance_{instance_id}.log',
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # add back the console handler to print ONE line
    logger.addHandler(get_console_handler())
    logger.info(
        f'Starting evaluation for instance {instance_id}.\n'
        f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
    )
    # Remove all handlers again (including the console handler added above), so that from
    # here on everything goes to the per-instance log file only
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(file_handler)
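
# Usage sketch (illustrative): inside a worker's process_instance(), redirect logs to the
# per-instance file under the 'logs/' directory created by make_metadata():
#
#     if reset_logger:
#         log_dir = os.path.join(metadata.eval_output_dir, 'logs')
#         reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)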