shared.py

import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
import traceback
from typing import Any, Awaitable, Callable, TextIO

import pandas as pd
from pydantic import BaseModel
from tqdm import tqdm

from openhands.controller.state.state import State
from openhands.core.config import LLMConfig
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction
from openhands.events.event import Event
from openhands.events.serialization.event import event_to_dict
from openhands.events.utils import get_pairs_from_events


class EvalMetadata(BaseModel):
    agent_class: str
    llm_config: LLMConfig
    max_iterations: int
    eval_output_dir: str
    start_time: str
    git_commit: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        logger.debug(f'Dumped metadata: {dumped_dict}')
        return json.dumps(dumped_dict)


class EvalOutput(BaseModel):
    # NOTE: User-specified
    instance_id: str
    # output of the evaluation
    # store anything that is needed for the score calculation
    test_result: dict[str, Any]

    instruction: str | None = None

    # Interaction info
    metadata: EvalMetadata | None = None
    # list[tuple[dict[str, Any], dict[str, Any]]] - for compatibility with the old format
    history: (
        list[dict[str, Any]] | list[tuple[dict[str, Any], dict[str, Any]]] | None
    ) = None
    metrics: dict[str, Any] | None = None
    error: str | None = None

    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # Remove None values
        dumped_dict = {k: v for k, v in dumped_dict.items() if v is not None}
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        if self.metadata is not None:
            dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        if 'metadata' in dumped_dict:
            dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)


class EvalException(Exception):
    pass


def codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    encaps_str = (
        (
            'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        if encapsulate_solution
        else ''
    )
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'If you think you have solved the task, please first send your answer to user through message and then finish the interaction.\n'
        f'{encaps_str}'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
    )

    if state.history:
        # check if the last action has an answer, if so, early exit
        if try_parse is not None:
            last_action = next(
                (
                    event
                    for event in reversed(state.history)
                    if isinstance(event, Action)
                ),
                None,
            )
            ans = try_parse(last_action)
            if ans is not None:
                return '/exit'

        # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up
        user_msgs = [
            event
            for event in state.history
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) >= 2:
            # let the agent know that it can give up when it has tried 3 times
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )

    return msg


def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def make_metadata(
    llm_config: LLMConfig,
    dataset_name: str,
    agent_class: str,
    max_iterations: int,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
) -> EvalMetadata:
    model_name = llm_config.model.split('/')[-1]
    model_path = model_name.replace(':', '_').replace('@', '-')
    eval_note = f'_N_{eval_note}' if eval_note else ''

    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_class,
        f'{model_path}_maxiter_{max_iterations}{eval_note}',
    )

    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_path}')

    metadata = EvalMetadata(
        agent_class=agent_class,
        llm_config=llm_config,
        max_iterations=max_iterations,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit=subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
    )
    metadata_json = metadata.model_dump_json()
    logger.info(f'Metadata: {metadata_json}')
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)

    return metadata


def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
    skip_num: int | None = None,
):
    assert (
        'instance_id' in dataset.columns
    ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')
    finished_ids: set[str] = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(str(data[id_column]))
        logger.warning(
            f'\nOutput file {output_file} already exists. Loaded {len(finished_ids)} finished instances.'
        )

    if eval_ids:
        eval_ids_converted = [dataset[id_column].dtype.type(id) for id in eval_ids]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif skip_num and skip_num >= 0:
        skip_num = min(skip_num, len(dataset))
        dataset = dataset.iloc[skip_num:]
        logger.info(
            f'Starting evaluation with skipping first {skip_num} instances ({len(dataset)} instances to run).'
        )
        if eval_n_limit and eval_n_limit > 0:
            dataset = dataset.head(eval_n_limit)
            logger.info(f'Limiting evaluation to {eval_n_limit} instances.')
    elif eval_n_limit and eval_n_limit > 0:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if str(instance[id_column]) not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )

    return pd.DataFrame(new_dataset)


def update_progress(
    result: EvalOutput,
    pbar: tqdm,
    output_fp: TextIO,
):
    """Update the progress bar and write the result to the output file."""
    pbar.update(1)
    pbar.set_description(f'Instance {result.instance_id}')
    pbar.set_postfix_str(f'Test Result: {str(result.test_result)[:300]}...')
    logger.info(
        f'Finished evaluation for instance {result.instance_id}: {str(result.test_result)[:300]}...\n'
    )
    output_fp.write(json.dumps(result.model_dump()) + '\n')
    output_fp.flush()


def assert_and_raise(condition: bool, msg: str):
    """Raise an EvalException if the condition is not met.

    This will be used in conjunction with _process_instance_wrapper to handle retries. An EvalException should trigger a retry.
    """
    if not condition:
        raise EvalException(msg)
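

# Illustrative usage note (not in the original module): a benchmark's
# process_instance function typically guards runtime steps with assert_and_raise
# so that a recoverable failure is raised as an EvalException and retried by
# _process_instance_wrapper below. The `obs` object here is hypothetical:
#     assert_and_raise(obs.exit_code == 0, f'Setup command failed: {obs.content}')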


def _process_instance_wrapper(
    process_instance_func: Callable[[pd.Series, EvalMetadata, bool], EvalOutput],
    instance: pd.Series,
    metadata: EvalMetadata,
    use_mp: bool,
    max_retries: int = 5,
) -> EvalOutput:
    """Wrap the process_instance_func to handle retries and errors.

    Retry an instance up to max_retries times if it fails (e.g., due to transient network/runtime issues).
    """
    for attempt in range(max_retries + 1):
        try:
            result = process_instance_func(instance, metadata, use_mp)
            return result
        except Exception as e:
            error = str(e)
            stacktrace = traceback.format_exc()
            if attempt == max_retries:
                logger.exception(e)
                msg = (
                    '-' * 10
                    + '\n'
                    + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                    + '\n'
                    + f'[Encountered after {max_retries} retries. Please check the logs and report the issue.]'
                    + '-' * 10
                )
                # Raise an error after all retries & stop the evaluation
                logger.exception(e)
                raise RuntimeError(
                    f'Maximum error retries reached for instance {instance.instance_id}'
                ) from e
            msg = (
                '-' * 10
                + '\n'
                + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                + '\n'
                + '-' * 10
                + f'[The above error occurred. Retrying... (attempt {attempt + 1} of {max_retries})]'
                + '-' * 10
                + '\n'
            )
            logger.error(msg)
            if use_mp:
                print(msg)  # use print to directly print to console
            time.sleep(5)


def _process_instance_wrapper_mp(args):
    """Wrapper for multiprocessing, especially for imap_unordered."""
    return _process_instance_wrapper(*args)


def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata | None,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[
        [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput]
    ],
    max_retries: int = 5,  # number of retries for each instance
):
    use_multiprocessing = num_workers > 1

    if metadata is not None:
        logger.info(
            f'Evaluation started with Agent {metadata.agent_class}:\n'
            f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.\n'
        )
    else:
        logger.info(f'Evaluation started with {num_workers} workers.')

    total_instances = len(dataset)
    pbar = tqdm(total=total_instances, desc='Instances processed')
    output_fp = open(output_file, 'a')

    try:
        if use_multiprocessing:
            with mp.Pool(num_workers) as pool:
                args_iter = (
                    (process_instance_func, instance, metadata, True, max_retries)
                    for _, instance in dataset.iterrows()
                )
                results = pool.imap_unordered(_process_instance_wrapper_mp, args_iter)
                for result in results:
                    update_progress(result, pbar, output_fp)
        else:
            for _, instance in dataset.iterrows():
                result = _process_instance_wrapper(
                    process_instance_func=process_instance_func,
                    instance=instance,
                    metadata=metadata,
                    use_mp=False,
                    max_retries=max_retries,
                )
                update_progress(result, pbar, output_fp)
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt received. Cleaning up...\n')
        cleanup()

    output_fp.close()
    logger.info('\nEvaluation finished.\n')


def reset_logger_for_multiprocessing(
    logger: logging.Logger, instance_id: str, log_dir: str
):
    """Reset the logger for multiprocessing.

    Save logs to a separate file for each process, instead of trying to write to the
    same file/console from multiple processes.
    """
    # Set up logger
    log_file = os.path.join(
        log_dir,
        f'instance_{instance_id}.log',
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # add console handler to print ONE line
    console_handler = get_console_handler(log_level=logging.INFO)
    console_handler.setFormatter(
        logging.Formatter(
            f'Instance {instance_id} - ' + '%(asctime)s - %(levelname)s - %(message)s'
        )
    )
    logger.addHandler(console_handler)
    logger.info(
        f'Starting evaluation for instance {instance_id}.\n'
        f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
    )
    # Only log WARNING or higher to console
    console_handler.setLevel(logging.WARNING)
    # Log INFO and above to file
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    )
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
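

# Illustrative usage note (not in the original module): with multiple workers, a
# benchmark's process_instance function typically calls this helper first so each
# instance logs to its own file. The `reset_logger` flag and the 'infer_logs'
# directory name below are assumptions for illustration:
#     if reset_logger:
#         log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
#         reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)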


def update_llm_config_for_completions_logging(
    llm_config: LLMConfig,
    eval_output_dir: str,
    instance_id: str,
) -> LLMConfig:
    """Update the LLM config for logging completions."""
    if llm_config.log_completions:
        llm_config.log_completions_folder = os.path.join(
            eval_output_dir, 'llm_completions', instance_id
        )
        logger.info(
            f'Logging LLM completions for instance {instance_id} to '
            f'{llm_config.log_completions_folder}'
        )
    return llm_config


# history is now available as a filtered stream of events, rather than list of pairs of (Action, Observation)
# we rebuild the pairs here
# for compatibility with the existing output format in evaluations
# remove this when it's no longer necessary
def compatibility_for_eval_history_pairs(
    history: list[Event],
) -> list[tuple[dict, dict]]:
    history_pairs = []
    for action, observation in get_pairs_from_events(history):
        history_pairs.append((event_to_dict(action), event_to_dict(observation)))
    return history_pairs
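

# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the helpers above are typically wired together
# by a benchmark's run_infer script: build a DataFrame with an 'instance_id'
# column, drop already-finished instances with prepare_dataset, and drive the
# loop with run_evaluation. The toy process-instance function, the /tmp output
# path, and the demo ids below are assumptions for illustration only.
if __name__ == '__main__':

    def _toy_process_instance(
        instance: pd.Series, metadata: EvalMetadata | None, use_mp: bool
    ) -> EvalOutput:
        # A real implementation would start a runtime, run the agent on the
        # instance, and score the result; here we just echo the instance id.
        return EvalOutput(
            instance_id=str(instance.instance_id),
            test_result={'ok': True},
        )

    toy_df = pd.DataFrame({'instance_id': ['demo-0', 'demo-1']})
    out_file = os.path.join('/tmp', 'toy_eval_output.jsonl')
    remaining = prepare_dataset(toy_df, out_file, eval_n_limit=0)
    run_evaluation(
        remaining,
        metadata=None,
        output_file=out_file,
        num_workers=1,
        process_instance_func=_toy_process_instance,
    )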