import json
import logging
import multiprocessing as mp
import os
import pathlib
import signal
import subprocess
import time
import traceback
from contextlib import contextmanager
from typing import Any, Callable, TextIO

import pandas as pd
from pydantic import BaseModel
from tqdm import tqdm

from openhands.controller.state.state import State
from openhands.core.config import LLMConfig
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction
from openhands.events.event import Event
from openhands.events.serialization.event import event_to_dict
from openhands.events.utils import get_pairs_from_events


class EvalMetadata(BaseModel):
    agent_class: str
    llm_config: LLMConfig
    max_iterations: int
    eval_output_dir: str
    start_time: str
    git_commit: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information from the LLM config
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # avoid leaking sensitive information from the LLM config
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        logger.debug(f'Dumped metadata: {dumped_dict}')
        return json.dumps(dumped_dict)


class EvalOutput(BaseModel):
    # NOTE: User-specified
    instance_id: str
    # Output of the evaluation; store anything that is needed for the score calculation
    test_result: dict[str, Any]

    instruction: str | None = None

    # Interaction info
    metadata: EvalMetadata | None = None
    # list[tuple[dict[str, Any], dict[str, Any]]] - for compatibility with the old format
    history: (
        list[dict[str, Any]] | list[tuple[dict[str, Any], dict[str, Any]]] | None
    ) = None
    metrics: dict[str, Any] | None = None
    error: str | None = None

    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # Remove None values
        dumped_dict = {k: v for k, v in dumped_dict.items() if v is not None}
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        if self.metadata is not None:
            dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information).
        # Guard on self.metadata rather than key presence: the default dump includes a
        # "metadata" key even when the field is None, which would otherwise crash here.
        if self.metadata is not None:
            dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)


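# Illustrative sketch (not part of the original module, never called): how the two
# models above serialize together. The LLMConfig keyword arguments are an assumption
# about its constructor; the point is that model_dump_json() routes llm_config
# through to_safe_dict(), so secrets never reach metadata.json on disk.
def _serialization_example() -> str:
    metadata = EvalMetadata(
        agent_class='CodeActAgent',
        llm_config=LLMConfig(model='gpt-4o'),  # assumed constructor signature
        max_iterations=30,
        eval_output_dir='/tmp/eval',
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit='deadbeef',
    )
    output = EvalOutput(
        instance_id='demo-1', test_result={'passed': True}, metadata=metadata
    )
    return output.model_dump_json()  # llm_config appears only in sanitized form

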
class EvalException(Exception):
    pass


class EvalTimeoutException(Exception):
    pass


@contextmanager
def timeout(seconds: int):
    def timeout_handler(signum, frame):
        raise EvalTimeoutException(f'Function timed out after {seconds} seconds')

    # Set up the signal handler
    original_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(seconds)

    try:
        yield
    finally:
        # Restore the original handler and disable the alarm
        signal.alarm(0)
        signal.signal(signal.SIGALRM, original_handler)


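# Usage sketch (illustrative, never called in this module). Note that SIGALRM-based
# timeouts only work in the main thread of each process on POSIX systems; `_slow_step`
# is a hypothetical stand-in for a long-running evaluation step.
def _timeout_usage_example() -> None:
    def _slow_step() -> None:
        time.sleep(60)

    try:
        with timeout(5):
            _slow_step()
    except EvalTimeoutException as e:
        logger.warning(f'Step abandoned: {e}')

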
def codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    """Simulate the user's reply so a CodeAct agent keeps working autonomously."""
    encaps_str = (
        (
            'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        if encapsulate_solution
        else ''
    )
    msg = (
        'Please continue working on the task using whatever approach you think is suitable.\n'
        'If you think you have solved the task, please first send your answer to the user through a message and then finish the interaction.\n'
        f'{encaps_str}'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
    )

    if state.history:
        # check if the last action has an answer; if so, exit early
        if try_parse is not None:
            last_action = next(
                (
                    event
                    for event in reversed(state.history)
                    if isinstance(event, Action)
                ),
                None,
            )
            # guard against an empty history: try_parse expects an Action, not None
            if last_action is not None:
                ans = try_parse(last_action)
                if ans is not None:
                    return '/exit'

        # check if the agent has already tried to talk to the user twice;
        # if so, let it know it can give up
        user_msgs = [
            event
            for event in state.history
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) >= 2:
            return (
                msg
                + 'If you want to give up, use the "finish" tool to finish the interaction.\n'
            )
    return msg


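# Hypothetical `try_parse` sketch (an assumption, not part of the original module):
# extract a <solution>...</solution> span from the last agent action so
# codeact_user_response can end the interaction early with '/exit'.
def _example_try_parse(action: Action) -> str | None:
    import re

    # Actions carrying text are assumed to expose a `content` attribute here
    content = getattr(action, 'content', None) or ''
    match = re.search(r'<solution>(.*?)</solution>', content, re.DOTALL)
    return match.group(1).strip() if match else None

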
def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def make_metadata(
    llm_config: LLMConfig,
    dataset_name: str,
    agent_class: str,
    max_iterations: int,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
) -> EvalMetadata:
    model_name = llm_config.model.split('/')[-1]
    model_path = model_name.replace(':', '_').replace('@', '-')
    eval_note = f'_N_{eval_note}' if eval_note else ''

    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_class,
        f'{model_path}_maxiter_{max_iterations}{eval_note}',
    )

    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_path}')

    metadata = EvalMetadata(
        agent_class=agent_class,
        llm_config=llm_config,
        max_iterations=max_iterations,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit=subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
    )
    metadata_json = metadata.model_dump_json()
    logger.info(f'Metadata: {metadata_json}')
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)

    return metadata


def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
    skip_num: int | None = None,
) -> pd.DataFrame:
    assert 'instance_id' in dataset.columns, (
        "Expected 'instance_id' column in the dataset. You should define your own "
        "unique identifier for each instance and use it as the 'instance_id' column."
    )
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')
    finished_ids: set[str] = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(str(data[id_column]))
        logger.warning(
            f'\nOutput file {output_file} already exists. Loaded {len(finished_ids)} finished instances.'
        )

    if eval_ids:
        eval_ids_converted = [
            dataset[id_column].dtype.type(eval_id) for eval_id in eval_ids
        ]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif skip_num and skip_num >= 0:
        skip_num = min(skip_num, len(dataset))
        dataset = dataset.iloc[skip_num:]
        logger.info(
            f'Starting evaluation after skipping the first {skip_num} instances ({len(dataset)} instances to run).'
        )
        if eval_n_limit and eval_n_limit > 0:
            dataset = dataset.head(eval_n_limit)
            logger.info(f'Limiting evaluation to {eval_n_limit} instances.')
    elif eval_n_limit and eval_n_limit > 0:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if str(instance[id_column]) not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )

    return pd.DataFrame(new_dataset)


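# Usage sketch (illustrative, never called here): resuming a partially finished run.
# The DataFrame contents and the output path are assumptions. Rows whose instance_id
# already appears in output.jsonl are dropped, so re-running the harness skips
# completed work, and eval_n_limit caps what remains.
def _prepare_dataset_example() -> pd.DataFrame:
    df = pd.DataFrame({'instance_id': ['a', 'b', 'c'], 'task': ['t1', 't2', 't3']})
    return prepare_dataset(df, output_file='/tmp/eval/output.jsonl', eval_n_limit=2)

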
def update_progress(
    result: EvalOutput,
    pbar: tqdm,
    output_fp: TextIO,
):
    """Update the progress bar and write the result to the output file."""
    pbar.update(1)
    pbar.set_description(f'Instance {result.instance_id}')
    pbar.set_postfix_str(f'Test Result: {str(result.test_result)[:300]}...')
    logger.info(
        f'Finished evaluation for instance {result.instance_id}: {str(result.test_result)[:300]}...\n'
    )
    output_fp.write(json.dumps(result.model_dump()) + '\n')
    output_fp.flush()


def assert_and_raise(condition: bool, msg: str):
    """Raise an EvalException if the condition is not met.

    This is used in conjunction with _process_instance_wrapper to handle retries:
    an EvalException should trigger a retry.
    """
    if not condition:
        raise EvalException(msg)


def _process_instance_wrapper(
    process_instance_func: Callable[[pd.Series, EvalMetadata, bool], EvalOutput],
    instance: pd.Series,
    metadata: EvalMetadata,
    use_mp: bool,
    max_retries: int = 5,
    timeout_seconds: int | None = None,
) -> EvalOutput:
    """Wrap process_instance_func to handle retries and errors."""
    for attempt in range(max_retries + 1):
        try:
            if timeout_seconds is not None:
                with timeout(timeout_seconds):
                    result = process_instance_func(instance, metadata, use_mp)
            else:
                result = process_instance_func(instance, metadata, use_mp)
            return result
        except EvalTimeoutException as e:
            error = f'Timeout after {timeout_seconds} seconds'
            msg = (
                '-' * 10
                + '\n'
                + f'Timeout ({timeout_seconds} seconds) in instance [{instance.instance_id}]. Stopped evaluation for this instance.'
                + '\n'
                + '-' * 10
            )
            logger.error(msg)
            logger.exception(e)
            # A timeout is not retried; record it as the result for this instance
            return EvalOutput(
                instance_id=instance.instance_id,
                test_result={},
                error=error,
            )
        except Exception as e:
            error = str(e)
            stacktrace = traceback.format_exc()
            if attempt == max_retries:
                msg = (
                    '-' * 10
                    + '\n'
                    + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                    + '\n'
                    + f'[Encountered after {max_retries} retries. Please check the logs and report the issue.]'
                    + '\n'
                    + '-' * 10
                )
                # Raise an error after all retries & stop the evaluation
                logger.error(msg)
                logger.exception(e)
                raise RuntimeError(
                    f'Maximum error retries reached for instance {instance.instance_id}'
                ) from e
            msg = (
                '-' * 10
                + '\n'
                + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                + '\n'
                + '-' * 10
                + '\n'
                + f'[The above error occurred. Retrying... (attempt {attempt + 1} of {max_retries})]'
                + '\n'
                + '-' * 10
                + '\n'
            )
            logger.error(msg)
            if use_mp:
                print(msg)  # use print to write directly to the console
            time.sleep(5)


def _process_instance_wrapper_mp(args):
    """Wrapper for multiprocessing, especially for imap_unordered."""
    return _process_instance_wrapper(*args)


def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata | None,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[[pd.Series, EvalMetadata, bool], EvalOutput],
    max_retries: int = 5,  # number of retries for each instance
    timeout_seconds: int | None = None,
):
    use_multiprocessing = num_workers > 1

    if metadata is not None:
        logger.info(
            f'Evaluation started with Agent {metadata.agent_class}:\n'
            f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.\n'
        )
    else:
        logger.warning('Running evaluation without metadata.')
        logger.info(f'Evaluation started with {num_workers} workers.')

    total_instances = len(dataset)
    pbar = tqdm(total=total_instances, desc='Instances processed')
    output_fp = open(output_file, 'a')

    try:
        if use_multiprocessing:
            with mp.Pool(num_workers) as pool:
                args_iter = (
                    (
                        process_instance_func,
                        instance,
                        metadata,
                        True,
                        max_retries,
                        timeout_seconds,
                    )
                    for _, instance in dataset.iterrows()
                )
                results = pool.imap_unordered(_process_instance_wrapper_mp, args_iter)
                for result in results:
                    update_progress(result, pbar, output_fp)
        else:
            for _, instance in dataset.iterrows():
                result = _process_instance_wrapper(
                    process_instance_func=process_instance_func,
                    instance=instance,
                    metadata=metadata,
                    use_mp=False,
                    max_retries=max_retries,
                    timeout_seconds=timeout_seconds,
                )
                update_progress(result, pbar, output_fp)
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt received. Cleaning up...\n')
        cleanup()

    output_fp.close()
    logger.info('\nEvaluation finished.\n')


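# End-to-end usage sketch (illustrative, never called here): how the helpers in this
# module are typically composed in a benchmark script. `process_instance`, the tiny
# DataFrame, and the config values are assumptions standing in for a benchmark-specific
# implementation; make_metadata also shells out to git, so this assumes a git checkout.
def _run_evaluation_example() -> None:
    def process_instance(
        instance: pd.Series, metadata: EvalMetadata, use_mp: bool
    ) -> EvalOutput:
        return EvalOutput(instance_id=str(instance.instance_id), test_result={})

    llm_config = LLMConfig(model='gpt-4o')  # assumed constructor signature
    metadata = make_metadata(
        llm_config, 'my_dataset', 'CodeActAgent', 30, None, '/tmp/eval'
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    df = pd.DataFrame({'instance_id': ['a', 'b']})
    remaining = prepare_dataset(df, output_file, eval_n_limit=0)
    run_evaluation(
        remaining,
        metadata,
        output_file,
        num_workers=1,
        process_instance_func=process_instance,
    )

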
def reset_logger_for_multiprocessing(
    logger: logging.Logger, instance_id: str, log_dir: str
):
    """Reset the logger for multiprocessing.

    Save logs to a separate file for each process, instead of trying to write to the
    same file/console from multiple processes.
    """
    # Set up logger
    log_file = os.path.join(
        log_dir,
        f'instance_{instance_id}.log',
    )
    # Remove all existing handlers from the logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # Add a console handler to print ONE line
    console_handler = get_console_handler(log_level=logging.INFO)
    console_handler.setFormatter(
        logging.Formatter(
            f'Instance {instance_id} - ' + '%(asctime)s - %(levelname)s - %(message)s'
        )
    )
    logger.addHandler(console_handler)
    logger.info(
        f'Starting evaluation for instance {instance_id}.\n'
        f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
    )
    # After that one line, only log WARNING or higher to the console
    console_handler.setLevel(logging.WARNING)
    # Log INFO and above to the file
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    )
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)


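# Usage sketch (illustrative, never called here): a benchmark's process_instance
# typically calls this first so each worker process logs to its own file. The
# 'logs' subdirectory mirrors the one created by make_metadata.
def _worker_logging_example(instance: pd.Series, metadata: EvalMetadata) -> None:
    log_dir = os.path.join(metadata.eval_output_dir, 'logs')
    reset_logger_for_multiprocessing(logger, str(instance.instance_id), log_dir)

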
def update_llm_config_for_completions_logging(
    llm_config: LLMConfig,
    eval_output_dir: str,
    instance_id: str,
) -> LLMConfig:
    """Update the LLM config for logging completions."""
    if llm_config.log_completions:
        llm_config.log_completions_folder = os.path.join(
            eval_output_dir, 'llm_completions', instance_id
        )
        logger.info(
            f'Logging LLM completions for instance {instance_id} to '
            f'{llm_config.log_completions_folder}'
        )
    return llm_config


# History is now available as a filtered stream of events, rather than a list of
# (Action, Observation) pairs; we rebuild the pairs here for compatibility with
# the existing output format in evaluations.
# Remove this when it's no longer necessary.
def compatibility_for_eval_history_pairs(
    history: list[Event],
) -> list[tuple[dict, dict]]:
    history_pairs = []
    for action, observation in get_pairs_from_events(history):
        history_pairs.append((event_to_dict(action), event_to_dict(observation)))
    return history_pairs