import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
import traceback
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import Any, Awaitable, Callable, TextIO

import pandas as pd
from pydantic import BaseModel
from tqdm import tqdm

from openhands.controller.state.state import State
from openhands.core.config import LLMConfig
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction


class EvalMetadata(BaseModel):
    agent_class: str
    llm_config: LLMConfig
    max_iterations: int
    eval_output_dir: str
    start_time: str
    git_commit: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        logger.debug(f'Dumped metadata: {dumped_dict}')
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return json.dumps(dumped_dict)


class EvalOutput(BaseModel):
    # NOTE: User-specified
    instance_id: str
    # output of the evaluation
    # store anything that is needed for the score calculation
    test_result: dict[str, Any]

    instruction: str | None = None

    # Interaction info
    metadata: EvalMetadata | None = None
    # list[tuple[dict[str, Any], dict[str, Any]]] - for compatibility with the old format
    history: (
        list[dict[str, Any]] | list[tuple[dict[str, Any], dict[str, Any]]] | None
    ) = None
    llm_completions: list[dict[str, Any]]
    metrics: dict[str, Any] | None = None
    error: str | None = None

    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # Remove None values
        dumped_dict = {k: v for k, v in dumped_dict.items() if v is not None}
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        if self.metadata is not None:
            dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information).
        # Guard on self.metadata as well: the JSON dump includes None fields, so the key
        # can be present even when no metadata was set.
        if 'metadata' in dumped_dict and self.metadata is not None:
            dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)


def codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    encaps_str = (
        (
            'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        if encapsulate_solution
        else ''
    )
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'If you think you have solved the task, please first send your answer to user through message and then <execute_bash> exit </execute_bash>.\n'
        f'{encaps_str}'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
    )
    if state.history:
        # check if the last action has an answer, if so, early exit
        if try_parse is not None:
            last_action = state.history.get_last_action()
            ans = try_parse(last_action)
            if ans is not None:
                return '/exit'

        # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up
        user_msgs = [
            event
            for event in state.history.get_events()
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) >= 2:
            # let the agent know that it can give up when it has tried 3 times
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )
    return msg
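
# Illustrative sketch (an assumption, not part of the original module): benchmark
# scripts typically map agent class names to an automated "fake user" reply function
# such as codeact_user_response above, so the controller can keep the agent working
# without a human in the loop. The mapping name below is an example only.
EXAMPLE_FAKE_USER_RESPONSE_FNS: dict[str, Callable[[State], str]] = {
    'CodeActAgent': codeact_user_response,
}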


def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def make_metadata(
    llm_config: LLMConfig,
    dataset_name: str,
    agent_class: str,
    max_iterations: int,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
) -> EvalMetadata:
    """Build the EvalMetadata for a run and write it to metadata.json in the output directory."""
    model_name = llm_config.model.split('/')[-1]
    model_path = model_name.replace(':', '_')
    eval_note = f'_N_{eval_note}' if eval_note else ''

    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_class,
        f'{model_path}_maxiter_{max_iterations}{eval_note}',
    )

    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_path}')

    metadata = EvalMetadata(
        agent_class=agent_class,
        llm_config=llm_config,
        max_iterations=max_iterations,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit=subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
    )
    metadata_json = metadata.model_dump_json()
    logger.info(f'Metadata: {metadata_json}')
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)

    return metadata


def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
    skip_num: int | None = None,
):
    """Select the instances that still need to be evaluated, skipping IDs already recorded in output_file."""
    assert (
        'instance_id' in dataset.columns
    ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')
    finished_ids: set[str] = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(str(data[id_column]))
        logger.warning(
            f'\nOutput file {output_file} already exists. Loaded {len(finished_ids)} finished instances.'
        )

    if eval_ids:
        eval_ids_converted = [dataset[id_column].dtype.type(id) for id in eval_ids]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif skip_num and skip_num >= 0:
        skip_num = min(skip_num, len(dataset))
        dataset = dataset.iloc[skip_num:]
        logger.info(
            f'Starting evaluation with skipping first {skip_num} instances ({len(dataset)} instances to run).'
        )
        if eval_n_limit and eval_n_limit > 0:
            dataset = dataset.head(eval_n_limit)
            logger.info(f'Limiting evaluation to {eval_n_limit} instances.')
    elif eval_n_limit and eval_n_limit > 0:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')

    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if str(instance[id_column]) not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )

    return pd.DataFrame(new_dataset)
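
# Illustrative sketch (an assumption, not part of the original module): how the
# metadata and dataset helpers above are typically chained so that an interrupted
# run can resume from an existing output.jsonl. The dataset name, agent class, and
# output directory below are placeholder values.
def _example_prepare_run(
    dataset_df: pd.DataFrame, llm_config: LLMConfig
) -> tuple[pd.DataFrame, EvalMetadata, str]:
    metadata = make_metadata(
        llm_config=llm_config,
        dataset_name='example_dataset',
        agent_class='CodeActAgent',
        max_iterations=30,
        eval_note=None,
        eval_output_dir='evaluation/evaluation_outputs',
    )
    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    # prepare_dataset() drops instances whose IDs already appear in output_file.
    remaining = prepare_dataset(dataset_df, output_file, eval_n_limit=100)
    return remaining, metadata, output_file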


def update_progress(
    result: EvalOutput,
    pbar: tqdm,
    output_fp: TextIO,
):
    """Update the progress bar and write the result to the output file."""
    pbar.update(1)
    pbar.set_description(f'Instance {result.instance_id}')
    pbar.set_postfix_str(f'Test Result: {result.test_result}')
    logger.info(
        f'Finished evaluation for instance {result.instance_id}: {str(result.test_result)[:300]}...\n'
    )
    output_fp.write(json.dumps(result.model_dump()) + '\n')
    output_fp.flush()


def _process_instance_wrapper(
    process_instance_func: Callable[[pd.Series, EvalMetadata, bool], EvalOutput],
    instance: pd.Series,
    metadata: EvalMetadata,
    use_mp: bool,
    max_retries: int = 5,
) -> EvalOutput:
    """Wrap the process_instance_func to handle retries and errors.

    Retry an instance up to max_retries times if it fails (e.g., due to transient network/runtime issues).
    """
    for attempt in range(max_retries + 1):
        try:
            result = process_instance_func(instance, metadata, use_mp)
            return result
        except Exception as e:
            error = str(e)
            stacktrace = traceback.format_exc()
            if attempt == max_retries:
                msg = (
                    '-' * 10
                    + '\n'
                    + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                    + '\n'
                    + f'[Encountered after {max_retries} retries. Please check the logs and report the issue.]'
                    + '-' * 10
                )
                logger.error(msg)
                # Raise an error after all retries & stop the evaluation
                logger.exception(e)
                raise RuntimeError(
                    f'Maximum error retries reached for instance {instance.instance_id}'
                ) from e
            msg = (
                '-' * 10
                + '\n'
                + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                + '\n'
                + '-' * 10
                + f'[The above error occurred. Retrying... (attempt {attempt + 1} of {max_retries})]'
                + '-' * 10
                + '\n'
            )
            logger.error(msg)
            if use_mp:
                print(msg)  # use print to directly print to console
            time.sleep(5)


def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata | None,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[
        [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput]
    ],
    max_retries: int = 5,  # number of retries for each instance
):
    """Run process_instance_func on every instance in the dataset, in parallel when num_workers > 1."""
    use_multiprocessing = num_workers > 1

    if metadata is not None:
        logger.info(
            f'Evaluation started with Agent {metadata.agent_class}:\n'
            f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.\n'
        )
    else:
        logger.info(f'Evaluation started with {num_workers} workers.')

    total_instances = len(dataset)
    pbar = tqdm(total=total_instances, desc='Instances processed')
    output_fp = open(output_file, 'a')

    try:
        if use_multiprocessing:
            with ProcessPoolExecutor(num_workers) as executor:
                futures = [
                    executor.submit(
                        _process_instance_wrapper,
                        process_instance_func=process_instance_func,
                        instance=instance,
                        metadata=metadata,
                        use_mp=True,
                        max_retries=max_retries,
                    )
                    for _, instance in dataset.iterrows()
                ]
                for future in as_completed(futures):
                    result = future.result()
                    update_progress(result, pbar, output_fp)
        else:
            for _, instance in dataset.iterrows():
                result = _process_instance_wrapper(
                    process_instance_func=process_instance_func,
                    instance=instance,
                    metadata=metadata,
                    use_mp=False,
                    max_retries=max_retries,
                )
                update_progress(result, pbar, output_fp)
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt received. Cleaning up...\n')
        cleanup()

    output_fp.close()
    logger.info('\nEvaluation finished.\n')


def reset_logger_for_multiprocessing(
    logger: logging.Logger, instance_id: str, log_dir: str
):
    """Reset the logger for multiprocessing.

    Save logs to a separate file for each process, instead of trying to write to the
    same file/console from multiple processes.
    """
    # Set up logger
    log_file = os.path.join(
        log_dir,
        f'instance_{instance_id}.log',
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # add back the console handler to print ONE line
    logger.addHandler(get_console_handler())
    logger.info(
        f'Starting evaluation for instance {instance_id}.\n'
        f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(file_handler)
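

# Illustrative sketch (an assumption, not part of the original module): a minimal
# per-instance worker and driver showing how run_evaluation() and
# reset_logger_for_multiprocessing() are meant to be combined. The agent call
# itself is elided; only the surrounding plumbing is shown.
def _example_process_instance(
    instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True
) -> EvalOutput:
    if reset_logger:
        # Give each worker its own log file so parallel runs do not interleave output.
        log_dir = os.path.join(metadata.eval_output_dir, 'logs')
        reset_logger_for_multiprocessing(logger, str(instance.instance_id), log_dir)
    # ... run the agent on `instance` here and collect its history/metrics ...
    return EvalOutput(
        instance_id=str(instance.instance_id),
        test_result={'passed': False},  # placeholder result
        llm_completions=[],
    )


def _example_run(dataset_df: pd.DataFrame, llm_config: LLMConfig) -> None:
    # _example_prepare_run() is the illustrative helper sketched after prepare_dataset().
    remaining, metadata, output_file = _example_prepare_run(dataset_df, llm_config)
    run_evaluation(
        dataset=remaining,
        metadata=metadata,
        output_file=output_file,
        num_workers=4,
        process_instance_func=_example_process_instance,
    )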