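"""shared.py: shared helpers for agent evaluation runs.

Provides metadata capture (EvalMetadata / make_metadata), per-instance output
records (EvalOutput), dataset preparation with resume support
(prepare_dataset), a single- or multi-process evaluation loop
(run_evaluation), and per-instance log isolation
(reset_logger_for_multiprocessing).
"""
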
import json
import logging
import multiprocessing as mp
import os
import pathlib
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor
from typing import Any, Awaitable, Callable

import pandas as pd
from pydantic import BaseModel
from tqdm import tqdm

from openhands.controller.state.state import State
from openhands.core.config import LLMConfig
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction


class EvalMetadata(BaseModel):
    agent_class: str
    llm_config: LLMConfig
    max_iterations: int
    eval_output_dir: str
    start_time: str
    git_commit: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        logger.debug(f'Dumped metadata: {dumped_dict}')
        # avoid leaking sensitive information
        dumped_dict['llm_config'] = self.llm_config.to_safe_dict()
        return json.dumps(dumped_dict)


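# Illustrative sketch (all values are placeholders): both dump methods replace
# the raw llm_config with llm_config.to_safe_dict(), so serialized metadata
# never carries credentials held in memory.
#
#   metadata = EvalMetadata(
#       agent_class='CodeActAgent',
#       llm_config=llm_config,  # may contain an API key in memory
#       max_iterations=30,
#       eval_output_dir='evaluation/outputs',
#       start_time='2024-01-01 00:00:00',
#       git_commit='abc1234',
#   )
#   safe_json = metadata.model_dump_json()  # llm_config field is the safe dict

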
class EvalOutput(BaseModel):
    # NOTE: User-specified
    instance_id: str
    instruction: str

    # output of the evaluation
    # store anything that is needed for the score calculation
    test_result: dict[str, Any]

    # Interaction info
    metadata: EvalMetadata
    history: list[tuple[dict[str, Any], dict[str, Any]]]
    metrics: dict[str, Any]
    error: str | None = None

    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        dumped_dict = super().model_dump(*args, **kwargs)
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)


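# Each EvalOutput is written as one JSON line of the output file by
# run_evaluation below; field values here are illustrative only:
#   {"instance_id": "task-001", "instruction": "...", "test_result": {...},
#    "metadata": {...}, "history": [...], "metrics": {...}, "error": null}

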
def codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    encaps_str = (
        (
            'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        if encapsulate_solution
        else ''
    )
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'If you think you have solved the task, please first send your answer to user through message and then <execute_bash> exit </execute_bash>.\n'
        f'{encaps_str}'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
    )
    if state.history:
        # if the last action already contains a parseable answer, exit early
        if try_parse is not None:
            last_action = state.history.get_last_action()
            ans = try_parse(last_action)
            if ans is not None:
                return '/exit'
        # count how many times the agent has already talked to the user; after
        # two user messages, this response is the third attempt, so let the
        # agent know it is allowed to give up
        user_msgs = [
            event
            for event in state.history.get_events()
            if isinstance(event, MessageAction) and event.source == 'user'
        ]
        if len(user_msgs) >= 2:
            return (
                msg
                + 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
            )
    return msg


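# Hypothetical wiring sketch: benchmark runners typically register this
# function as the fake-user response for CodeAct-style agents, e.g. via a
# mapping like the one below (the mapping name is an assumption here, not
# defined in this module):
#
#   AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': codeact_user_response}

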
def cleanup():
    print('Cleaning up child processes...')
    for process in mp.active_children():
        print(f'Terminating child process: {process.name}')
        process.terminate()
        process.join()


def make_metadata(
    llm_config: LLMConfig,
    dataset_name: str,
    agent_class: str,
    max_iterations: int,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
) -> EvalMetadata:
    model_name = llm_config.model.split('/')[-1]
    model_path = model_name.replace(':', '_')
    eval_note = f'_N_{eval_note}' if eval_note else ''
    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_class,
        f'{model_path}_maxiter_{max_iterations}{eval_note}',
    )
    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    logger.info(f'Using evaluation output directory: {eval_output_path}')
    metadata = EvalMetadata(
        agent_class=agent_class,
        llm_config=llm_config,
        max_iterations=max_iterations,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        git_commit=subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        .decode('utf-8')
        .strip(),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
    )
    metadata_json = metadata.model_dump_json()
    logger.info(f'Metadata: {metadata_json}')
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)
    return metadata


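# Illustrative call (paths and names are placeholders): this creates
# <eval_output_dir>/<dataset>/<agent>/<model>_maxiter_<N>/ plus a logs/
# subdirectory, and writes metadata.json inside it.
#
#   metadata = make_metadata(
#       llm_config=llm_config,
#       dataset_name='my_benchmark',
#       agent_class='CodeActAgent',
#       max_iterations=30,
#       eval_note=None,
#       eval_output_dir='evaluation/outputs',
#   )

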
def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
    skip_num: int | None = None,
):
    assert (
        'instance_id' in dataset.columns
    ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')
    finished_ids: set[str] = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(str(data[id_column]))
        logger.warning(
            f'\nOutput file {output_file} already exists. Loaded {len(finished_ids)} finished instances.'
        )
    if eval_ids:
        eval_ids_converted = [dataset[id_column].dtype.type(id) for id in eval_ids]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif skip_num and skip_num >= 0:
        skip_num = min(skip_num, len(dataset))
        dataset = dataset.iloc[skip_num:]
        logger.info(
            f'Starting evaluation with skipping first {skip_num} instances ({len(dataset)} instances to run).'
        )
        if eval_n_limit and eval_n_limit > 0:
            dataset = dataset.head(eval_n_limit)
            logger.info(f'Limiting evaluation to {eval_n_limit} instances.')
    elif eval_n_limit and eval_n_limit > 0:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')
    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if str(instance[id_column]) not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )
    return pd.DataFrame(new_dataset)


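# Illustrative call (the file name is a placeholder): because finished
# instance ids are read back from the output file, re-running after an
# interruption only returns the instances that have not been evaluated yet.
#
#   remaining = prepare_dataset(dataset, 'output.jsonl', eval_n_limit=100)

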
def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[
        [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput]
    ],
):
    use_multiprocessing = num_workers > 1
    logger.info(
        f'Evaluation started with Agent {metadata.agent_class}:\n'
        f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.\n'
    )
    pbar = tqdm(total=len(dataset))
    output_fp = open(output_file, 'a')

    def update_progress(future):
        pbar.update(1)
        output: EvalOutput = future.result() if use_multiprocessing else future
        pbar.set_description(f'Instance {output.instance_id}')
        pbar.set_postfix_str(f'Test Result: {output.test_result}')
        logger.info(
            f'Finished evaluation for instance {output.instance_id}: {str(output.test_result)[:300]}...\n'
        )
        output_fp.write(json.dumps(output.model_dump()) + '\n')
        output_fp.flush()

    try:
        if use_multiprocessing:
            with ProcessPoolExecutor(num_workers) as executor:
                futures = []
                for _, instance in dataset.iterrows():
                    future = executor.submit(
                        process_instance_func,
                        instance,
                        metadata,
                        bool(num_workers > 1),
                    )
                    future.add_done_callback(update_progress)
                    futures.append(future)
                for future in futures:
                    future.result()
        # Use a plain for loop for the single-process case for easier debugging
        else:
            assert num_workers == 1
            for _, instance in dataset.iterrows():
                output = process_instance_func(instance, metadata, False)
                update_progress(output)
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt received. Cleaning up...\n')
        cleanup()
    output_fp.close()
    logger.info('\nEvaluation finished.\n')


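# Hypothetical end-to-end sketch; process_instance is a placeholder for the
# benchmark-specific function that evaluates one instance and returns an
# EvalOutput.
#
#   output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
#   remaining = prepare_dataset(dataset, output_file, eval_n_limit=100)
#   run_evaluation(remaining, metadata, output_file, num_workers=4,
#                  process_instance_func=process_instance)

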
def reset_logger_for_multiprocessing(
    logger: logging.Logger, instance_id: str, log_dir: str
):
    """Reset the logger for multiprocessing.

    Save logs to a separate file for each process, instead of trying to write
    to the same file/console from multiple processes.
    """
    # Set up the per-instance log file
    log_file = os.path.join(
        log_dir,
        f'instance_{instance_id}.log',
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # add back the console handler to print ONE line
    logger.addHandler(get_console_handler())
    logger.info(
        f'Starting evaluation for instance {instance_id}.\n'
        f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
    )
    # Remove all existing handlers from logger
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(file_handler)
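

# Hypothetical sketch: a per-instance worker would typically call this first
# when running under multiprocessing, so each subprocess logs to its own file.
#
#   def process_instance(instance, metadata, reset_logger=True):
#       if reset_logger:
#           log_dir = os.path.join(metadata.eval_output_dir, 'logs')
#           reset_logger_for_multiprocessing(
#               logger, instance['instance_id'], log_dir
#           )
#       ...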