run_analysis.py

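"""Analyze evaluation results from a JSONL output file: split instances into
passed and failed cases, classify each failure into an error category (E1-E5)
with an LLM, and print a summary of costs and per-category error counts."""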
import json
import os
import pprint

import tqdm

from opendevin.core.config import config, get_llm_config_arg, get_parser
from opendevin.core.logger import opendevin_logger as logger
from opendevin.llm.llm import LLM

def extract_test_results(
    res_file_path: str,
) -> tuple[list[dict], list[dict], list[float]]:
    """Split a JSONL results file into passed/failed cases and per-instance costs."""
    passed = []
    failed = []
    costs = []
    instance_ids = set()
    instances = []
    with open(res_file_path, 'r') as file:
        for line in file:
            data = json.loads(line.strip())
            success = data['metrics']['success']
            # Skip duplicates so each instance_id is only counted once.
            if data['instance_id'] in instance_ids:
                print(f'WARNING: Duplicate instance_id found: {data["instance_id"]}')
                continue
            instance_ids.add(data['instance_id'])
            instances.append(data)
            if success:
                passed.append(
                    {
                        'instance_id': data['instance_id'],
                        'repo': data['repo'],
                        'instruction': data['instruction'],
                        'eval_script': data['eval_script'],
                        'eval_exit_code': data['eval_exit_code'],
                        'eval_output': data['eval_output'],
                        'accumulated_cost': data['metrics']['accumulated_cost'],
                    }
                )
            else:
                failed.append(
                    {
                        'instance_id': data['instance_id'],
                        'repo': data['repo'],
                        'instruction': data['instruction'],
                        'metadata': data['metadata'],
                        'history': data['history'],
                        'eval_script': data['eval_script'],
                        'eval_exit_code': data['eval_exit_code'],
                        'eval_output': data['eval_output'],
                        'accumulated_cost': data['metrics']['accumulated_cost'],
                    }
                )
            costs.append(data['metrics']['accumulated_cost'])

    # Rewrite the results file sorted by instance_id.
    instances.sort(key=lambda x: x['instance_id'])
    with open(res_file_path, 'w') as file:
        for instance in instances:
            file.write(json.dumps(instance) + '\n')

    return passed, failed, costs

def classify_error(llm: LLM, failed_case: dict) -> str:
    """Ask the LLM to assign one of the E1-E5 error categories to a failed case."""
    prompt = f"""
Please classify the error for the following failed case based on the history and eval_output:

Instruction:
{failed_case['instruction']}

Eval Script:
{failed_case['eval_script']}

History:
{failed_case['history']}

Eval Output:
{failed_case['eval_output']}

The error categories are:
E1: Hallucination Errors - The model misinterpreted the user's intention, misplaced Python code and bash script, or generated random or irrelevant code.
E2: Lack of Knowledge or Information - The model lacks sufficient information or domain-specific knowledge to satisfy the user's requirements.
E3: Knowledge Manipulation - The model failed to integrate or manipulate information properly.
E4: Syntax Errors - The model generated code with syntax errors.
E5: Operational Error - The model gave up easily or exited without finishing the tasks.

Please provide only the error category (E1, E2, E3, E4, or E5) without any explanation.
"""
    try:
        response = llm.completion(messages=[{'content': prompt, 'role': 'user'}])
        error_category = response.choices[0].message['content'].strip()
    except Exception as e:
        logger.error(
            f"Failed to classify the error for the failed case: {failed_case['instance_id']}"
        )
        logger.error(e)
        # Fall back to manual classification when the LLM call fails.
        error_category = input(
            failed_case['instruction']
            + ': '
            + failed_case['eval_script']
            + ' - '
            + failed_case['eval_output']
        )

    if error_category not in ['E1', 'E2', 'E3', 'E4', 'E5']:
        raise ValueError(f'Invalid error category: {error_category}')
    return error_category

if __name__ == '__main__':
    parser = get_parser()
    parser.add_argument(
        '--json_file_path',
        type=str,
        required=True,
        help='Path to the jsonl file containing the evaluation results',
    )
    args, _ = parser.parse_known_args()

    # Check https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/README.md#configure-opendevin-and-your-llm
    # for details of how to set `llm_config`
    if args.llm_config:
        specified_llm_config = get_llm_config_arg(args.llm_config)
        if specified_llm_config:
            config.llm = specified_llm_config
    logger.info(f'Config for evaluation: {config}')
    # Build the LLM from the (possibly overridden) config so `llm` is defined
    # even when no LLM config override is passed on the command line.
    llm = LLM(llm_config=config.llm)

    passed, new_failed, costs = extract_test_results(args.json_file_path)

    # Load any previously classified failures so they are not re-classified.
    failed = []
    failed_file_path = args.json_file_path.replace('.jsonl', '_failed.jsonl')
    if os.path.exists(failed_file_path):
        with open(failed_file_path, 'r') as file:
            for line in file:
                failed.append(json.loads(line.strip()))
        print(f'Loaded {len(failed)} failed cases from {failed_file_path}')

    for failed_case in tqdm.tqdm(new_failed):
        if failed_case['instance_id'] in [case['instance_id'] for case in failed]:
            continue
        error_category = classify_error(llm, failed_case)
        failed_case['error_category'] = error_category
        failed.append(failed_case)
        # Append each newly classified case immediately so progress survives interruptions.
        with open(failed_file_path, 'a') as file:
            file.write(json.dumps(failed_case) + '\n')

    # Print the summary
    print('Summary:')
    print(f'Passed: {len(passed)}')
    print(f'Failed: {len(failed)}')
    print(f'Costs: {costs}')
    print('Failed cases:')
    error_categories = {}
    for case in failed:
        error_category = case['error_category']
        if error_category not in error_categories:
            error_categories[error_category] = 0
        error_categories[error_category] += 1
    pprint.pprint(error_categories)
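

# Example invocation (a sketch, not part of the original file): the output path
# below is hypothetical, and the LLM override flag is whatever `get_parser()`
# exposes as `args.llm_config`:
#
#   python run_analysis.py \
#       --json_file_path evaluation/outputs/output.jsonl \
#       --llm-config <your-llm-config-name>
#
# The script rewrites the input JSONL sorted by instance_id, appends newly
# classified failures to `<input>_failed.jsonl`, and prints per-category counts.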