
[Eval,Arch] Update GPQA eval and add `headless_mode` for Controller (#2994)

* update and polish gpqa eval

* fix typo

* Update evaluation/gpqa/README.md

Co-authored-by: Graham Neubig <neubig@gmail.com>

* Update evaluation/gpqa/run_infer.py

Co-authored-by: Graham Neubig <neubig@gmail.com>

* add headless mode to all appropriate agent controller calls

* delegate set to error when in headless mode

* try to deduplicate a bit

* make headless_mode default to True and only change it to false for AgentSession

---------

Co-authored-by: Graham Neubig <neubig@gmail.com>
Xingyao Wang 1 year ago
Parent
Commit
6b16a5da0b

+ 1 - 1
agenthub/codeact_agent/action_parser.py

@@ -98,7 +98,7 @@ class CodeActActionParserCmdRun(ActionParser):
         # a command was found
         command_group = self.bash_command.group(1).strip()
         if command_group.strip() == 'exit':
-            return AgentFinishAction()
+            return AgentFinishAction(thought=thought)
         return CmdRunAction(command=command_group, thought=thought)
 
 
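This one-line change matters for the GPQA eval updated below: when `CodeActAgent` runs `exit`, its final `thought` is now preserved on `AgentFinishAction`, which is where the eval looks for the `<<FINAL_ANSWER|| ... ||FINAL_ANSWER>>` marker. A minimal sketch of that extraction, reusing the same regex as `parse_final_answer` in `evaluation/gpqa/run_infer.py` (the helper name here is illustrative, not part of the commit):

```python
import re

# Same pattern used by parse_final_answer in evaluation/gpqa/run_infer.py.
FINAL_ANSWER_RE = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)


def extract_reported_answer(thought: str) -> str | None:
    """Return the text between the FINAL_ANSWER markers, or None if absent."""
    match = FINAL_ANSWER_RE.search(thought)
    return match.group(1).strip() if match else None


# Example: a thought carried on the AgentFinishAction produced by `exit`.
print(extract_reported_answer('Reporting now.\n<<FINAL_ANSWER||\nC\n||FINAL_ANSWER>>'))  # -> 'C'
print(extract_reported_answer('exit'))  # -> None (previously the thought was dropped entirely)
```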

+ 1 - 4
evaluation/browsing_delegation/run_infer.py

@@ -67,10 +67,7 @@ def process_instance(
 
     state: State | None = asyncio.run(
         run_agent_controller(
-            agent,
-            instruction,
-            max_iterations=metadata.max_iterations,
-            sid=env_id,
+            agent, instruction, max_iterations=metadata.max_iterations, sid=env_id
         )
     )
 

+ 1 - 13
evaluation/gpqa/README.md

@@ -15,10 +15,6 @@ Further references:
 - https://paperswithcode.com/dataset/gpqa
 - https://github.com/idavidrein/gpqa
 
-## TODOs
-- [ ] Add support for other agents (currently only tested on `CodeActAgent`)
-- [ ] Complete full benchmark evaluation
-- [ ] Fix intermittent `BrowserException: Failed to start browser environment` error
 
 ## Setup Environment
 
@@ -27,19 +23,11 @@ Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/D
 
 ## Configure OpenDevin and your LLM
 
-Create a `config.toml` file if it does not exist at the root of the workspace.
+Create a `config.toml` file (you can copy from `config.template.toml`) if it does not exist at the root of the workspace.
 
 Add the following configurations:
 
 ```toml
-[core]
-max_iterations = 100
-cache_dir = "/tmp/cache"
-ssh_hostname = "localhost"
-
-[sandbox]
-enable_auto_lint = true
-
 # TODO: Change these to the model you want to evaluate
 [llm.eval_gpt4_1106_preview]
 model = "gpt-4-1106-preview"

+ 145 - 52
evaluation/gpqa/run_infer.py

@@ -22,6 +22,7 @@ import os
 import pathlib
 import random
 import re
+from typing import Callable
 
 import pandas as pd
 from datasets import load_dataset
@@ -39,51 +40,82 @@ from opendevin.core.config import config, get_llm_config_arg, get_parser
 from opendevin.core.logger import get_console_handler
 from opendevin.core.logger import opendevin_logger as logger
 from opendevin.core.main import run_agent_controller
+from opendevin.events.action import Action, AgentFinishAction, MessageAction
+from opendevin.events.observation import Observation
 from opendevin.llm.llm import LLM
 
-AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
-    'CodeActAgent': codeact_user_response,
-}
+ACTION_FORMAT = """
+<<FINAL_ANSWER||
+<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
+||FINAL_ANSWER>>
+""".strip()
+
+
+def gpqa_codeact_user_response(
+    state: State,
+    encapsulate_solution: bool = False,
+    try_parse: Callable[[Action], str] | None = None,
+) -> str:
+    msg = (
+        'Please continue working on the task on whatever approach you think is suitable.\n'
+        'Feel free to use all tools for calculations and solving the problem, and web-search for finding relevant facts during the process if needed\n'
+        'If you have finished reporting the answer in the expected format, (and only once that is done), please run the following command to submit: <execute_bash> exit </execute_bash>.\n'
+        'Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
+        'That is, when you have decided on the answer report in the following format:\n'
+        f'{ACTION_FORMAT}\n'
+        '<execute_bash> exit </execute_bash>\n'
+        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n'
+    )
+
+    return msg
+
+
+AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': codeact_user_response}
 
 AGENT_CLS_TO_INST_SUFFIX = {
     'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
 }
 
 
-def parse_final_answer(final_answer: str) -> str:
+def parse_final_answer(final_answer: str | None) -> str | None:
     """Parse the final answer from the final message generated by the agent
     to extract the final answer. The final answer is usually enclosed in the format:
     <<FINAL_ANSWER||
     <insert correct answer here>
     ||FINAL_ANSWER>>
     """
+    # to do this first extract the part enclosed in the format <<FINAL_ANSWER|| ... ||FINAL_ANSWER>>
     pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
     match = pattern.search(final_answer)
 
-    if match:
-        return match.group(1).strip()
-    else:
-        return 'No final answer found in the provided string.'
+    # and then strip it, remove any leading/trailing spaces line breaks etc.
+    answer = match.group(1).strip()
+    # finally capitalize it
+    answer = answer.upper()
+    # and then return A, B, C, D depending on whether the answer A, B, C, D is found in the final answer
+    for letter in ['A', 'B', 'C', 'D']:
+        if letter in answer:
+            return letter
 
 
-def compare_answers(predicted_answer, ground_truth):
+def compare_answers(model_output: str | None, ground_truth: str):
     """Compare the predicted answer with the ground truth answer"""
+    try:
+        # parse the final answer from model output
+        predicted_answer = parse_final_answer(model_output)
+    except Exception as e:
+        # Log the exception
+        logger.error(f'An error occurred: {e}\n defaulting to random guess ...')
+        # choose a random answer if the model output is not in the correct format
+        predicted_answer = random.choice(['A', 'B', 'C', 'D'])
+
+    logger.info('#############################################')
+    logger.info(f'Predicted answer: {predicted_answer}')
+    logger.info(f'Ground truth answer: {ground_truth}')
+    logger.info('#############################################')
     return predicted_answer == ground_truth
 
 
-def get_test_result(model_output, ground_truth):
-    """Implements the evaluation logic for GPQA
-    Checks if the output of a given instance is correct (as per the ground truth)
-    """
-    # parse the final answer from model output
-    predicted_answer = parse_final_answer(model_output)
-
-    # check if the model output matches the ground truth
-    result = compare_answers(predicted_answer, ground_truth)
-
-    return result
-
-
 def convert_instance_dict(instance):
     """Used for preprocessing the hf dataset into a format that can be used by the agent.
     Reads and extracts relevant information from the dataset instance.
@@ -163,27 +195,33 @@ def process_instance(
         # ======= Run the agent on the instance =======
         # Prepare instruction for the agent using suggested format in gpqa codebase
         instruction = f"""
-        What is the correct answer to this question:\n
-        {instance['question']}\n
+What is the correct answer to this question:\n
+{instance['question']}\n
 
-        Choices:\n
-        (A) {instance['choices'][0]}\n
-        (B) {instance['choices'][1]}\n
-        (C) {instance['choices'][2]}\n
-        (D) {instance['choices'][3]}\n
-        \n\n
+Choices:\n
+(A) {instance['choices'][0]}\n
+(B) {instance['choices'][1]}\n
+(C) {instance['choices'][2]}\n
+(D) {instance['choices'][3]}\n
+\n\n
 
-        MOST IMPORTANT: Format your response as follows:
-        <<FINAL_ANSWER||
-        <insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
-        ||FINAL_ANSWER>>
+MOST IMPORTANT: Format your response as follows:
+{ACTION_FORMAT}
 
-        Additional Instructions:
-        - You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
-        """
+Additional Instructions:
+- Do not try to solve the question in a single step. Break it down into smaller steps.
+- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
 
-        # NOTE: You can actually set slightly different instruction for different agents
-        instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]
+- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please run the following command: <execute_bash> exit </execute_bash>.
+- Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.
+    That is, when you have decided on the answer report in the following format:
+
+{ACTION_FORMAT}
+<execute_bash> exit </execute_bash>
+
+Again do not quit without reporting the answer first.
+Ok now its time to start solving the question. Good luck!
+"""
 
         # Here's how you can run the agent (similar to the `main` function) and get the final task state
         state: State | None = asyncio.run(
@@ -194,18 +232,69 @@ def process_instance(
                 fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
                     agent.__class__.__name__
                 ),
-                sid=instance.instance_id,
+                sid=f'gptq_{str(instance.instance_id)}',
             )
         )
         assert state is not None, 'State should not be None.'
 
         # ======= Attempt to evaluate the agent's edits =======
-        # get the final message from the state history (default to empty if not found)
-        final_message = state.history.get_last_agent_message()
 
+        question_choices = {
+            'A': instance['choices'][0],
+            'B': instance['choices'][1],
+            'C': instance['choices'][2],
+            'D': instance['choices'][3],
+        }
+        # get the final message from the state history (default to empty if not found)
+        found_answers = {
+            'A': False,
+            'B': False,
+            'C': False,
+            'D': False,
+        }
+        for event in state.history.get_events(reverse=True):
+            if (
+                isinstance(event, AgentFinishAction)
+                and event.source != 'user'
+                and '<<FINAL_ANSWER||' in event.thought
+            ):
+                final_message = event.thought
+                break
+            elif (
+                isinstance(event, MessageAction)
+                and event.source != 'user'
+                and '<<FINAL_ANSWER||' in event.content
+            ):
+                final_message = event.content
+                break
+            elif isinstance(event, Observation):
+                for option, option_text in question_choices.items():
+                    if option_text in event.content:
+                        found_answers[option] = True
+            else:
+                final_message = None
+
+        found_options = [option for option, found in found_answers.items() if found]
+        logger.info('#############################################')
         logger.info(f'Final message generated by the agent: {final_message}')
-
-        test_result = get_test_result(final_message, instance.correct_solution)
+        logger.info('#############################################')
+
+        # check if the model output matches the ground truth
+        test_result = compare_answers(final_message, instance.correct_solution)
+        if final_message is None and len(found_options) > 0:
+            _selected = random.choice(found_options)
+            # if the final message is None, then the agent did not report the answer in the correct format
+            # so we randomly select one of the found options and compare it with the correct solution
+            test_result = _selected == instance.correct_solution
+            logger.info('#############################################')
+            logger.info('Agent did not report the answer in the correct format.')
+            logger.info(f'Found options: {found_options}')
+            logger.info(f'Selected option: {_selected}')
+            logger.info('#############################################')
+
+        logger.info('#############################################')
+        logger.info(f'Test result: {test_result}')
+        logger.info('#############################################')
 
         # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
         # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
@@ -214,21 +303,20 @@ def process_instance(
 
         metrics = state.metrics.get() if state.metrics else None
 
-        # history is now available as a stream of events, rather than list of pairs of (Action, Observation)
-        # for compatibility with the existing output format, we can remake the pairs here
-        # remove when it becomes unnecessary
-        histories = state.history.compatibility_for_eval_history_pairs()
-
         # Save the output
         output = {
             'task_id': instance.task_id,
             'instance_id': instance.instance_id,
             'instruction': instruction,
             'metadata': metadata.model_dump(),
-            'history': histories,
+            'history': state.history.compatibility_for_eval_history_pairs(),
             'metrics': metrics,
             'error': state.last_error if state and state.last_error else None,
-            'test_result': test_result,
+            'test_result': {
+                'result': test_result,
+                'found_answers': found_answers,
+                'last_message': final_message,
+            },
         }
 
     except Exception:
@@ -267,9 +355,14 @@ if __name__ == '__main__':
     gpqa_dataset['task_id'] = gpqa_dataset.index
     # gpqa_dataset = dataset['train'].to_pandas().sort_values(by='id').reset_index(drop=True)
 
+    if args.agent_cls != 'CodeActAgent':
+        raise ValueError(
+            f'Agent class {args.agent_cls} not supported for GPQA evaluation.'
+        )
+
     metadata = make_metadata(
         llm_config=llm_config,
-        dataset_name='gpqa',
+        dataset_name=args.data_split,
         agent_class=args.agent_cls,
         max_iterations=args.max_iterations,
         eval_note=args.eval_note,

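Taken together, the new grading path first looks for a well-formed `<<FINAL_ANSWER|| ... ||FINAL_ANSWER>>` block and otherwise falls back to a random guess (or, when available, a randomly chosen option the agent was observed to mention). A self-contained sketch of that behavior, mirroring `parse_final_answer` and `compare_answers` from the diff above rather than importing them (the sample strings are illustrative):

```python
import random
import re


def parse_final_answer(final_answer: str | None) -> str | None:
    """Extract A/B/C/D from a <<FINAL_ANSWER|| ... ||FINAL_ANSWER>> block."""
    pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
    match = pattern.search(final_answer)
    answer = match.group(1).strip().upper()  # raises if there is no match; the caller catches it
    for letter in ['A', 'B', 'C', 'D']:
        if letter in answer:
            return letter


def compare_answers(model_output: str | None, ground_truth: str) -> bool:
    try:
        predicted = parse_final_answer(model_output)
    except Exception:
        # Missing or malformed answer: default to a random guess, as in the diff.
        predicted = random.choice(['A', 'B', 'C', 'D'])
    return predicted == ground_truth


print(compare_answers('<<FINAL_ANSWER||\n(B)\n||FINAL_ANSWER>>', 'B'))  # True
print(compare_answers('I believe the answer is B.', 'B'))  # True only by chance (~25%)
```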
+ 18 - 4
opendevin/controller/agent_controller.py

@@ -68,6 +68,7 @@ class AgentController:
         max_budget_per_task: float | None = MAX_BUDGET_PER_TASK,
         initial_state: State | None = None,
         is_delegate: bool = False,
+        headless_mode: bool = True,
     ):
         """Initializes a new instance of the AgentController class.
 
@@ -79,10 +80,12 @@ class AgentController:
             max_budget_per_task: The maximum budget (in USD) allowed per task, beyond which the agent will stop.
             initial_state: The initial state of the controller.
             is_delegate: Whether this controller is a delegate.
+            headless_mode: Whether the agent is run in headless mode.
         """
         self._step_lock = asyncio.Lock()
         self.id = sid
         self.agent = agent
+        self.headless_mode = headless_mode
 
         # subscribe to the event stream
         self.event_stream = event_stream
@@ -293,6 +296,9 @@ class AgentController:
             logger.debug(f'[Agent Controller {self.id}] Delegate step done')
             assert self.delegate is not None
             delegate_state = self.delegate.get_agent_state()
+            logger.debug(
+                f'[Agent Controller {self.id}] Delegate state: {delegate_state}'
+            )
             if delegate_state == AgentState.ERROR:
                 # close the delegate upon error
                 await self.delegate.close()
@@ -345,10 +351,18 @@ class AgentController:
                 self.state.traffic_control_state = TrafficControlState.NORMAL
             else:
                 self.state.traffic_control_state = TrafficControlState.THROTTLING
-                await self.report_error(
-                    f'Agent reached maximum number of iterations, task paused. {TRAFFIC_CONTROL_REMINDER}'
-                )
-                await self.set_agent_state_to(AgentState.PAUSED)
+                if self.headless_mode:
+                    # set to ERROR state if running in headless mode
+                    # since user cannot resume on the web interface
+                    await self.report_error(
+                        'Agent reached maximum number of iterations in headless mode, task stopped.'
+                    )
+                    await self.set_agent_state_to(AgentState.ERROR)
+                else:
+                    await self.report_error(
+                        f'Agent reached maximum number of iterations, task paused. {TRAFFIC_CONTROL_REMINDER}'
+                    )
+                    await self.set_agent_state_to(AgentState.PAUSED)
                 return
         elif self.max_budget_per_task is not None:
             current_cost = self.state.metrics.accumulated_cost

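In short, the traffic-control path now keys off `headless_mode`: with no web UI to resume from, exceeding `max_iterations` becomes a terminal error instead of a pause. A simplified illustration of that decision (not the controller's actual code, which also reports the error and applies the same logic to budget limits):

```python
from enum import Enum


class AgentState(Enum):  # stand-in for opendevin's AgentState, for illustration only
    PAUSED = 'paused'
    ERROR = 'error'


def state_on_iteration_limit(headless_mode: bool) -> AgentState:
    """State the controller moves to once max_iterations is exceeded."""
    return AgentState.ERROR if headless_mode else AgentState.PAUSED


assert state_on_iteration_limit(headless_mode=True) is AgentState.ERROR    # eval scripts / CLI
assert state_on_iteration_limit(headless_mode=False) is AgentState.PAUSED  # web UI via AgentSession
```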
+ 3 - 0
opendevin/core/main.py

@@ -40,6 +40,7 @@ async def run_agent_controller(
     sandbox: Sandbox | None = None,
     runtime_tools_config: dict | None = None,
     sid: str | None = None,
+    headless_mode: bool = True,
 ) -> State | None:
     """Main coroutine to run the agent controller with task input flexibility.
     It's only used when you launch opendevin backend directly via cmdline.
@@ -49,6 +50,7 @@ async def run_agent_controller(
         exit_on_message: quit if agent asks for a message from user (optional)
         fake_user_response_fn: An optional function that receives the current state (could be None) and returns a fake user response.
         sandbox: An optional sandbox to run the agent in.
+        headless_mode: Whether the agent is run in headless mode.
     """
     # Logging
     logger.info(
@@ -75,6 +77,7 @@ async def run_agent_controller(
         max_budget_per_task=max_budget_per_task,
         event_stream=event_stream,
         initial_state=initial_state,
+        headless_mode=headless_mode,
     )
 
     # runtime and tools

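Because `headless_mode` defaults to `True` here, existing eval call sites (such as the `browsing_delegation` and `gpqa` scripts above) keep working unchanged; only `AgentSession` below opts out. A sketch of such a call site (`agent`, `instruction`, `metadata`, and `env_id` are placeholders taken from those scripts, not new APIs):

```python
import asyncio

from opendevin.core.main import run_agent_controller

state = asyncio.run(
    run_agent_controller(
        agent,  # an instantiated Agent, e.g. CodeActAgent
        instruction,
        max_iterations=metadata.max_iterations,
        sid=env_id,
        # headless_mode is not passed: it defaults to True, which is what batch
        # evaluation wants; the web UI path (AgentSession) passes False explicitly.
    )
)
```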
+ 3 - 0
opendevin/server/session/agent.py

@@ -101,6 +101,9 @@ class AgentSession:
             agent=agent,
             max_iterations=int(max_iterations),
             confirmation_mode=confirmation_mode,
+            # AgentSession is designed to communicate with the frontend, so we don't want to
+            # run the agent in headless mode.
+            headless_mode=False,
         )
         try:
             agent_state = State.restore_from_session(self.sid)