run_infer.sh
  1. #!/bin/bash
  2. set -eo pipefail
  3. source "evaluation/utils/version_control.sh"
  4. MODEL_CONFIG=$1
  5. COMMIT_HASH=$2
  6. AGENT=$3
  7. DATASET=$4
  8. EVAL_LIMIT=$5
  9. NUM_WORKERS=$6
  10. if [ -z "$NUM_WORKERS" ]; then
  11. NUM_WORKERS=1
  12. echo "Number of workers not specified, use default $NUM_WORKERS"
  13. fi
  14. checkout_eval_branch
  15. if [ -z "$AGENT" ]; then
  16. echo "Agent not specified, use default CodeActAgent"
  17. AGENT="CodeActAgent"
  18. fi
  19. get_agent_version
  20. if [ -z "$DATASET" ]; then
  21. echo "Dataset not specified, use default 'things'"
  22. DATASET="things"
  23. fi
  24. # check if OPENAI_API_KEY is set
  25. if [ -z "$OPENAI_API_KEY" ]; then
  26. echo "OPENAI_API_KEY is not set, please set it to run the script"
  27. exit 1
  28. fi
  29. # IMPORTANT: Because Agent's prompt changes fairly often in the rapidly evolving codebase of OpenHands
  30. # We need to track the version of Agent in the evaluation to make sure results are comparable
  31. AGENT_VERSION=v$(poetry run python -c "import openhands.agenthub; from openhands.controller.agent import Agent; print(Agent.get_cls('$AGENT').VERSION)")
  32. echo "AGENT: $AGENT"
  33. echo "AGENT_VERSION: $AGENT_VERSION"
  34. echo "MODEL_CONFIG: $MODEL_CONFIG"
  35. echo "DATASET: $DATASET"
  36. COMMAND="poetry run python evaluation/EDA/run_infer.py \
  37. --agent-cls $AGENT \
  38. --llm-config $MODEL_CONFIG \
  39. --dataset $DATASET \
  40. --data-split test \
  41. --max-iterations 20 \
  42. --OPENAI_API_KEY $OPENAI_API_KEY \
  43. --max-chars 10000000 \
  44. --eval-num-workers $NUM_WORKERS \
  45. --eval-note ${AGENT_VERSION}_${DATASET}"
  46. if [ -n "$EVAL_LIMIT" ]; then
  47. echo "EVAL_LIMIT: $EVAL_LIMIT"
  48. COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT"
  49. fi
  50. # Run the command
  51. echo $COMMAND
  52. eval $COMMAND