run_infer.sh

#!/bin/bash
set -eo pipefail

source "evaluation/utils/version_control.sh"

# Positional arguments
MODEL_CONFIG=$1
COMMIT_HASH=$2
USE_KNOWLEDGE=$3
AGENT=$4
EVAL_LIMIT=$5
NUM_WORKERS=$6

if [ -z "$NUM_WORKERS" ]; then
  NUM_WORKERS=1
  echo "Number of workers not specified, using default $NUM_WORKERS"
fi

checkout_eval_branch

if [ -z "$AGENT" ]; then
  echo "Agent not specified, using default CodeActAgent"
  AGENT="CodeActAgent"
fi

if [ -z "$USE_KNOWLEDGE" ]; then
  echo "Use knowledge not specified, using default false"
  USE_KNOWLEDGE=false
fi

get_openhands_version

echo "AGENT: $AGENT"
echo "OPENHANDS_VERSION: $OPENHANDS_VERSION"
echo "MODEL_CONFIG: $MODEL_CONFIG"

COMMAND="poetry run python evaluation/benchmarks/scienceagentbench/run_infer.py \
  --agent-cls $AGENT \
  --llm-config $MODEL_CONFIG \
  --use_knowledge $USE_KNOWLEDGE \
  --max-iterations 30 \
  --eval-num-workers $NUM_WORKERS \
  --eval-note $OPENHANDS_VERSION"

if [ -n "$EVAL_LIMIT" ]; then
  echo "EVAL_LIMIT: $EVAL_LIMIT"
  COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT"
fi

# Run the command
eval $COMMAND
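
As a usage sketch: the script takes the six positional arguments in the order shown below. The script path and the specific model config, commit hash, and argument values are illustrative placeholders, not values defined by the script itself.

./run_infer.sh <model_config> <commit_hash> <use_knowledge> <agent> <eval_limit> <num_workers>
./run_infer.sh llm.eval_gpt4o HEAD true CodeActAgent 10 4   # example values; adjust to your setup

Trailing arguments may be omitted, in which case the script falls back to the defaults it sets above: CodeActAgent, use_knowledge=false, no eval limit, and 1 worker.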