run_infer.sh

#!/bin/bash
set -eo pipefail

source "evaluation/utils/version_control.sh"

# Positional arguments
MODEL_CONFIG=$1
COMMIT_HASH=$2
SPLIT=$3
AGENT=$4
EVAL_LIMIT=$5
NUM_WORKERS=$6

if [ -z "$NUM_WORKERS" ]; then
  NUM_WORKERS=1
  echo "Number of workers not specified, use default $NUM_WORKERS"
fi
checkout_eval_branch

if [ -z "$MODEL_CONFIG" ]; then
  echo "Model config not specified, use default"
  MODEL_CONFIG="eval_gpt4_1106_preview"
fi

if [ -z "$AGENT" ]; then
  echo "Agent not specified, use default CodeActAgent"
  AGENT="CodeActAgent"
fi

get_agent_version

echo "AGENT: $AGENT"
echo "AGENT_VERSION: $AGENT_VERSION"
echo "MODEL_CONFIG: $MODEL_CONFIG"

# Build the inference command
COMMAND="poetry run python evaluation/ml_bench/run_infer.py \
  --agent-cls $AGENT \
  --llm-config $MODEL_CONFIG \
  --max-iterations 10 \
  --eval-num-workers $NUM_WORKERS \
  --eval-note $AGENT_VERSION"

# Optionally limit the number of evaluated instances
if [ -n "$EVAL_LIMIT" ]; then
  echo "EVAL_LIMIT: $EVAL_LIMIT"
  COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT"
fi

# Optionally select a dataset split
if [ -n "$SPLIT" ]; then
  echo "SPLIT: $SPLIT"
  COMMAND="$COMMAND --eval-split $SPLIT"
fi

# Run the command
eval $COMMAND
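
A minimal example invocation, assuming the script is run from the repository root (so the relative source path resolves); the script path, split name, and argument values below are placeholders, not prescribed by the script itself:

# Argument order: MODEL_CONFIG COMMIT_HASH SPLIT AGENT EVAL_LIMIT NUM_WORKERS
# (path and values are illustrative; unset trailing arguments fall back to the defaults above)
bash evaluation/ml_bench/run_infer.sh eval_gpt4_1106_preview HEAD full CodeActAgent 10 2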