eval-runner.yml

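# Runs the SWE-Bench Lite evaluation when a PR is labeled 'eval-this' or the workflow is
# dispatched manually, then posts the resulting report to GitHub (PR/issue comment) and Slack.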
name: Run SWE-Bench Evaluation

on:
  pull_request:
    types: [labeled]
  workflow_dispatch:
    inputs:
      reason:
        description: "Reason for manual trigger"
        required: true
        default: ""

env:
  N_PROCESSES: 32 # Global configuration for number of parallel processes for evaluation

jobs:
  run-evaluation:
    if: github.event.label.name == 'eval-this' || github.event_name != 'pull_request'
    runs-on: ubuntu-latest
    permissions:
      contents: "read"
      id-token: "write"
      pull-requests: "write"
      issues: "write"
    strategy:
      matrix:
        python-version: ["3.12"]
    steps:
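      # Environment setup: checkout, Poetry (via pipx), and Python with Poetry dependency caching.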
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install poetry via pipx
        run: pipx install poetry

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: "poetry"
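
      # Acknowledge label-triggered runs so the PR author knows the evaluation has started.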
      - name: Comment on PR if 'eval-this' label is present
        if: github.event_name == 'pull_request' && github.event.label.name == 'eval-this'
        uses: KeisukeYamashita/create-comment@v1
        with:
          unique: false
          comment: |
            Hi! I started running the evaluation on your PR. You will receive a comment with the results shortly.

      - name: Install Python dependencies using Poetry
        run: poetry install
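
      # Write a minimal LLM config for the evaluation. The step below produces roughly:
      #   [llm.eval]
      #   model = "deepseek/deepseek-chat"
      #   api_key = "<DEEPSEEK_API_KEY from secrets>"
      #   temperature = 0.0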
      - name: Configure config.toml for evaluation
        env:
          DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_LLM_API_KEY }}
        run: |
          echo "[llm.eval]" > config.toml
          echo "model = \"deepseek/deepseek-chat\"" >> config.toml
          echo "api_key = \"$DEEPSEEK_API_KEY\"" >> config.toml
          echo "temperature = 0.0" >> config.toml
      - name: Run SWE-Bench evaluation
        env:
          ALLHANDS_API_KEY: ${{ secrets.ALLHANDS_EVAL_RUNTIME_API_KEY }}
          RUNTIME: remote
          SANDBOX_REMOTE_RUNTIME_API_URL: https://runtime.eval.all-hands.dev
          EVAL_DOCKER_IMAGE_PREFIX: us-central1-docker.pkg.dev/evaluation-092424/swe-bench-images
        run: |
          poetry run ./evaluation/benchmarks/swe_bench/scripts/run_infer.sh llm.eval HEAD CodeActAgent 300 30 $N_PROCESSES "princeton-nlp/SWE-bench_Lite" test
          OUTPUT_FOLDER=$(find evaluation/evaluation_outputs/outputs/princeton-nlp__SWE-bench_Lite-test/CodeActAgent -name "deepseek-chat_maxiter_50_N_*-no-hint-run_1" -type d | head -n 1)
          echo "OUTPUT_FOLDER for SWE-bench evaluation: $OUTPUT_FOLDER"
          poetry run ./evaluation/benchmarks/swe_bench/scripts/eval_infer_remote.sh $OUTPUT_FOLDER/output.jsonl $N_PROCESSES "princeton-nlp/SWE-bench_Lite" test
          poetry run ./evaluation/benchmarks/swe_bench/scripts/eval/summarize_outputs.py $OUTPUT_FOLDER/output.jsonl > summarize_outputs.log 2>&1
          echo "SWEBENCH_REPORT<<EOF" >> $GITHUB_ENV
          cat summarize_outputs.log >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV
      - name: Create tar.gz of evaluation outputs
        run: |
          TIMESTAMP=$(date +'%y-%m-%d-%H-%M')
          tar -czvf evaluation_outputs_${TIMESTAMP}.tar.gz evaluation/evaluation_outputs/outputs

      - name: Upload evaluation results as artifact
        uses: actions/upload-artifact@v4
        id: upload_results_artifact
        with:
          name: evaluation-outputs
          path: evaluation_outputs_*.tar.gz

      - name: Get artifact URL
        run: echo "ARTIFACT_URL=${{ steps.upload_results_artifact.outputs.artifact-url }}" >> $GITHUB_ENV
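
      # Mirror the outputs to Google Cloud Storage under a timestamp- and trigger-specific prefix.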
      - name: Authenticate to Google Cloud
        uses: 'google-github-actions/auth@v2'
        with:
          credentials_json: ${{ secrets.GCP_RESEARCH_OBJECT_CREATOR_SA_KEY }}

      - name: Set timestamp and trigger reason
        run: |
          echo "TIMESTAMP=$(date +'%Y-%m-%d-%H-%M')" >> $GITHUB_ENV
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "TRIGGER_REASON=pr-${{ github.event.pull_request.number }}" >> $GITHUB_ENV
          elif [[ "${{ github.event_name }}" == "schedule" ]]; then
            echo "TRIGGER_REASON=schedule" >> $GITHUB_ENV
          else
            echo "TRIGGER_REASON=manual-${{ github.event.inputs.reason }}" >> $GITHUB_ENV
          fi

      - name: Upload evaluation results to Google Cloud Storage
        uses: 'google-github-actions/upload-cloud-storage@v2'
        with:
          path: 'evaluation/evaluation_outputs/outputs'
          destination: 'openhands-oss-eval-results/${{ env.TIMESTAMP }}-${{ env.TRIGGER_REASON }}'
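
      # Post the report as a comment: on the triggering PR, or on issue #4504 for non-PR runs.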
      - name: Comment with evaluation results and artifact link
        id: create_comment
        uses: KeisukeYamashita/create-comment@v1
        with:
          number: ${{ github.event_name == 'pull_request' && github.event.pull_request.number || 4504 }}
          unique: false
          comment: |
            Triggered by: ${{ github.event_name == 'pull_request' && format('Pull Request (eval-this label on PR #{0})', github.event.pull_request.number) || github.event_name == 'schedule' && 'Daily Schedule' || format('Manual Trigger: {0}', github.event.inputs.reason) }}
            Commit: ${{ github.sha }}
            **SWE-Bench Evaluation Report**
            ${{ env.SWEBENCH_REPORT }}
            ---
            You can download the full evaluation outputs [here](${{ env.ARTIFACT_URL }}).
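
      # Notify Slack with the trigger details and a link to the summary comment above.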
      - name: Post to a Slack channel
        id: slack
        uses: slackapi/slack-github-action@v1.27.0
        with:
          channel-id: 'C07SVQSCR6F'
          slack-message: "*Evaluation Trigger:* ${{ github.event_name == 'pull_request' && format('Pull Request (eval-this label on PR #{0})', github.event.pull_request.number) || github.event_name == 'schedule' && 'Daily Schedule' || format('Manual Trigger: {0}', github.event.inputs.reason) }}\n\nLink to summary: [here](https://github.com/${{ github.repository }}/issues/${{ github.event_name == 'pull_request' && github.event.pull_request.number || 4504 }}#issuecomment-${{ steps.create_comment.outputs.comment-id }})"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.EVAL_NOTIF_SLACK_BOT_TOKEN }}