# test_arg_parser.py
  1. import pytest
  2. from opendevin.core.config import get_parser
  3. def test_help_message(capsys):
  4. parser = get_parser()
  5. with pytest.raises(SystemExit): # `--help` causes SystemExit
  6. parser.parse_args(['--help'])
  7. captured = capsys.readouterr()
  8. expected_help_message = """
  9. usage: pytest [-h] [-d DIRECTORY] [-t TASK] [-f FILE] [-c AGENT_CLS]
  10. [-m MODEL_NAME] [-i MAX_ITERATIONS] [-b MAX_BUDGET_PER_TASK]
  11. [-n MAX_CHARS] [--eval-output-dir EVAL_OUTPUT_DIR]
  12. [--eval-n-limit EVAL_N_LIMIT]
  13. [--eval-num-workers EVAL_NUM_WORKERS] [--eval-note EVAL_NOTE]
  14. [-l LLM_CONFIG]
  15. Run an agent with a specific task
  16. options:
  17. -h, --help show this help message and exit
  18. -d DIRECTORY, --directory DIRECTORY
  19. The working directory for the agent
  20. -t TASK, --task TASK The task for the agent to perform
  21. -f FILE, --file FILE Path to a file containing the task. Overrides -t if
  22. both are provided.
  23. -c AGENT_CLS, --agent-cls AGENT_CLS
  24. The agent class to use
  25. -m MODEL_NAME, --model-name MODEL_NAME
  26. The (litellm) model name to use
  27. -i MAX_ITERATIONS, --max-iterations MAX_ITERATIONS
  28. The maximum number of iterations to run the agent
  29. -b MAX_BUDGET_PER_TASK, --max-budget-per-task MAX_BUDGET_PER_TASK
  30. The maximum budget allowed per task, beyond which the
  31. agent will stop.
  32. -n MAX_CHARS, --max-chars MAX_CHARS
  33. The maximum number of characters to send to and
  34. receive from LLM per task
  35. --eval-output-dir EVAL_OUTPUT_DIR
  36. The directory to save evaluation output
  37. --eval-n-limit EVAL_N_LIMIT
  38. The number of instances to evaluate
  39. --eval-num-workers EVAL_NUM_WORKERS
  40. The number of workers to use for evaluation
  41. --eval-note EVAL_NOTE
  42. The note to add to the evaluation directory
  43. -l LLM_CONFIG, --llm-config LLM_CONFIG
  44. The group of llm settings, e.g. a [llama3] section in
  45. the toml file. Overrides model if both are provided.
  46. """
  47. actual_lines = captured.out.strip().split('\n')
  48. print('\n'.join(actual_lines))
  49. expected_lines = expected_help_message.strip().split('\n')
  50. # Ensure both outputs have the same number of lines
  51. assert len(actual_lines) == len(
  52. expected_lines
  53. ), 'The number of lines in the help message does not match.'
  54. # Compare each line
  55. for actual, expected in zip(actual_lines, expected_lines):
  56. assert (
  57. actual.strip() == expected.strip()
  58. ), f"Expected '{expected}', got '{actual}'"