# Makefile (approx. 5.0 KB; rendering artifacts from the original paste removed)
  1. # Makefile for OpenDevin project
  2. # Variables
  3. DOCKER_IMAGE = ghcr.io/opendevin/sandbox
  4. BACKEND_PORT = 3000
  5. BACKEND_HOST = "127.0.0.1:$(BACKEND_PORT)"
  6. FRONTEND_PORT = 3001
  7. DEFAULT_WORKSPACE_DIR = "./workspace"
  8. DEFAULT_MODEL = "gpt-4-0125-preview"
  9. CONFIG_FILE = config.toml
  10. PRECOMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml"
  11. # Build
  12. build:
  13. @echo "Building project..."
  14. @echo "Pulling Docker image..."
  15. @docker pull $(DOCKER_IMAGE)
  16. @echo "Installing Python dependencies..."
  17. @curl -sSL https://install.python-poetry.org | python3 -
  18. @poetry install --without evaluation
  19. @echo "Activating Poetry shell..."
  20. @echo "Installing pre-commit hooks..."
  21. @git config --unset-all core.hooksPath || true
  22. @poetry run pre-commit install --config $(PRECOMMIT_CONFIG_PATH)
  23. @echo "Setting up frontend environment..."
  24. @echo "Detect Node.js version..."
  25. @cd frontend && node ./scripts/detect-node-version.js
  26. @cd frontend && if [ -f node_modules/.package-lock.json ]; then \
  27. echo "This project currently uses \"pnpm\" for dependency management. It has detected that dependencies were previously installed using \"npm\" and has automatically deleted the \"node_modules\" directory to prevent unnecessary conflicts."; \
  28. rm -rf node_modules; \
  29. fi
  30. @which corepack > /dev/null || (echo "Installing corepack..." && npm install -g corepack)
  31. @cd frontend && corepack enable && pnpm install && pnpm run make-i18n
# Start backend
# Runs the FastAPI app (opendevin.server.listen:app) with uvicorn on
# $(BACKEND_PORT), in the foreground, inside the Poetry virtualenv.
start-backend:
	@echo "Starting backend..."
	@poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT)
# Start frontend
# Runs the frontend dev server with BACKEND_HOST/FRONTEND_PORT exported as
# environment variables so the frontend knows where the backend lives.
# (The quotes embedded in $(BACKEND_HOST) are stripped by the shell here.)
start-frontend:
	@echo "Starting frontend..."
	@cd frontend && BACKEND_HOST=$(BACKEND_HOST) FRONTEND_PORT=$(FRONTEND_PORT) pnpm run start
  40. # Run the app
  41. run:
  42. @echo "Running the app..."
  43. @if [ "$(OS)" = "Windows_NT" ]; then \
  44. echo "`make run` is not supported on Windows. Please run `make start-frontend` and `make start-backend` separately."; \
  45. exit 1; \
  46. fi
  47. @mkdir -p logs
  48. @poetry run nohup uvicorn opendevin.server.listen:app --port $(BACKEND_PORT) > logs/backend_$(shell date +'%Y%m%d_%H%M%S').log 2>&1 &
  49. @echo "Waiting for the backend to start..."
  50. @until nc -z localhost $(BACKEND_PORT); do sleep 0.1; done
  51. @cd frontend && pnpm run start -- --port $(FRONTEND_PORT)
  52. # Setup config.toml
  53. setup-config:
  54. @echo "Setting up config.toml..."
  55. @read -p "Enter your LLM Model name (see https://docs.litellm.ai/docs/providers for full list) [default: $(DEFAULT_MODEL)]: " llm_model; \
  56. llm_model=$${llm_model:-$(DEFAULT_MODEL)}; \
  57. echo "LLM_MODEL=\"$$llm_model\"" > $(CONFIG_FILE).tmp
  58. @read -p "Enter your LLM API key: " llm_api_key; \
  59. echo "LLM_API_KEY=\"$$llm_api_key\"" >> $(CONFIG_FILE).tmp
  60. @read -p "Enter your LLM Base URL [mostly used for local LLMs, leave blank if not needed - example: http://localhost:5001/v1/]: " llm_base_url; \
  61. if [[ ! -z "$$llm_base_url" ]]; then echo "LLM_BASE_URL=\"$$llm_base_url\"" >> $(CONFIG_FILE).tmp; fi
  62. @echo "Enter your LLM Embedding Model\nChoices are openai, azureopenai, llama2 or leave blank to default to 'BAAI/bge-small-en-v1.5' via huggingface"; \
  63. read -p "> " llm_embedding_model; \
  64. echo "LLM_EMBEDDING_MODEL=\"$$llm_embedding_model\"" >> $(CONFIG_FILE).tmp; \
  65. if [ "$$llm_embedding_model" = "llama2" ]; then \
  66. read -p "Enter the local model URL (will overwrite LLM_BASE_URL): " llm_base_url; \
  67. echo "LLM_BASE_URL=\"$$llm_base_url\"" >> $(CONFIG_FILE).tmp; \
  68. elif [ "$$llm_embedding_model" = "azureopenai" ]; then \
  69. read -p "Enter the Azure endpoint URL (will overwrite LLM_BASE_URL): " llm_base_url; \
  70. echo "LLM_BASE_URL=\"$$llm_base_url\"" >> $(CONFIG_FILE).tmp; \
  71. read -p "Enter the Azure LLM Deployment Name: " llm_deployment_name; \
  72. echo "LLM_DEPLOYMENT_NAME=\"$$llm_deployment_name\"" >> $(CONFIG_FILE).tmp; \
  73. read -p "Enter the Azure API Version: " llm_api_version; \
  74. echo "LLM_API_VERSION=\"$$llm_api_version\"" >> $(CONFIG_FILE).tmp; \
  75. fi
  76. @read -p "Enter your workspace directory [default: $(DEFAULT_WORKSPACE_DIR)]: " workspace_dir; \
  77. workspace_dir=$${workspace_dir:-$(DEFAULT_WORKSPACE_DIR)}; \
  78. echo "WORKSPACE_DIR=\"$$workspace_dir\"" >> $(CONFIG_FILE).tmp
  79. @mv $(CONFIG_FILE).tmp $(CONFIG_FILE)
# Help
# Prints a short description of every public target.
# NOTE(review): `build-eval` is advertised here and listed in .PHONY, but no
# build-eval rule is visible in this file — confirm it is defined elsewhere.
help:
	@echo "Usage: make [target]"
	@echo "Targets:"
	@echo " build - Build project, including environment setup and dependencies."
	@echo " build-eval - Build project evaluation pipeline, including environment setup and dependencies."
	@echo " start-backend - Start the backend server for the OpenDevin project."
	@echo " start-frontend - Start the frontend server for the OpenDevin project."
	@echo " run - Run the OpenDevin application, starting both backend and frontend servers."
	@echo " Backend Log file will be stored in the 'logs' directory."
	@echo " setup-config - Setup the configuration for OpenDevin by providing LLM API key, LLM Model name, and workspace directory."
	@echo " help - Display this help message, providing information on available targets."
# Phony targets
# These names are commands, not files; declaring them .PHONY keeps them
# working even if a file with one of these names ever appears.
.PHONY: build build-eval start-backend start-frontend run setup-config help