# runtime_build.py
  1. import argparse
  2. import os
  3. import shutil
  4. import subprocess
  5. import tempfile
  6. import docker
  7. import toml
  8. from dirhash import dirhash
  9. from jinja2 import Environment, FileSystemLoader
  10. import opendevin
  11. from opendevin.core.logger import opendevin_logger as logger
  12. RUNTIME_IMAGE_REPO = os.getenv(
  13. 'OD_RUNTIME_RUNTIME_IMAGE_REPO', 'ghcr.io/opendevin/od_runtime'
  14. )
  15. def _get_package_version():
  16. """Read the version from pyproject.toml as the other one may be outdated."""
  17. project_root = os.path.dirname(os.path.dirname(os.path.abspath(opendevin.__file__)))
  18. pyproject_path = os.path.join(project_root, 'pyproject.toml')
  19. with open(pyproject_path, 'r') as f:
  20. pyproject_data = toml.load(f)
  21. return pyproject_data['tool']['poetry']['version']
  22. def _create_project_source_dist():
  23. """Create a source distribution of the project. Return the path to the tarball."""
  24. # Copy the project directory to the container
  25. # get the location of "opendevin" package
  26. project_root = os.path.dirname(os.path.dirname(os.path.abspath(opendevin.__file__)))
  27. logger.info(f'Using project root: {project_root}')
  28. # run "python -m build -s" on project_root
  29. result = subprocess.run(['python', '-m', 'build', '-s', project_root])
  30. if result.returncode != 0:
  31. logger.error(f'Build failed: {result}')
  32. raise Exception(f'Build failed: {result}')
  33. # Fetch the correct version from pyproject.toml
  34. package_version = _get_package_version()
  35. tarball_path = os.path.join(
  36. project_root, 'dist', f'opendevin-{package_version}.tar.gz'
  37. )
  38. if not os.path.exists(tarball_path):
  39. logger.error(f'Source distribution not found at {tarball_path}')
  40. raise Exception(f'Source distribution not found at {tarball_path}')
  41. logger.info(f'Source distribution created at {tarball_path}')
  42. return tarball_path
  43. def _put_source_code_to_dir(temp_dir: str):
  44. """Put the source code of OpenDevin to the temp_dir/code."""
  45. tarball_path = _create_project_source_dist()
  46. filename = os.path.basename(tarball_path)
  47. filename = filename.removesuffix('.tar.gz')
  48. # move the tarball to temp_dir
  49. _res = shutil.copy(tarball_path, os.path.join(temp_dir, 'project.tar.gz'))
  50. if _res:
  51. os.remove(tarball_path)
  52. logger.info(
  53. f'Source distribution moved to {os.path.join(temp_dir, "project.tar.gz")}'
  54. )
  55. # unzip the tarball
  56. shutil.unpack_archive(os.path.join(temp_dir, 'project.tar.gz'), temp_dir)
  57. # remove the tarball
  58. os.remove(os.path.join(temp_dir, 'project.tar.gz'))
  59. # rename the directory to the 'code'
  60. os.rename(os.path.join(temp_dir, filename), os.path.join(temp_dir, 'code'))
  61. logger.info(f'Unpacked source code directory: {os.path.join(temp_dir, "code")}')
  62. def _generate_dockerfile(
  63. base_image: str,
  64. skip_init: bool = False,
  65. extra_deps: str | None = None,
  66. ) -> str:
  67. """Generate the Dockerfile content for the eventstream runtime image based on user-provided base image."""
  68. env = Environment(
  69. loader=FileSystemLoader(
  70. searchpath=os.path.join(os.path.dirname(__file__), 'runtime_templates')
  71. )
  72. )
  73. template = env.get_template('Dockerfile.j2')
  74. dockerfile_content = template.render(
  75. base_image=base_image,
  76. skip_init=skip_init,
  77. extra_deps=extra_deps if extra_deps is not None else '',
  78. )
  79. return dockerfile_content
  80. def prep_docker_build_folder(
  81. dir_path: str,
  82. base_image: str,
  83. skip_init: bool = False,
  84. extra_deps: str | None = None,
  85. ) -> str:
  86. """Prepares the docker build folder by copying the source code and generating the Dockerfile.
  87. Return the MD5 hash of the directory.
  88. """
  89. _put_source_code_to_dir(dir_path)
  90. dockerfile_content = _generate_dockerfile(
  91. base_image,
  92. skip_init=skip_init,
  93. extra_deps=extra_deps,
  94. )
  95. logger.info(
  96. (
  97. f'===== Dockerfile content =====\n'
  98. f'{dockerfile_content}\n'
  99. f'==============================='
  100. )
  101. )
  102. with open(os.path.join(dir_path, 'Dockerfile'), 'w') as file:
  103. file.write(dockerfile_content)
  104. hash = dirhash(dir_path, 'md5')
  105. logger.info(
  106. f'Input base image: {base_image}\n'
  107. f'Skip init: {skip_init}\n'
  108. f'Extra deps: {extra_deps}\n'
  109. f'Hash for docker build directory [{dir_path}] (contents: {os.listdir(dir_path)}): {hash}\n'
  110. )
  111. return hash
def _build_sandbox_image(
    docker_folder: str,
    docker_client: docker.DockerClient,
    target_image_repo: str,
    target_image_hash_tag: str,
    target_image_tag: str,
) -> str:
    """Build the sandbox image.

    The image will be tagged as both:
    - target_image_repo:target_image_hash_tag
    - target_image_repo:target_image_tag

    Args:
        docker_folder: str: the path to the docker build folder
        docker_client: docker.DockerClient: the docker client
        target_image_repo: str: the repository name for the target image
        target_image_hash_tag: str: the *hash* tag for the target image that is calculated based
            on the contents of the docker build folder (source code and Dockerfile)
            e.g., ubuntu:latest -> od_runtime:1234567890abcdef
        target_image_tag: str: the tag for the target image that's generic and based on the base image name
            e.g., ubuntu:latest -> od_runtime:ubuntu_tag_latest
    """
    # 1. Always directly build and tag using the dir_hash
    target_image_hash_name = f'{target_image_repo}:{target_image_hash_tag}'
    try:
        # NOTE(review): this is the low-level API, which returns a generator of
        # decoded log dicts; build failures surface as 'error' entries in that
        # stream (handled below) — confirm the BuildError except can ever fire,
        # since BuildError is raised by the high-level images.build() instead.
        build_logs = docker_client.api.build(
            path=docker_folder,
            tag=target_image_hash_name,
            rm=True,
            decode=True,
        )
    except docker.errors.BuildError as e:
        logger.error(f'Sandbox image build failed: {e}')
        raise e
    # Stream the build output: progress lines to stdout, errors to the logger.
    # NOTE(review): an 'error' entry is logged but does not abort the build
    # here — presumably the later images.get() is relied on to fail; confirm.
    for log in build_logs:
        if 'stream' in log:
            print(log['stream'].strip())
        elif 'error' in log:
            logger.error(log['error'].strip())
        else:
            logger.info(str(log))
    # 2. Re-tag the image with a more generic tag (as somewhat of "latest" tag)
    logger.info(f'Image [{target_image_hash_name}] build finished.')
    image = docker_client.images.get(target_image_hash_name)
    image.tag(target_image_repo, target_image_tag)
    logger.info(
        f'Re-tagged image [{target_image_hash_name}] with more generic tag [{target_image_tag}]'
    )
    # check if the image is built successfully
    # NOTE(review): images.get raises ImageNotFound rather than returning None,
    # so the `image is None` branch below looks unreachable — confirm.
    image = docker_client.images.get(target_image_hash_name)
    if image is None:
        raise RuntimeError(
            f'Build failed: Image [{target_image_repo}:{target_image_hash_tag}] not found'
        )
    logger.info(
        f'Image [{target_image_repo}:{target_image_hash_tag}] (hash: [{target_image_tag}]) built successfully'
    )
    return target_image_hash_name
  169. def get_runtime_image_repo_and_tag(base_image: str) -> tuple[str, str]:
  170. if RUNTIME_IMAGE_REPO in base_image:
  171. logger.info(
  172. f'The provided image [{base_image}] is a already a valid od_runtime image.\n'
  173. f'Will try to reuse it as is.'
  174. )
  175. if ':' not in base_image:
  176. base_image = base_image + ':latest'
  177. repo, tag = base_image.split(':')
  178. return repo, tag
  179. else:
  180. if ':' not in base_image:
  181. base_image = base_image + ':latest'
  182. [repo, tag] = base_image.split(':')
  183. repo = repo.replace('/', '___')
  184. od_version = _get_package_version()
  185. return RUNTIME_IMAGE_REPO, f'od_v{od_version}_image_{repo}_tag_{tag}'
  186. def _check_image_exists(image_name: str, docker_client: docker.DockerClient) -> bool:
  187. """Check if the image exists in the registry (try to pull it first) AND in the local store.
  188. image_name is f'{repo}:{tag}'
  189. """
  190. # Try to pull the new image from the registry
  191. try:
  192. docker_client.images.pull(image_name)
  193. except Exception:
  194. logger.info(f'Cannot pull image {image_name} directly')
  195. images = docker_client.images.list()
  196. if images:
  197. for image in images:
  198. if image_name in image.tags:
  199. return True
  200. return False
def build_runtime_image(
    base_image: str,
    docker_client: docker.DockerClient,
    extra_deps: str | None = None,
    docker_build_folder: str | None = None,
    dry_run: bool = False,
) -> str:
    """Build the runtime image for the OpenDevin runtime.

    See https://docs.all-hands.dev/modules/usage/runtime for more details.

    Args:
        base_image: the user-provided base image to build the runtime on.
        docker_client: docker client used to check, pull, and build images.
        extra_deps: extra dependency spec passed through to the Dockerfile.
        docker_build_folder: reuse this folder for the build context instead
            of a temporary one (the folder is then left in place).
        dry_run: prepare the build folder and hashes but skip the build.

    Returns:
        The hash-tagged image name f'{repo}:{from_scratch_hash}'.
    """
    runtime_image_repo, runtime_image_tag = get_runtime_image_repo_and_tag(base_image)
    # Calculate the hash for the docker build folder (source code and Dockerfile)
    with tempfile.TemporaryDirectory() as temp_dir:
        from_scratch_hash = prep_docker_build_folder(
            temp_dir,
            base_image=base_image,
            skip_init=False,
            extra_deps=extra_deps,
        )
    # hash image name, if the hash matches, it means the image is already
    # built from scratch with the *exact SAME source code* on the exact Dockerfile
    hash_runtime_image_name = f'{runtime_image_repo}:{from_scratch_hash}'
    # non-hash generic image name, it could contains *similar* dependencies
    # but *might* not exactly match the state of the source code.
    # It resembles the "latest" tag in the docker image naming convention for
    # a particular {repo}:{tag} pair (e.g., ubuntu:latest -> od_runtime:ubuntu_tag_latest)
    # we will build from IT to save time if the `from_scratch_hash` is not found
    generic_runtime_image_name = f'{runtime_image_repo}:{runtime_image_tag}'
    # 1. If the image exists with the same hash, we will reuse it as is
    if _check_image_exists(hash_runtime_image_name, docker_client):
        logger.info(
            f'Image [{hash_runtime_image_name}] exists with matched hash for Docker build folder.\n'
            'Will reuse it as is.'
        )
        return hash_runtime_image_name
    # 2. If the exact hash is not found, we will FIRST try to re-build it
    # by leveraging the non-hash `generic_runtime_image_name` to save some time
    # from re-building the dependencies (e.g., poetry install, apt install)
    elif _check_image_exists(generic_runtime_image_name, docker_client):
        logger.info(
            f'Cannot find matched hash for image [{hash_runtime_image_name}]\n'
            f'Will try to re-build it from latest [{generic_runtime_image_name}] image to potentially save '
            f'time for dependencies installation.\n'
        )
        # Caller-provided folder is kept; an ad-hoc mkdtemp folder is removed below.
        cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
        _skip_init_hash = prep_docker_build_folder(
            cur_docker_build_folder,
            # we want to use the existing generic image as base
            # so that we can leverage existing dependencies already installed in the image
            base_image=generic_runtime_image_name,
            skip_init=True,  # skip init since we are re-using the existing image
            extra_deps=extra_deps,
        )
        # NOTE(review): `assert` is stripped under `python -O`; raise an explicit
        # exception if this invariant must hold in production — confirm.
        assert (
            _skip_init_hash != from_scratch_hash
        ), f'The skip_init hash [{_skip_init_hash}] should not match the existing hash [{from_scratch_hash}]'
        if not dry_run:
            _build_sandbox_image(
                docker_folder=cur_docker_build_folder,
                docker_client=docker_client,
                target_image_repo=runtime_image_repo,
                # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
                # otherwise, even if the source code is exactly the same, the image *might* be re-built
                # because the same source code will generate different hash when skip_init=True/False
                # since the Dockerfile is slightly different
                target_image_hash_tag=from_scratch_hash,
                target_image_tag=runtime_image_tag,
            )
        else:
            logger.info(
                f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
            )
        if docker_build_folder is None:
            shutil.rmtree(cur_docker_build_folder)
    # 3. If the image is not found AND we cannot re-use the non-hash latest relavant image,
    # we will build it completely from scratch
    else:
        cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
        _new_from_scratch_hash = prep_docker_build_folder(
            cur_docker_build_folder,
            base_image,
            skip_init=False,
            extra_deps=extra_deps,
        )
        # Sanity check: re-preparing the same inputs must reproduce the hash
        # computed above. NOTE(review): `assert` is stripped under `python -O`.
        assert (
            _new_from_scratch_hash == from_scratch_hash
        ), f'The new from scratch hash [{_new_from_scratch_hash}] does not match the existing hash [{from_scratch_hash}]'
        if not dry_run:
            _build_sandbox_image(
                docker_folder=cur_docker_build_folder,
                docker_client=docker_client,
                target_image_repo=runtime_image_repo,
                # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
                target_image_hash_tag=from_scratch_hash,
                target_image_tag=runtime_image_tag,
            )
        else:
            logger.info(
                f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
            )
        if docker_build_folder is None:
            shutil.rmtree(cur_docker_build_folder)
    return f'{runtime_image_repo}:{from_scratch_hash}'
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_image', type=str, default='ubuntu:22.04')
    parser.add_argument('--build_folder', type=str, default=None)
    args = parser.parse_args()

    if args.build_folder is not None:
        # Mode 1: only prepare a build folder (dry run) — the actual docker
        # build is expected to happen elsewhere (e.g. in CI).
        build_folder = args.build_folder
        # NOTE(review): `assert` is stripped under `python -O`; raise instead
        # if this must always be validated — confirm.
        assert os.path.exists(
            build_folder
        ), f'Build folder {build_folder} does not exist'
        logger.info(
            f'Will prepare a build folder by copying the source code and generating the Dockerfile: {build_folder}'
        )
        runtime_image_repo, runtime_image_tag = get_runtime_image_repo_and_tag(
            args.base_image
        )
        with tempfile.TemporaryDirectory() as temp_dir:
            # dry_run=True: populate temp_dir with source + Dockerfile and
            # compute the hash-tagged image name without building anything.
            runtime_image_hash_name = build_runtime_image(
                args.base_image,
                docker_client=docker.from_env(),
                docker_build_folder=temp_dir,
                dry_run=True,
            )
            _runtime_image_repo, runtime_image_hash_tag = runtime_image_hash_name.split(
                ':'
            )
            # Move contents of temp_dir to build_folder
            shutil.copytree(temp_dir, build_folder, dirs_exist_ok=True)
            logger.info(
                f'Build folder [{build_folder}] is ready: {os.listdir(build_folder)}'
            )
            # Append the resolved image names to config.sh so downstream
            # shell tooling can pick them up.
            with open(os.path.join(build_folder, 'config.sh'), 'a') as file:
                file.write(
                    (
                        f'\n'
                        f'DOCKER_IMAGE={runtime_image_repo}\n'
                        f'DOCKER_IMAGE_TAG={runtime_image_tag}\n'
                        f'DOCKER_IMAGE_HASH_TAG={runtime_image_hash_tag}\n'
                    )
                )
            logger.info(
                f'`config.sh` is updated with the new image name [{runtime_image_repo}] and tag [{runtime_image_tag}, {runtime_image_hash_tag}]'
            )
            logger.info(f'Dockerfile and source distribution are ready in {build_folder}')
    else:
        # Mode 2: build the runtime image right here in a temporary folder.
        logger.info('Building image in a temporary folder')
        client = docker.from_env()
        image_name = build_runtime_image(args.base_image, client)
        print(f'\nBUILT Image: {image_name}\n')