runtime_build.py

import argparse
import os
import shutil
import subprocess
import tempfile

import docker
import toml
from dirhash import dirhash
from jinja2 import Environment, FileSystemLoader

import openhands
from openhands.core.logger import openhands_logger as logger
from openhands.runtime.builder import DockerRuntimeBuilder, RuntimeBuilder

RUNTIME_IMAGE_REPO = os.getenv(
    'OD_RUNTIME_RUNTIME_IMAGE_REPO', 'ghcr.io/all-hands-ai/runtime'
)


def _get_package_version():
    """Read the version from pyproject.toml.

    Returns:
    - The version specified in pyproject.toml under [tool.poetry]
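
    Example: given a pyproject.toml containing (version shown is illustrative)

        [tool.poetry]
        version = "0.8.3"

    this function returns '0.8.3'.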
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(openhands.__file__)))
    pyproject_path = os.path.join(project_root, 'pyproject.toml')
    with open(pyproject_path, 'r') as f:
        pyproject_data = toml.load(f)
    return pyproject_data['tool']['poetry']['version']


def _create_project_source_dist():
    """Create a source distribution of the project.

    Returns:
    - str: The path to the project tarball
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(openhands.__file__)))
    logger.info(f'Using project root: {project_root}')

    # Run "python -m build -s" on project_root to create the project tarball
    result = subprocess.run(f'python -m build -s {project_root}', shell=True)
    if result.returncode != 0:
        logger.error(f'Build failed: {result}')
        raise Exception(f'Build failed: {result}')

    # Fetch the correct version from pyproject.toml
    package_version = _get_package_version()
    tarball_path = os.path.join(
        project_root, 'dist', f'openhands-{package_version}.tar.gz'
    )
    if not os.path.exists(tarball_path):
        logger.error(f'Source distribution not found at {tarball_path}')
        raise Exception(f'Source distribution not found at {tarball_path}')

    logger.info(f'Source distribution created at {tarball_path}')
    return tarball_path


def _put_source_code_to_dir(temp_dir: str):
    """Builds the project source tarball, copies it to temp_dir and unpacks it.

    The OpenHands source code ends up in the temp_dir/code directory.

    Parameters:
    - temp_dir (str): The directory to put the source code in
    """
    # Build the project source tarball
    tarball_path = _create_project_source_dist()
    filename = os.path.basename(tarball_path)
    filename = filename.removesuffix('.tar.gz')

    # Move the project tarball to temp_dir
    _res = shutil.copy(tarball_path, os.path.join(temp_dir, 'project.tar.gz'))
    if _res:
        os.remove(tarball_path)
    logger.info(
        f'Source distribution moved to {os.path.join(temp_dir, "project.tar.gz")}'
    )

    # Unpack the tarball
    shutil.unpack_archive(os.path.join(temp_dir, 'project.tar.gz'), temp_dir)
    # Remove the tarball
    os.remove(os.path.join(temp_dir, 'project.tar.gz'))
    # Rename the directory containing the code to 'code'
    os.rename(os.path.join(temp_dir, filename), os.path.join(temp_dir, 'code'))
    logger.info(f'Unpacked source code directory: {os.path.join(temp_dir, "code")}')


def _generate_dockerfile(
    base_image: str,
    skip_init: bool = False,
    extra_deps: str | None = None,
) -> str:
    """Generate the Dockerfile content for the runtime image based on the base image.

    Parameters:
    - base_image (str): The base image provided for the runtime image
    - skip_init (bool): If True, render the Dockerfile so that it skips the initialization
      (dependency installation) steps, for use when the base image is already a runtime image
    - extra_deps (str): Extra dependencies to install, passed through to the Dockerfile template

    Returns:
    - str: The resulting Dockerfile content
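
    Example (illustrative):
        >>> dockerfile = _generate_dockerfile('ubuntu:22.04', skip_init=False)  # doctest: +SKIP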
    """
    env = Environment(
        loader=FileSystemLoader(
            searchpath=os.path.join(os.path.dirname(__file__), 'runtime_templates')
        )
    )
    template = env.get_template('Dockerfile.j2')
    dockerfile_content = template.render(
        base_image=base_image,
        skip_init=skip_init,
        extra_deps=extra_deps if extra_deps is not None else '',
    )
    return dockerfile_content


def prep_docker_build_folder(
    dir_path: str,
    base_image: str,
    skip_init: bool = False,
    extra_deps: str | None = None,
) -> str:
    """Prepares a docker build folder by copying the source code and generating the Dockerfile.

    Parameters:
    - dir_path (str): The build folder to place the source code and Dockerfile
    - base_image (str): The base Docker image to use for the Dockerfile
    - skip_init (bool): If True, the generated Dockerfile skips the initialization steps
    - extra_deps (str): Extra dependencies to install, passed through to the Dockerfile template

    Returns:
    - str: The MD5 hash of the build folder directory (dir_path)
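
    Example (illustrative): after this call, dir_path typically contains
        Dockerfile   (rendered from runtime_templates/Dockerfile.j2)
        code/        (the unpacked OpenHands source distribution)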
    """
    # Copy the source code to the directory. It will end up in dir_path/code
    _put_source_code_to_dir(dir_path)

    # Create a Dockerfile and write it to dir_path
    dockerfile_content = _generate_dockerfile(
        base_image,
        skip_init=skip_init,
        extra_deps=extra_deps,
    )
    logger.debug(
        (
            f'===== Dockerfile content start =====\n'
            f'{dockerfile_content}\n'
            f'===== Dockerfile content end ====='
        )
    )
    with open(os.path.join(dir_path, 'Dockerfile'), 'w') as file:
        file.write(dockerfile_content)

    # Get the MD5 hash of the dir_path directory
    hash = dirhash(dir_path, 'md5')
    logger.info(
        f'Input base image: {base_image}\n'
        f'Skip init: {skip_init}\n'
        f'Extra deps: {extra_deps}\n'
        f'Hash for docker build directory [{dir_path}] (contents: {os.listdir(dir_path)}): {hash}\n'
    )
    return hash


def get_runtime_image_repo_and_tag(base_image: str) -> tuple[str, str]:
    """Retrieves the Docker repo and tag associated with the Docker image.

    Parameters:
    - base_image (str): The name of the base Docker image

    Returns:
    - tuple[str, str]: The Docker repo and tag of the Docker image
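
    Example (illustrative; the 'od_v0.8.3' prefix comes from the version in pyproject.toml):
        'ubuntu:22.04'
            -> (RUNTIME_IMAGE_REPO, 'od_v0.8.3_image_ubuntu_tag_22.04')
        'nikolaik/python-nodejs:python3.11-nodejs22'
            -> (RUNTIME_IMAGE_REPO, 'od_v0.8.3_image_nikolaik___python-nodejs_tag_python3.11-nodejs22')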
    """
    if RUNTIME_IMAGE_REPO in base_image:
        logger.info(
            f'The provided image [{base_image}] is already a valid runtime image.\n'
            f'Will try to reuse it as is.'
        )

        if ':' not in base_image:
            base_image = base_image + ':latest'
        repo, tag = base_image.split(':')
        return repo, tag
    else:
        if ':' not in base_image:
            base_image = base_image + ':latest'
        repo, tag = base_image.split(':')
        repo = repo.replace('/', '___')
        od_version = _get_package_version()
        return RUNTIME_IMAGE_REPO, f'od_v{od_version}_image_{repo}_tag_{tag}'


def build_runtime_image(
    base_image: str,
    runtime_builder: RuntimeBuilder,
    extra_deps: str | None = None,
    docker_build_folder: str | None = None,
    dry_run: bool = False,
    force_rebuild: bool = False,
) -> str:
    """Prepares the final docker build folder.

    If dry_run is False, it will also build the OpenHands runtime Docker image using the docker build folder.

    Parameters:
    - base_image (str): The name of the base Docker image to use
    - runtime_builder (RuntimeBuilder): The runtime builder to use
    - extra_deps (str): Extra dependencies to install, passed through to the Dockerfile template
    - docker_build_folder (str): The directory to use for the build. If not provided, a temporary directory will be used
    - dry_run (bool): If True, only prepare the build folder; do not actually build the Docker image
    - force_rebuild (bool): If True, rebuild the image from scratch even if a reusable generic runtime image already exists

    Returns:
    - str: <image_repo>:<MD5 hash>, where the MD5 hash is the hash of the docker build folder

    See https://docs.all-hands.dev/modules/usage/runtime for more details.
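
    Example (illustrative; assumes a local Docker daemon is available):
        >>> builder = DockerRuntimeBuilder(docker.from_env())
        >>> build_runtime_image('ubuntu:22.04', builder)  # doctest: +SKIP
        'ghcr.io/all-hands-ai/runtime:<md5 of the docker build folder>'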
    """
    # Calculate the hash for the docker build folder (source code and Dockerfile)
    with tempfile.TemporaryDirectory() as temp_dir:
        from_scratch_hash = prep_docker_build_folder(
            temp_dir,
            base_image=base_image,
            skip_init=False,
            extra_deps=extra_deps,
        )

    runtime_image_repo, runtime_image_tag = get_runtime_image_repo_and_tag(base_image)

    # The image name in the format <image repo>:<hash>
    hash_runtime_image_name = f'{runtime_image_repo}:{from_scratch_hash}'

    # The non-hash generic image name. It could contain *similar* dependencies
    # but *might* not exactly match the state of the source code.
    # It resembles the "latest" tag in the docker image naming convention for
    # a particular {repo}:{tag} pair (e.g., ubuntu:latest -> runtime:ubuntu_tag_latest).
    # We will build from IT to save time if the `from_scratch_hash` is not found.
    generic_runtime_image_name = f'{runtime_image_repo}:{runtime_image_tag}'
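
    # For example (illustrative, assuming package version 0.8.3, base image 'ubuntu:22.04'
    # and the default RUNTIME_IMAGE_REPO):
    #   hash_runtime_image_name    == 'ghcr.io/all-hands-ai/runtime:<md5 of the build folder>'
    #   generic_runtime_image_name == 'ghcr.io/all-hands-ai/runtime:od_v0.8.3_image_ubuntu_tag_22.04'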

    # Scenario 1: If we already have an image with the exact same hash, then it means the image is already built
    # with the exact same source code and Dockerfile, so we will reuse it. Building it is not required.
    if runtime_builder.image_exists(hash_runtime_image_name):
        logger.info(
            f'Image [{hash_runtime_image_name}] already exists so we will reuse it.'
        )
        return hash_runtime_image_name

    # Scenario 2: If a Docker image with the exact hash is not found, we will FIRST try to re-build it
    # by leveraging the `generic_runtime_image_name` to save some time
    # from re-building the dependencies (e.g., poetry install, apt install)
    elif runtime_builder.image_exists(generic_runtime_image_name) and not force_rebuild:
        logger.info(
            f'Cannot find docker Image [{hash_runtime_image_name}]\n'
            f'Will try to re-build it from latest [{generic_runtime_image_name}] image to potentially save '
            f'time for dependencies installation.\n'
        )

        cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
        _skip_init_hash = prep_docker_build_folder(
            cur_docker_build_folder,
            # we want to use the existing generic image as base
            # so that we can leverage existing dependencies already installed in the image
            base_image=generic_runtime_image_name,
            skip_init=True,  # skip init since we are re-using the existing image
            extra_deps=extra_deps,
        )
        assert (
            _skip_init_hash != from_scratch_hash
        ), f'The skip_init hash [{_skip_init_hash}] should not match the existing hash [{from_scratch_hash}]'

        if not dry_run:
            _build_sandbox_image(
                docker_folder=cur_docker_build_folder,
                runtime_builder=runtime_builder,
                target_image_repo=runtime_image_repo,
                # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
                # otherwise, even if the source code is exactly the same, the image *might* be re-built
                # because the same source code will generate different hash when skip_init=True/False
                # since the Dockerfile is slightly different
                target_image_hash_tag=from_scratch_hash,
                target_image_tag=runtime_image_tag,
            )
        else:
            logger.info(
                f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
            )

        if docker_build_folder is None:
            shutil.rmtree(cur_docker_build_folder)

    # Scenario 3: If the Docker image with the required hash is not found AND we cannot re-use the latest
    # relevant image, we will build it completely from scratch
    else:
        if force_rebuild:
            logger.info(
                f'Force re-build: Will try to re-build image [{generic_runtime_image_name}] from scratch.\n'
            )
        cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
        _new_from_scratch_hash = prep_docker_build_folder(
            cur_docker_build_folder,
            base_image,
            skip_init=False,
            extra_deps=extra_deps,
        )
        assert (
            _new_from_scratch_hash == from_scratch_hash
        ), f'The new from scratch hash [{_new_from_scratch_hash}] does not match the existing hash [{from_scratch_hash}]'

        if not dry_run:
            _build_sandbox_image(
                docker_folder=cur_docker_build_folder,
                runtime_builder=runtime_builder,
                target_image_repo=runtime_image_repo,
                # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
                target_image_hash_tag=from_scratch_hash,
                target_image_tag=runtime_image_tag,
            )
        else:
            logger.info(
                f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
            )

        if docker_build_folder is None:
            shutil.rmtree(cur_docker_build_folder)

    return f'{runtime_image_repo}:{from_scratch_hash}'


def _build_sandbox_image(
    docker_folder: str,
    runtime_builder: RuntimeBuilder,
    target_image_repo: str,
    target_image_hash_tag: str,
    target_image_tag: str,
) -> str:
    """Build and tag the sandbox image.

    The image will be tagged as both:
    - target_image_hash_tag
    - target_image_tag

    Parameters:
    - docker_folder (str): the path to the docker build folder
    - runtime_builder (RuntimeBuilder): the runtime builder instance
    - target_image_repo (str): the repository name for the target image
    - target_image_hash_tag (str): the *hash* tag for the target image that is calculated based
        on the contents of the docker build folder (source code and Dockerfile),
        e.g. 1234567890abcdef
    - target_image_tag (str): the tag for the target image that's generic and based on the base image name,
        e.g. od_v0.8.3_image_ubuntu_tag_22.04
    """
    target_image_hash_name = f'{target_image_repo}:{target_image_hash_tag}'
    target_image_generic_name = f'{target_image_repo}:{target_image_tag}'

    try:
        success = runtime_builder.build(
            path=docker_folder, tags=[target_image_hash_name, target_image_generic_name]
        )
        if not success:
            raise RuntimeError(f'Build failed for image {target_image_hash_name}')
    except Exception as e:
        logger.error(f'Sandbox image build failed: {e}')
        raise

    return target_image_hash_name


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_image', type=str, default='nikolaik/python-nodejs:python3.11-nodejs22'
    )
    parser.add_argument('--build_folder', type=str, default=None)
    parser.add_argument('--force_rebuild', action='store_true', default=False)
    args = parser.parse_args()

    if args.build_folder is not None:
        # If a build_folder is provided, we do not actually build the Docker image. We copy the necessary source code
        # and create a Dockerfile dynamically and place it in the build_folder only. This allows the Docker image to
        # then be created using the Dockerfile (most likely using the containers/build.sh script)
        build_folder = args.build_folder
        assert os.path.exists(
            build_folder
        ), f'Build folder {build_folder} does not exist'
        logger.info(
            f'Copying the source code and generating the Dockerfile in the build folder: {build_folder}'
        )

        runtime_image_repo, runtime_image_tag = get_runtime_image_repo_and_tag(
            args.base_image
        )
        logger.info(
            f'Runtime image repo: {runtime_image_repo} and runtime image tag: {runtime_image_tag}'
        )

        with tempfile.TemporaryDirectory() as temp_dir:
            # dry_run is true so we only prepare a temp_dir containing the required source code and the Dockerfile. We
            # then obtain the MD5 hash of the folder and return <image_repo>:<temp_dir_md5_hash>
            runtime_image_hash_name = build_runtime_image(
                args.base_image,
                runtime_builder=DockerRuntimeBuilder(docker.from_env()),
                docker_build_folder=temp_dir,
                dry_run=True,
                force_rebuild=args.force_rebuild,
            )
            _runtime_image_repo, runtime_image_hash_tag = runtime_image_hash_name.split(
                ':'
            )

            # Move contents of temp_dir to build_folder
            shutil.copytree(temp_dir, build_folder, dirs_exist_ok=True)
            logger.info(
                f'Build folder [{build_folder}] is ready: {os.listdir(build_folder)}'
            )

            # We now update the config.sh in the build_folder to contain the required values. This is used in the
            # containers/build.sh script which is called to actually build the Docker image
            with open(os.path.join(build_folder, 'config.sh'), 'a') as file:
                file.write(
                    (
                        f'\n'
                        f'DOCKER_IMAGE_TAG={runtime_image_tag}\n'
                        f'DOCKER_IMAGE_HASH_TAG={runtime_image_hash_tag}\n'
                    )
                )
            logger.info(
                f'`config.sh` is updated with the image repo [{runtime_image_repo}] and tags [{runtime_image_tag}, {runtime_image_hash_tag}]'
            )
            logger.info(
                f'Dockerfile, source code and config.sh are ready in {build_folder}'
            )
    else:
        # If a build_folder is not provided, after copying the required source code and dynamically creating the
        # Dockerfile, we actually build the Docker image
        logger.info('Building image in a temporary folder')
        docker_builder = DockerRuntimeBuilder(docker.from_env())
        image_name = build_runtime_image(args.base_image, docker_builder)
        print(f'\nBUILT Image: {image_name}\n')
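
# Example invocations (illustrative; the module path and build folder shown are assumptions
# and may differ in your checkout):
#   python -m openhands.runtime.utils.runtime_build --base_image ubuntu:22.04
#   python -m openhands.runtime.utils.runtime_build --base_image ubuntu:22.04 --build_folder containers/runtime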