|
|
@@ -1,16 +1,17 @@
|
|
|
import argparse
|
|
|
import hashlib
|
|
|
-import importlib.metadata
|
|
|
import os
|
|
|
import shutil
|
|
|
+import string
|
|
|
import tempfile
|
|
|
+from pathlib import Path
|
|
|
+from typing import List
|
|
|
|
|
|
import docker
|
|
|
from dirhash import dirhash
|
|
|
from jinja2 import Environment, FileSystemLoader
|
|
|
|
|
|
import openhands
|
|
|
-from openhands import __package_name__
|
|
|
from openhands import __version__ as oh_version
|
|
|
from openhands.core.logger import openhands_logger as logger
|
|
|
from openhands.runtime.builder import DockerRuntimeBuilder, RuntimeBuilder
|
|
|
@@ -20,65 +21,16 @@ def get_runtime_image_repo():
|
|
|
return os.getenv('OH_RUNTIME_RUNTIME_IMAGE_REPO', 'ghcr.io/all-hands-ai/runtime')
|
|
|
|
|
|
|
|
|
-def _put_source_code_to_dir(temp_dir: str):
|
|
|
- """Builds the project source tarball directly in temp_dir and unpacks it.
|
|
|
- The OpenHands source code ends up in the temp_dir/code directory.
|
|
|
- Parameters:
|
|
|
- - temp_dir (str): The directory to put the source code in
|
|
|
- """
|
|
|
- if not os.path.isdir(temp_dir):
|
|
|
- raise RuntimeError(f'Temp directory {temp_dir} does not exist')
|
|
|
-
|
|
|
- dest_dir = os.path.join(temp_dir, 'code')
|
|
|
- openhands_dir = None
|
|
|
-
|
|
|
- try:
|
|
|
- # Try to get the source directory from the installed package
|
|
|
- distribution = importlib.metadata.distribution(__package_name__)
|
|
|
- source_dir = os.path.dirname(distribution.locate_file(__package_name__))
|
|
|
- openhands_dir = os.path.join(source_dir, 'openhands')
|
|
|
- except importlib.metadata.PackageNotFoundError:
|
|
|
- pass
|
|
|
-
|
|
|
- if openhands_dir is not None and os.path.isdir(openhands_dir):
|
|
|
- logger.info(f'Package {__package_name__} found')
|
|
|
- shutil.copytree(openhands_dir, os.path.join(dest_dir, 'openhands'))
|
|
|
- # note: "pyproject.toml" and "poetry.lock" are included in the openhands
|
|
|
- # package, so we need to move them out to the top-level directory
|
|
|
- for filename in ['pyproject.toml', 'poetry.lock']:
|
|
|
- shutil.move(os.path.join(dest_dir, 'openhands', filename), dest_dir)
|
|
|
- else:
|
|
|
- # If package is not found, build from source code
|
|
|
- project_root = os.path.dirname(
|
|
|
- os.path.dirname(os.path.abspath(openhands.__file__))
|
|
|
- )
|
|
|
- logger.info(f'Building source distribution using project root: {project_root}')
|
|
|
-
|
|
|
- # Copy the 'openhands' directory
|
|
|
- openhands_dir = os.path.join(project_root, 'openhands')
|
|
|
- if not os.path.isdir(openhands_dir):
|
|
|
- raise RuntimeError(f"'openhands' directory not found in {project_root}")
|
|
|
- shutil.copytree(openhands_dir, os.path.join(dest_dir, 'openhands'))
|
|
|
-
|
|
|
- # Copy pyproject.toml and poetry.lock files
|
|
|
- for file in ['pyproject.toml', 'poetry.lock']:
|
|
|
- src_file = os.path.join(project_root, file)
|
|
|
- dest_file = os.path.join(dest_dir, file)
|
|
|
- shutil.copy2(src_file, dest_file)
|
|
|
-
|
|
|
- logger.info(f'Unpacked source code directory: {dest_dir}')
|
|
|
-
|
|
|
-
|
|
|
def _generate_dockerfile(
|
|
|
base_image: str,
|
|
|
- skip_init: bool = False,
|
|
|
+ build_from_scratch: bool = True,
|
|
|
extra_deps: str | None = None,
|
|
|
) -> str:
|
|
|
"""Generate the Dockerfile content for the runtime image based on the base image.
|
|
|
|
|
|
Parameters:
|
|
|
- base_image (str): The base image provided for the runtime image
|
|
|
- - skip_init (boolean):
|
|
|
+   - build_from_scratch (boolean): False implies most steps can be skipped because the base image is already a built OpenHands runtime image
|
|
|
- extra_deps (str):
|
|
|
|
|
|
Returns:
|
|
|
@@ -93,69 +45,12 @@ def _generate_dockerfile(
|
|
|
|
|
|
dockerfile_content = template.render(
|
|
|
base_image=base_image,
|
|
|
- skip_init=skip_init,
|
|
|
+ build_from_scratch=build_from_scratch,
|
|
|
extra_deps=extra_deps if extra_deps is not None else '',
|
|
|
)
|
|
|
return dockerfile_content
|
|
|
|
|
|
|
|
|
-def prep_docker_build_folder(
|
|
|
- dir_path: str,
|
|
|
- base_image: str,
|
|
|
- skip_init: bool = False,
|
|
|
- extra_deps: str | None = None,
|
|
|
-) -> str:
|
|
|
- """Prepares a docker build folder by copying the source code and generating the Dockerfile
|
|
|
-
|
|
|
- Parameters:
|
|
|
- - dir_path (str): The build folder to place the source code and Dockerfile
|
|
|
- - base_image (str): The base Docker image to use for the Dockerfile
|
|
|
- - skip_init (str):
|
|
|
- - extra_deps (str):
|
|
|
-
|
|
|
- Returns:
|
|
|
- - str: The MD5 hash of the build folder directory (dir_path)
|
|
|
- """
|
|
|
- # Copy the source code to directory. It will end up in dir_path/code
|
|
|
- _put_source_code_to_dir(dir_path)
|
|
|
-
|
|
|
- # Create a Dockerfile and write it to dir_path
|
|
|
- dockerfile_content = _generate_dockerfile(
|
|
|
- base_image,
|
|
|
- skip_init=skip_init,
|
|
|
- extra_deps=extra_deps,
|
|
|
- )
|
|
|
- if os.getenv('SKIP_CONTAINER_LOGS', 'false') != 'true':
|
|
|
- logger.debug(
|
|
|
- (
|
|
|
- f'===== Dockerfile content start =====\n'
|
|
|
- f'{dockerfile_content}\n'
|
|
|
- f'===== Dockerfile content end ====='
|
|
|
- )
|
|
|
- )
|
|
|
- with open(os.path.join(dir_path, 'Dockerfile'), 'w') as file:
|
|
|
- file.write(dockerfile_content)
|
|
|
-
|
|
|
- # Get the MD5 hash of the dir_path directory
|
|
|
- dir_hash = dirhash(
|
|
|
- dir_path,
|
|
|
- 'md5',
|
|
|
- ignore=[
|
|
|
- '.*/', # hidden directories
|
|
|
- '__pycache__/',
|
|
|
- '*.pyc',
|
|
|
- ],
|
|
|
- )
|
|
|
- hash = f'v{oh_version}_{dir_hash}'
|
|
|
- logger.info(
|
|
|
- f'Input base image: {base_image}\n'
|
|
|
- f'Skip init: {skip_init}\n'
|
|
|
- f'Extra deps: {extra_deps}\n'
|
|
|
- f'Hash for docker build directory [{dir_path}] (contents: {os.listdir(dir_path)}): {hash}\n'
|
|
|
- )
|
|
|
- return hash
|
|
|
-
|
|
|
-
|
|
|
def get_runtime_image_repo_and_tag(base_image: str) -> tuple[str, str]:
|
|
|
"""Retrieves the Docker repo and tag associated with the Docker image.
|
|
|
|
|
|
@@ -204,7 +99,7 @@ def build_runtime_image(
|
|
|
base_image: str,
|
|
|
runtime_builder: RuntimeBuilder,
|
|
|
extra_deps: str | None = None,
|
|
|
- docker_build_folder: str | None = None,
|
|
|
+ build_folder: str | None = None,
|
|
|
dry_run: bool = False,
|
|
|
force_rebuild: bool = False,
|
|
|
) -> str:
|
|
|
@@ -215,7 +110,7 @@ def build_runtime_image(
|
|
|
- base_image (str): The name of the base Docker image to use
|
|
|
- runtime_builder (RuntimeBuilder): The runtime builder to use
|
|
|
- extra_deps (str):
|
|
|
- - docker_build_folder (str): The directory to use for the build. If not provided a temporary directory will be used
|
|
|
+ - build_folder (str): The directory to use for the build. If not provided a temporary directory will be used
|
|
|
- dry_run (bool): if True, it will only ready the build folder. It will not actually build the Docker image
|
|
|
- force_rebuild (bool): if True, it will create the Dockerfile which uses the base_image
|
|
|
|
|
|
@@ -224,160 +119,195 @@ def build_runtime_image(
|
|
|
|
|
|
See https://docs.all-hands.dev/modules/usage/architecture/runtime for more details.
|
|
|
"""
|
|
|
- # Calculate the hash for the docker build folder (source code and Dockerfile)
|
|
|
- with tempfile.TemporaryDirectory() as temp_dir:
|
|
|
- from_scratch_hash = prep_docker_build_folder(
|
|
|
- temp_dir,
|
|
|
- base_image=base_image,
|
|
|
- skip_init=False,
|
|
|
- extra_deps=extra_deps,
|
|
|
- )
|
|
|
-
|
|
|
- runtime_image_repo, runtime_image_tag = get_runtime_image_repo_and_tag(base_image)
|
|
|
-
|
|
|
- # The image name in the format <image repo>:<hash>
|
|
|
- hash_runtime_image_name = f'{runtime_image_repo}:{from_scratch_hash}'
|
|
|
-
|
|
|
- # non-hash generic image name, it could contain *similar* dependencies
|
|
|
- # but *might* not exactly match the state of the source code.
|
|
|
- # It resembles the "latest" tag in the docker image naming convention for
|
|
|
- # a particular {repo}:{tag} pair (e.g., ubuntu:latest -> runtime:ubuntu_tag_latest)
|
|
|
- # we will build from IT to save time if the `from_scratch_hash` is not found
|
|
|
- generic_runtime_image_name = f'{runtime_image_repo}:{runtime_image_tag}'
|
|
|
-
|
|
|
- # Scenario 1: If we already have an image with the exact same hash, then it means the image is already built
|
|
|
- # with the exact same source code and Dockerfile, so we will reuse it. Building it is not required.
|
|
|
- if not force_rebuild and runtime_builder.image_exists(
|
|
|
- hash_runtime_image_name, False
|
|
|
- ):
|
|
|
- logger.info(
|
|
|
- f'Image [{hash_runtime_image_name}] already exists so we will reuse it.'
|
|
|
- )
|
|
|
- return hash_runtime_image_name
|
|
|
-
|
|
|
- # Scenario 2: If a Docker image with the exact hash is not found, we will FIRST try to re-build it
|
|
|
- # by leveraging the `generic_runtime_image_name` to save some time
|
|
|
- # from re-building the dependencies (e.g., poetry install, apt install)
|
|
|
- if not force_rebuild and runtime_builder.image_exists(generic_runtime_image_name):
|
|
|
- logger.info(
|
|
|
- f'Could not find docker image [{hash_runtime_image_name}]\n'
|
|
|
- f'Will try to re-build it from latest [{generic_runtime_image_name}] image to potentially save '
|
|
|
- f'time for dependencies installation.\n'
|
|
|
- )
|
|
|
-
|
|
|
- cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
|
|
|
- _skip_init_hash = prep_docker_build_folder(
|
|
|
- cur_docker_build_folder,
|
|
|
- # we want to use the existing generic image as base
|
|
|
- # so that we can leverage existing dependencies already installed in the image
|
|
|
- base_image=generic_runtime_image_name,
|
|
|
- skip_init=True, # skip init since we are re-using the existing image
|
|
|
- extra_deps=extra_deps,
|
|
|
- )
|
|
|
-
|
|
|
- assert (
|
|
|
- _skip_init_hash != from_scratch_hash
|
|
|
- ), f'The skip_init hash [{_skip_init_hash}] should not match the existing hash [{from_scratch_hash}]'
|
|
|
-
|
|
|
- if not dry_run:
|
|
|
- _build_sandbox_image(
|
|
|
- docker_folder=cur_docker_build_folder,
|
|
|
+ if build_folder is None:
|
|
|
+ with tempfile.TemporaryDirectory() as temp_dir:
|
|
|
+ result = build_runtime_image_in_folder(
|
|
|
+ base_image=base_image,
|
|
|
runtime_builder=runtime_builder,
|
|
|
- target_image_repo=runtime_image_repo,
|
|
|
- # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
|
|
|
- # otherwise, even if the source code is exactly the same, the image *might* be re-built
|
|
|
- # because the same source code will generate different hash when skip_init=True/False
|
|
|
- # since the Dockerfile is slightly different
|
|
|
- target_image_hash_tag=from_scratch_hash,
|
|
|
- target_image_tag=runtime_image_tag,
|
|
|
+ build_folder=Path(temp_dir),
|
|
|
+ extra_deps=extra_deps,
|
|
|
+ dry_run=dry_run,
|
|
|
+ force_rebuild=force_rebuild,
|
|
|
)
|
|
|
- else:
|
|
|
- logger.info(
|
|
|
- f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
|
|
|
- )
|
|
|
-
|
|
|
- if docker_build_folder is None:
|
|
|
- shutil.rmtree(cur_docker_build_folder)
|
|
|
+ return result
|
|
|
|
|
|
- # Scenario 3: If the Docker image with the required hash is not found AND we cannot re-use the latest
|
|
|
- # relevant image, we will build it completely from scratch
|
|
|
- else:
|
|
|
- if force_rebuild:
|
|
|
- logger.info(
|
|
|
- f'Force re-build: Will try to re-build image [{generic_runtime_image_name}] from scratch.\n'
|
|
|
- )
|
|
|
+ result = build_runtime_image_in_folder(
|
|
|
+ base_image=base_image,
|
|
|
+ runtime_builder=runtime_builder,
|
|
|
+ build_folder=Path(build_folder),
|
|
|
+ extra_deps=extra_deps,
|
|
|
+ dry_run=dry_run,
|
|
|
+ force_rebuild=force_rebuild,
|
|
|
+ )
|
|
|
+ return result
|
|
|
|
|
|
- cur_docker_build_folder = docker_build_folder or tempfile.mkdtemp()
|
|
|
- _new_from_scratch_hash = prep_docker_build_folder(
|
|
|
- cur_docker_build_folder,
|
|
|
- base_image,
|
|
|
- skip_init=False,
|
|
|
- extra_deps=extra_deps,
|
|
|
- )
|
|
|
- assert (
|
|
|
- _new_from_scratch_hash == from_scratch_hash
|
|
|
- ), f'The new from scratch hash [{_new_from_scratch_hash}] does not match the existing hash [{from_scratch_hash}]'
|
|
|
|
|
|
+def build_runtime_image_in_folder(
|
|
|
+ base_image: str,
|
|
|
+ runtime_builder: RuntimeBuilder,
|
|
|
+ build_folder: Path,
|
|
|
+ extra_deps: str | None,
|
|
|
+ dry_run: bool,
|
|
|
+ force_rebuild: bool,
|
|
|
+) -> str:
|
|
|
+ runtime_image_repo, _ = get_runtime_image_repo_and_tag(base_image)
|
|
|
+ lock_tag = f'oh_v{oh_version}_{get_hash_for_lock_files(base_image)}'
|
|
|
+ hash_tag = f'{lock_tag}_{get_hash_for_source_files()}'
|
|
|
+ hash_image_name = f'{runtime_image_repo}:{hash_tag}'
|
|
|
+
|
|
|
+ if force_rebuild:
|
|
|
+ logger.info(f'Force rebuild: [{runtime_image_repo}:{hash_tag}] from scratch.')
|
|
|
+ prep_build_folder(build_folder, base_image, True, extra_deps)
|
|
|
if not dry_run:
|
|
|
_build_sandbox_image(
|
|
|
- docker_folder=cur_docker_build_folder,
|
|
|
- runtime_builder=runtime_builder,
|
|
|
- target_image_repo=runtime_image_repo,
|
|
|
- # NOTE: WE ALWAYS use the "from_scratch_hash" tag for the target image
|
|
|
- target_image_hash_tag=from_scratch_hash,
|
|
|
- target_image_tag=runtime_image_tag,
|
|
|
- )
|
|
|
- else:
|
|
|
- logger.info(
|
|
|
- f'Dry run: Skipping image build for [{generic_runtime_image_name}]'
|
|
|
+ build_folder,
|
|
|
+ runtime_builder,
|
|
|
+ runtime_image_repo,
|
|
|
+ hash_tag,
|
|
|
+ lock_tag,
|
|
|
)
|
|
|
+ return hash_image_name
|
|
|
+
|
|
|
+ lock_image_name = f'{runtime_image_repo}:{lock_tag}'
|
|
|
+ build_from_scratch = True
|
|
|
+
|
|
|
+ # If the exact image already exists, we do not need to build it
|
|
|
+ if runtime_builder.image_exists(hash_image_name, False):
|
|
|
+ logger.info(f'Reusing Image [{hash_image_name}]')
|
|
|
+ return hash_image_name
|
|
|
+
|
|
|
+ # We look for an existing image that shares the same lock_tag. If such an image exists, we
|
|
|
+ # can use it as the base image for the build and just copy source files. This makes the build
|
|
|
+ # much faster.
|
|
|
+ if runtime_builder.image_exists(lock_image_name):
|
|
|
+ logger.info(f'Build [{hash_image_name}] from [{lock_image_name}]')
|
|
|
+ build_from_scratch = False
|
|
|
+ base_image = lock_image_name
|
|
|
+ else:
|
|
|
+ logger.info(f'Build [{hash_image_name}] from scratch')
|
|
|
+
|
|
|
+ prep_build_folder(build_folder, base_image, build_from_scratch, extra_deps)
|
|
|
+ if not dry_run:
|
|
|
+ _build_sandbox_image(
|
|
|
+ build_folder,
|
|
|
+ runtime_builder,
|
|
|
+ runtime_image_repo,
|
|
|
+ hash_tag,
|
|
|
+ lock_tag,
|
|
|
+ )
|
|
|
|
|
|
- if docker_build_folder is None:
|
|
|
- shutil.rmtree(cur_docker_build_folder)
|
|
|
-
|
|
|
- return f'{runtime_image_repo}:{from_scratch_hash}'
|
|
|
+ return hash_image_name
|
|
|
|
|
|
|
|
|
-def _build_sandbox_image(
|
|
|
- docker_folder: str,
|
|
|
- runtime_builder: RuntimeBuilder,
|
|
|
- target_image_repo: str,
|
|
|
- target_image_hash_tag: str,
|
|
|
- target_image_tag: str,
|
|
|
-) -> str:
|
|
|
- """Build and tag the sandbox image.
|
|
|
- The image will be tagged as both:
|
|
|
- - target_image_hash_tag
|
|
|
- - target_image_tag
|
|
|
+def prep_build_folder(
|
|
|
+ build_folder: Path,
|
|
|
+ base_image: str,
|
|
|
+ build_from_scratch: bool,
|
|
|
+ extra_deps: str | None,
|
|
|
+):
|
|
|
+ # Copy the source code to directory. It will end up in build_folder/code
|
|
|
+    # Always build from the local source tree (the installed-package lookup was removed)
|
|
|
+ openhands_source_dir = Path(openhands.__file__).parent
|
|
|
+ project_root = openhands_source_dir.parent
|
|
|
+ logger.info(f'Building source distribution using project root: {project_root}')
|
|
|
+
|
|
|
+ # Copy the 'openhands' directory (Source code)
|
|
|
+ shutil.copytree(
|
|
|
+ openhands_source_dir,
|
|
|
+ Path(build_folder, 'code', 'openhands'),
|
|
|
+ ignore=shutil.ignore_patterns(
|
|
|
+ '.*/',
|
|
|
+ '__pycache__/',
|
|
|
+ '*.pyc',
|
|
|
+ '*.md',
|
|
|
+ ),
|
|
|
+ )
|
|
|
|
|
|
- Parameters:
|
|
|
- - docker_folder (str): the path to the docker build folder
|
|
|
- - runtime_builder (RuntimeBuilder): the runtime builder instance
|
|
|
- - target_image_repo (str): the repository name for the target image
|
|
|
- - target_image_hash_tag (str): the *hash* tag for the target image that is calculated based
|
|
|
- on the contents of the docker build folder (source code and Dockerfile)
|
|
|
- e.g. 1234567890abcdef
|
|
|
- -target_image_tag (str): the tag for the target image that's generic and based on the base image name
|
|
|
- e.g. oh_v0.9.3_image_ubuntu_tag_22.04
|
|
|
- """
|
|
|
- target_image_hash_name = f'{target_image_repo}:{target_image_hash_tag}'
|
|
|
- target_image_generic_name = f'{target_image_repo}:{target_image_tag}'
|
|
|
+ # Copy pyproject.toml and poetry.lock files
|
|
|
+ for file in ['pyproject.toml', 'poetry.lock']:
|
|
|
+ src = Path(openhands_source_dir, file)
|
|
|
+ if not src.exists():
|
|
|
+ src = Path(project_root, file)
|
|
|
+ shutil.copy2(src, Path(build_folder, 'code', file))
|
|
|
|
|
|
- tags_to_add = [target_image_hash_name]
|
|
|
+ # Create a Dockerfile and write it to build_folder
|
|
|
+ dockerfile_content = _generate_dockerfile(
|
|
|
+ base_image,
|
|
|
+ build_from_scratch=build_from_scratch,
|
|
|
+ extra_deps=extra_deps,
|
|
|
+ )
|
|
|
+ with open(Path(build_folder, 'Dockerfile'), 'w') as file: # type: ignore
|
|
|
+ file.write(dockerfile_content) # type: ignore
|
|
|
+
|
|
|
+
|
|
|
+_ALPHABET = string.digits + string.ascii_lowercase
|
|
|
+
|
|
|
+
|
|
|
+def truncate_hash(hash: str) -> str:
|
|
|
+ """Convert the base16 hash to base36 and truncate at 16 characters."""
|
|
|
+ value = int(hash, 16)
|
|
|
+ result: List[str] = []
|
|
|
+ while value > 0 and len(result) < 16:
|
|
|
+ value, remainder = divmod(value, len(_ALPHABET))
|
|
|
+ result.append(_ALPHABET[remainder])
|
|
|
+ return ''.join(result)
|
|
|
+
|
|
|
+
|
|
|
+def get_hash_for_lock_files(base_image: str):
|
|
|
+ openhands_source_dir = Path(openhands.__file__).parent
|
|
|
+ md5 = hashlib.md5()
|
|
|
+ md5.update(base_image.encode())
|
|
|
+ for file in ['pyproject.toml', 'poetry.lock']:
|
|
|
+ src = Path(openhands_source_dir, file)
|
|
|
+ if not src.exists():
|
|
|
+ src = Path(openhands_source_dir.parent, file)
|
|
|
+ with open(src, 'rb') as f:
|
|
|
+ for chunk in iter(lambda: f.read(4096), b''):
|
|
|
+ md5.update(chunk)
|
|
|
+ # We get away with truncation because we want something that is unique
|
|
|
+ # rather than something that is cryptographically secure
|
|
|
+ result = truncate_hash(md5.hexdigest())
|
|
|
+ return result
|
|
|
+
|
|
|
+
|
|
|
+def get_hash_for_source_files():
|
|
|
+ openhands_source_dir = Path(openhands.__file__).parent
|
|
|
+ dir_hash = dirhash(
|
|
|
+ openhands_source_dir,
|
|
|
+ 'md5',
|
|
|
+ ignore=[
|
|
|
+ '.*/', # hidden directories
|
|
|
+ '__pycache__/',
|
|
|
+ '*.pyc',
|
|
|
+ ],
|
|
|
+ )
|
|
|
+ # We get away with truncation because we want something that is unique
|
|
|
+ # rather than something that is cryptographically secure
|
|
|
+ result = truncate_hash(dir_hash)
|
|
|
+ return result
|
|
|
|
|
|
- # Only add the generic tag if the image does not exist
|
|
|
- # so it does not get overwritten & only points to the earliest version
|
|
|
- # to avoid "too many layers" after many re-builds
|
|
|
- if not runtime_builder.image_exists(target_image_generic_name):
|
|
|
- tags_to_add.append(target_image_generic_name)
|
|
|
|
|
|
- try:
|
|
|
- image_name = runtime_builder.build(path=docker_folder, tags=tags_to_add)
|
|
|
- if not image_name:
|
|
|
- raise RuntimeError(f'Build failed for image {target_image_hash_name}')
|
|
|
- except Exception as e:
|
|
|
- logger.error(f'Sandbox image build failed: {str(e)}')
|
|
|
- raise
|
|
|
+def _build_sandbox_image(
|
|
|
+ build_folder: Path,
|
|
|
+ runtime_builder: RuntimeBuilder,
|
|
|
+ runtime_image_repo: str,
|
|
|
+ hash_tag: str,
|
|
|
+ lock_tag: str,
|
|
|
+):
|
|
|
+ """Build and tag the sandbox image. The image will be tagged with all tags that do not yet exist"""
|
|
|
+
|
|
|
+ names = [
|
|
|
+ name
|
|
|
+ for name in [
|
|
|
+ f'{runtime_image_repo}:{hash_tag}',
|
|
|
+ f'{runtime_image_repo}:{lock_tag}',
|
|
|
+ ]
|
|
|
+ if not runtime_builder.image_exists(name, False)
|
|
|
+ ]
|
|
|
+
|
|
|
+ image_name = runtime_builder.build(path=str(build_folder), tags=names)
|
|
|
+ if not image_name:
|
|
|
+ raise RuntimeError(f'Build failed for image {names}')
|
|
|
|
|
|
return image_name
|
|
|
|
|
|
@@ -416,7 +346,7 @@ if __name__ == '__main__':
|
|
|
runtime_image_hash_name = build_runtime_image(
|
|
|
args.base_image,
|
|
|
runtime_builder=DockerRuntimeBuilder(docker.from_env()),
|
|
|
- docker_build_folder=temp_dir,
|
|
|
+ build_folder=temp_dir,
|
|
|
dry_run=True,
|
|
|
force_rebuild=args.force_rebuild,
|
|
|
)
|