runtime.py 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605
  1. import os
  2. import tempfile
  3. import threading
  4. import uuid
  5. from typing import Callable
  6. from zipfile import ZipFile
  7. import docker
  8. import requests
  9. import tenacity
  10. from openhands.core.config import AppConfig
  11. from openhands.core.logger import DEBUG
  12. from openhands.core.logger import openhands_logger as logger
  13. from openhands.events import EventStream
  14. from openhands.events.action import (
  15. ActionConfirmationStatus,
  16. BrowseInteractiveAction,
  17. BrowseURLAction,
  18. CmdRunAction,
  19. FileReadAction,
  20. FileWriteAction,
  21. IPythonRunCellAction,
  22. )
  23. from openhands.events.action.action import Action
  24. from openhands.events.observation import (
  25. ErrorObservation,
  26. NullObservation,
  27. Observation,
  28. UserRejectObservation,
  29. )
  30. from openhands.events.serialization import event_to_dict, observation_from_dict
  31. from openhands.events.serialization.action import ACTION_TYPE_TO_CLASS
  32. from openhands.runtime.builder import DockerRuntimeBuilder
  33. from openhands.runtime.plugins import PluginRequirement
  34. from openhands.runtime.runtime import Runtime
  35. from openhands.runtime.utils import find_available_tcp_port
  36. from openhands.runtime.utils.request import send_request_with_retry
  37. from openhands.runtime.utils.runtime_build import build_runtime_image
  38. from openhands.utils.tenacity_stop import stop_if_should_exit
  39. class LogBuffer:
  40. """Synchronous buffer for Docker container logs.
  41. This class provides a thread-safe way to collect, store, and retrieve logs
  42. from a Docker container. It uses a list to store log lines and provides methods
  43. for appending, retrieving, and clearing logs.
  44. """
  45. def __init__(self, container: docker.models.containers.Container):
  46. self.client_ready = False
  47. self.init_msg = 'Runtime client initialized.'
  48. self.buffer: list[str] = []
  49. self.lock = threading.Lock()
  50. self._stop_event = threading.Event()
  51. self.log_generator = container.logs(stream=True, follow=True)
  52. self.log_stream_thread = threading.Thread(target=self.stream_logs)
  53. self.log_stream_thread.daemon = True
  54. self.log_stream_thread.start()
  55. def append(self, log_line: str):
  56. with self.lock:
  57. self.buffer.append(log_line)
  58. def get_and_clear(self) -> list[str]:
  59. with self.lock:
  60. logs = list(self.buffer)
  61. self.buffer.clear()
  62. return logs
  63. def stream_logs(self):
  64. """
  65. Stream logs from the Docker container in a separate thread.
  66. This method runs in its own thread to handle the blocking
  67. operation of reading log lines from the Docker SDK's synchronous generator.
  68. """
  69. try:
  70. for log_line in self.log_generator:
  71. if self._stop_event.is_set():
  72. break
  73. if log_line:
  74. decoded_line = log_line.decode('utf-8').rstrip()
  75. self.append(decoded_line)
  76. if self.init_msg in decoded_line:
  77. self.client_ready = True
  78. except Exception as e:
  79. logger.error(f'Error streaming docker logs: {e}')
  80. def __del__(self):
  81. if self.log_stream_thread.is_alive():
  82. logger.warn(
  83. "LogBuffer was not properly closed. Use 'log_buffer.close()' for clean shutdown."
  84. )
  85. self.close(timeout=5)
  86. def close(self, timeout: float = 5.0):
  87. self._stop_event.set()
  88. self.log_stream_thread.join(timeout)
class EventStreamRuntime(Runtime):
    """This runtime will subscribe the event stream.

    When receive an event, it will send the event to runtime-client which run
    inside the docker environment.
    From the sid also an instance_id is generated in combination with a UID.

    Args:
        config (AppConfig): The application configuration.
        event_stream (EventStream): The event stream to subscribe to.
        sid (str, optional): The session ID. Defaults to 'default'.
        plugins (list[PluginRequirement] | None, optional): List of plugin requirements. Defaults to None.
        env_vars (dict[str, str] | None, optional): Environment variables to set. Defaults to None.
    """

    # Prefix for sandbox container names; close() also uses it to sweep up
    # leftover containers from previous runs.
    container_name_prefix = 'openhands-sandbox-'

    def __init__(
        self,
        config: AppConfig,
        event_stream: EventStream,
        sid: str = 'default',
        plugins: list[PluginRequirement] | None = None,
        env_vars: dict[str, str] | None = None,
        status_message_callback: Callable | None = None,
    ):
        self.config = config
        self._host_port = 30000  # initial dummy value
        self._container_port = 30001  # initial dummy value
        # Recomputed in _init_container once the real port is chosen.
        self.api_url = f'{self.config.sandbox.local_runtime_url}:{self._container_port}'
        self.session = requests.Session()
        # instance_id = "<sid>_<uuid4>" (or just a uuid4 when sid is None).
        self.instance_id = (
            sid + '_' + str(uuid.uuid4()) if sid is not None else str(uuid.uuid4())
        )
        self.status_message_callback = status_message_callback
        self.send_status_message('STATUS$STARTING_RUNTIME')
        self.docker_client: docker.DockerClient = self._init_docker_client()
        self.base_container_image = self.config.sandbox.base_container_image
        self.runtime_container_image = self.config.sandbox.runtime_container_image
        self.container_name = self.container_name_prefix + self.instance_id
        self.container = None
        self.action_semaphore = threading.Semaphore(1)  # Ensure one action at a time
        self.runtime_builder = DockerRuntimeBuilder(self.docker_client)
        logger.debug(f'EventStreamRuntime `{self.instance_id}`')
        # Buffer for container logs; created by _init_container.
        self.log_buffer: LogBuffer | None = None
        if self.config.sandbox.runtime_extra_deps:
            logger.debug(
                f'Installing extra user-provided dependencies in the runtime image: {self.config.sandbox.runtime_extra_deps}'
            )
        # Opt-out flag for dumping container logs on close (env var).
        self.skip_container_logs = (
            os.environ.get('SKIP_CONTAINER_LOGS', 'false').lower() == 'true'
        )
        # No prebuilt runtime image configured: build one from the base image.
        if self.runtime_container_image is None:
            if self.base_container_image is None:
                raise ValueError(
                    'Neither runtime container image nor base container image is set'
                )
            logger.info('Preparing container, this might take a few minutes...')
            self.send_status_message('STATUS$STARTING_CONTAINER')
            self.runtime_container_image = build_runtime_image(
                self.base_container_image,
                self.runtime_builder,
                extra_deps=self.config.sandbox.runtime_extra_deps,
                force_rebuild=self.config.sandbox.force_rebuild_runtime,
            )
        self.container = self._init_container(
            sandbox_workspace_dir=self.config.workspace_mount_path_in_sandbox,  # e.g. /workspace
            mount_dir=self.config.workspace_mount_path,  # e.g. /opt/openhands/_test_workspace
            plugins=plugins,
        )
        # will initialize both the event stream and the env vars
        super().__init__(
            config, event_stream, sid, plugins, env_vars, status_message_callback
        )
        logger.info('Waiting for client to become ready...')
        self.send_status_message('STATUS$WAITING_FOR_CLIENT')
        self._wait_until_alive()
        self.setup_initial_env()
        logger.info(
            f'Container initialized with plugins: {[plugin.name for plugin in self.plugins]}'
        )
        self.send_status_message(' ')

    @staticmethod
    def _init_docker_client() -> docker.DockerClient:
        """Create a Docker client from the environment.

        Logs a hint about installing/starting Docker before re-raising any
        failure, since a missing daemon is the most common cause.
        """
        try:
            return docker.from_env()
        except Exception as ex:
            logger.error(
                'Launch docker client failed. Please make sure you have installed docker and started docker desktop/daemon.'
            )
            raise ex

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5) | stop_if_should_exit(),
        wait=tenacity.wait_exponential(multiplier=1, min=4, max=60),
    )
    def _init_container(
        self,
        sandbox_workspace_dir: str,
        mount_dir: str | None = None,
        plugins: list[PluginRequirement] | None = None,
    ):
        """Start the sandbox container running the runtime client.

        Picks a free port, assembles the client command line (plugins,
        username/uid, optional browsergym eval env), optionally mounts the
        workspace, and starts the container detached. Retried up to 5 times
        with exponential backoff by the tenacity decorator; on failure the
        partially-created resources are cleaned up via self.close().

        Returns:
            The started docker container object.
        """
        try:
            logger.info('Preparing to start container...')
            self.send_status_message('STATUS$PREPARING_CONTAINER')
            plugin_arg = ''
            if plugins is not None and len(plugins) > 0:
                plugin_arg = (
                    f'--plugins {" ".join([plugin.name for plugin in plugins])} '
                )
            self._host_port = self._find_available_port()
            self._container_port = (
                self._host_port
            )  # in future this might differ from host port
            self.api_url = (
                f'{self.config.sandbox.local_runtime_url}:{self._container_port}'
            )
            use_host_network = self.config.sandbox.use_host_network
            network_mode: str | None = 'host' if use_host_network else None
            # No explicit port mapping needed in host-network mode.
            port_mapping: dict[str, list[dict[str, str]]] | None = (
                None
                if use_host_network
                else {
                    f'{self._container_port}/tcp': [{'HostPort': str(self._host_port)}]
                }
            )
            if use_host_network:
                # NOTE(review): logger.warn is a deprecated alias of
                # logger.warning — consider updating.
                logger.warn(
                    'Using host network mode. If you are using MacOS, please make sure you have the latest version of Docker Desktop and enabled host network feature: https://docs.docker.com/network/drivers/host/#docker-desktop'
                )
            # Combine environment variables
            environment = {
                'port': str(self._container_port),
                'PYTHONUNBUFFERED': 1,
            }
            if self.config.debug or DEBUG:
                environment['DEBUG'] = 'true'
            logger.debug(f'Workspace Base: {self.config.workspace_base}')
            if mount_dir is not None and sandbox_workspace_dir is not None:
                # e.g. result would be: {"/home/user/openhands/workspace": {'bind': "/workspace", 'mode': 'rw'}}
                volumes = {mount_dir: {'bind': sandbox_workspace_dir, 'mode': 'rw'}}
                logger.debug(f'Mount dir: {mount_dir}')
            else:
                logger.warn(
                    'Warning: Mount dir is not set, will not mount the workspace directory to the container!\n'
                )
                volumes = None
            logger.debug(f'Sandbox workspace: {sandbox_workspace_dir}')
            if self.config.sandbox.browsergym_eval_env is not None:
                browsergym_arg = (
                    f'--browsergym-eval-env {self.config.sandbox.browsergym_eval_env}'
                )
            else:
                browsergym_arg = ''
            container = self.docker_client.containers.run(
                self.runtime_container_image,
                command=(
                    f'/openhands/micromamba/bin/micromamba run -n openhands '
                    f'poetry run '
                    f'python -u -m openhands.runtime.client.client {self._container_port} '
                    f'--working-dir "{sandbox_workspace_dir}" '
                    f'{plugin_arg}'
                    f'--username {"openhands" if self.config.run_as_openhands else "root"} '
                    f'--user-id {self.config.sandbox.user_id} '
                    f'{browsergym_arg}'
                ),
                network_mode=network_mode,
                ports=port_mapping,
                working_dir='/openhands/code/',  # do not change this!
                name=self.container_name,
                detach=True,
                environment=environment,
                volumes=volumes,
            )
            # Start capturing container logs immediately so readiness can be
            # detected by _wait_until_alive.
            self.log_buffer = LogBuffer(container)
            logger.info(f'Container started. Server url: {self.api_url}')
            self.send_status_message('STATUS$CONTAINER_STARTED')
            return container
        except Exception as e:
            logger.error(
                f'Error: Instance {self.instance_id} FAILED to start container!\n'
            )
            logger.exception(e)
            # Keep the docker client open: tenacity will retry this method.
            self.close(close_client=False)
            raise e

    def _refresh_logs(self):
        """Drain buffered container logs and emit them through the logger."""
        logger.debug('Getting container logs...')
        assert (
            self.log_buffer is not None
        ), 'Log buffer is expected to be initialized when container is started'
        logs = self.log_buffer.get_and_clear()
        if logs:
            formatted_logs = '\n'.join([f'|{log}' for log in logs])
            logger.info(
                '\n'
                + '-' * 35
                + 'Container logs:'
                + '-' * 35
                + f'\n{formatted_logs}'
                + '\n'
                + '-' * 80
            )

    @tenacity.retry(
        stop=tenacity.stop_after_delay(120) | stop_if_should_exit(),
        wait=tenacity.wait_exponential(multiplier=2, min=1, max=20),
        # NOTE(review): tenacity's `reraise` parameter is documented as a
        # bool; this tuple is simply truthy (behaves as reraise=True) rather
        # than "reraise only ConnectionRefusedError" — confirm intent.
        reraise=(ConnectionRefusedError,),
    )
    def _wait_until_alive(self):
        """Block until the runtime client inside the container is reachable.

        First requires the LogBuffer to have seen the client's init banner,
        then polls the /alive endpoint; retried for up to 120 s by the
        tenacity decorator above.

        Raises:
            RuntimeError: if the client is not ready or /alive is unhealthy.
        """
        self._refresh_logs()
        if not (self.log_buffer and self.log_buffer.client_ready):
            raise RuntimeError('Runtime client is not ready.')
        response = send_request_with_retry(
            self.session,
            'GET',
            f'{self.api_url}/alive',
            retry_exceptions=[ConnectionRefusedError],
            timeout=300,  # 5 minutes gives the container time to be alive 🧟‍♂️
        )
        if response.status_code == 200:
            return
        else:
            msg = f'Action execution API is not alive. Response: {response}'
            logger.error(msg)
            raise RuntimeError(msg)

    def close(self, close_client: bool = True, rm_all_containers: bool = True):
        """Closes the EventStreamRuntime and associated objects

        Parameters:
        - close_client (bool): Whether to close the DockerClient
        - rm_all_containers (bool): Whether to remove all containers with the 'openhands-sandbox-' prefix
        """
        if self.log_buffer:
            self.log_buffer.close()
        if self.session:
            self.session.close()
        try:
            containers = self.docker_client.containers.list(all=True)
            for container in containers:
                try:
                    # If the app doesn't shut down properly, it can leave runtime containers on the system. This ensures
                    # that all 'openhands-sandbox-' containers are removed as well.
                    if rm_all_containers and container.name.startswith(
                        self.container_name_prefix
                    ):
                        container.remove(force=True)
                    elif container.name == self.container_name:
                        if not self.skip_container_logs:
                            # Dump the tail of the container log for debugging.
                            logs = container.logs(tail=1000).decode('utf-8')
                            logger.debug(
                                f'==== Container logs on close ====\n{logs}\n==== End of container logs ===='
                            )
                        container.remove(force=True)
                except docker.errors.APIError:
                    pass
                except docker.errors.NotFound:
                    pass
        except docker.errors.NotFound:  # yes, this can happen!
            pass
        if close_client:
            self.docker_client.close()

    def run_action(self, action: Action) -> Observation:
        """Forward an action to the runtime client and return its observation.

        Serializes the action, POSTs it to /execute_action, and deserializes
        the observation from the response. Non-runnable, unconfirmed, or
        rejected actions short-circuit without contacting the container.
        Only one action runs at a time (guarded by self.action_semaphore).
        """
        # set timeout to default if not set
        if action.timeout is None:
            action.timeout = self.config.sandbox.timeout
        with self.action_semaphore:
            if not action.runnable:
                return NullObservation('')
            if (
                hasattr(action, 'is_confirmed')
                and action.is_confirmed
                == ActionConfirmationStatus.AWAITING_CONFIRMATION
            ):
                return NullObservation('')
            action_type = action.action  # type: ignore[attr-defined]
            if action_type not in ACTION_TYPE_TO_CLASS:
                return ErrorObservation(f'Action {action_type} does not exist.')
            if not hasattr(self, action_type):
                return ErrorObservation(
                    f'Action {action_type} is not supported in the current runtime.'
                )
            if (
                hasattr(action, 'is_confirmed')
                and action.is_confirmed == ActionConfirmationStatus.REJECTED
            ):
                return UserRejectObservation(
                    'Action has been rejected by the user! Waiting for further user input.'
                )
            self._refresh_logs()
            assert action.timeout is not None
            try:
                response = send_request_with_retry(
                    self.session,
                    'POST',
                    f'{self.api_url}/execute_action',
                    json={'action': event_to_dict(action)},
                    timeout=action.timeout,
                )
                if response.status_code == 200:
                    output = response.json()
                    obs = observation_from_dict(output)
                    obs._cause = action.id  # type: ignore[attr-defined]
                else:
                    logger.debug(f'action: {action}')
                    logger.debug(f'response: {response}')
                    error_message = response.text
                    logger.error(f'Error from server: {error_message}')
                    obs = ErrorObservation(f'Action execution failed: {error_message}')
            except requests.Timeout:
                logger.error('No response received within the timeout period.')
                obs = ErrorObservation(
                    f'Action execution timed out after {action.timeout} seconds.'
                )
            except Exception as e:
                logger.error(f'Error during action execution: {e}')
                obs = ErrorObservation(f'Action execution failed: {str(e)}')
            self._refresh_logs()
            return obs

    def run(self, action: CmdRunAction) -> Observation:
        """Execute a shell command action."""
        return self.run_action(action)

    def run_ipython(self, action: IPythonRunCellAction) -> Observation:
        """Execute an IPython cell action."""
        return self.run_action(action)

    def read(self, action: FileReadAction) -> Observation:
        """Execute a file-read action."""
        return self.run_action(action)

    def write(self, action: FileWriteAction) -> Observation:
        """Execute a file-write action."""
        return self.run_action(action)

    def browse(self, action: BrowseURLAction) -> Observation:
        """Execute a browse-URL action."""
        return self.run_action(action)

    def browse_interactive(self, action: BrowseInteractiveAction) -> Observation:
        """Execute an interactive browsing action."""
        return self.run_action(action)

    # ====================================================================
    # Implement these methods (for file operations) in the subclass
    # ====================================================================

    def copy_to(
        self, host_src: str, sandbox_dest: str, recursive: bool = False
    ) -> None:
        """Upload a file (or, with recursive=True, a directory as a zip)
        from the host into the sandbox at `sandbox_dest`.

        Raises:
            FileNotFoundError: if `host_src` does not exist.
            TimeoutError: if the upload request times out.
            RuntimeError: for any other upload failure.
        """
        if not os.path.exists(host_src):
            raise FileNotFoundError(f'Source file {host_src} does not exist')
        self._refresh_logs()
        try:
            if recursive:
                # For recursive copy, create a zip file
                with tempfile.NamedTemporaryFile(
                    suffix='.zip', delete=False
                ) as temp_zip:
                    temp_zip_path = temp_zip.name
                with ZipFile(temp_zip_path, 'w') as zipf:
                    for root, _, files in os.walk(host_src):
                        for file in files:
                            file_path = os.path.join(root, file)
                            # Archive paths are relative to host_src's parent,
                            # so the top-level directory name is preserved.
                            arcname = os.path.relpath(
                                file_path, os.path.dirname(host_src)
                            )
                            zipf.write(file_path, arcname)
                # NOTE(review): this file handle is never explicitly closed
                # (relies on GC) — consider a context manager.
                upload_data = {'file': open(temp_zip_path, 'rb')}
            else:
                # For single file copy
                upload_data = {'file': open(host_src, 'rb')}
            params = {'destination': sandbox_dest, 'recursive': str(recursive).lower()}
            response = send_request_with_retry(
                self.session,
                'POST',
                f'{self.api_url}/upload_file',
                files=upload_data,
                params=params,
                timeout=300,
            )
            if response.status_code == 200:
                return
            else:
                error_message = response.text
                raise Exception(f'Copy operation failed: {error_message}')
        except requests.Timeout:
            raise TimeoutError('Copy operation timed out')
        except Exception as e:
            raise RuntimeError(f'Copy operation failed: {str(e)}')
        finally:
            # Remove the temporary zip even on failure or early return.
            if recursive:
                os.unlink(temp_zip_path)
            logger.info(f'Copy completed: host:{host_src} -> runtime:{sandbox_dest}')
        self._refresh_logs()

    def list_files(self, path: str | None = None) -> list[str]:
        """List files in the sandbox.

        If path is None, list files in the sandbox's initial working directory (e.g., /workspace).
        """
        self._refresh_logs()
        try:
            data = {}
            if path is not None:
                data['path'] = path
            response = send_request_with_retry(
                self.session,
                'POST',
                f'{self.api_url}/list_files',
                json=data,
                timeout=30,  # 30 seconds because the container should already be alive
            )
            if response.status_code == 200:
                response_json = response.json()
                assert isinstance(response_json, list)
                return response_json
            else:
                error_message = response.text
                raise Exception(f'List files operation failed: {error_message}')
        except requests.Timeout:
            raise TimeoutError('List files operation timed out')
        except Exception as e:
            raise RuntimeError(f'List files operation failed: {str(e)}')

    def copy_from(self, path: str) -> bytes:
        """Zip all files in the sandbox and return as a stream of bytes."""
        self._refresh_logs()
        try:
            params = {'path': path}
            response = send_request_with_retry(
                self.session,
                'GET',
                f'{self.api_url}/download_files',
                params=params,
                stream=True,
                timeout=30,
            )
            if response.status_code == 200:
                data = response.content
                return data
            else:
                error_message = response.text
                raise Exception(f'Copy operation failed: {error_message}')
        except requests.Timeout:
            raise TimeoutError('Copy operation timed out')
        except Exception as e:
            raise RuntimeError(f'Copy operation failed: {str(e)}')

    def _is_port_in_use_docker(self, port) -> bool:
        """Best-effort check: does any running container's port map mention
        `port`? (String containment over the ports dict — may over-match.)
        """
        containers = self.docker_client.containers.list()
        for container in containers:
            container_ports = container.ports
            if str(port) in str(container_ports):
                return True
        return False

    def _find_available_port(self, max_attempts=5) -> int:
        """Find a host TCP port in [30000, 39999] not already used by a
        container; after `max_attempts` tries, return the last candidate.
        """
        port = 39999
        for _ in range(max_attempts):
            port = find_available_tcp_port(30000, 39999)
            if not self._is_port_in_use_docker(port):
                return port
        # If no port is found after max_attempts, return the last tried port
        return port

    def send_status_message(self, message: str):
        """Sends a status message if the callback function was provided."""
        if self.status_message_callback:
            self.status_message_callback(message)