@@ -27,7 +27,7 @@ from tenacity import (
     wait_random_exponential,
 )
 
-from openhands.core.exceptions import UserCancelledError
+from openhands.core.exceptions import LLMResponseError, UserCancelledError
 from openhands.core.logger import llm_prompt_logger, llm_response_logger
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.metrics import Metrics
@@ -410,7 +410,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/completion
         """
-        return self._completion
+        try:
+            return self._completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_completion(self):
@@ -418,7 +421,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_completion
+        try:
+            return self._async_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_streaming_completion(self):
@@ -426,7 +432,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_streaming_completion
+        try:
+            return self._async_streaming_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     def supports_vision(self):
         return litellm.supports_vision(self.config.model)
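
The change above normalizes any exception raised while the completion properties are evaluated into a single LLMResponseError. A minimal, self-contained sketch of that error-normalization pattern, assuming LLMResponseError subclasses Exception as a stand-in for openhands.core.exceptions.LLMResponseError; flaky_completion is a hypothetical provider call, not part of the diff:

class LLMResponseError(Exception):
    """Single exception type callers can catch for any LLM failure."""


def flaky_completion(prompt: str) -> str:
    # Hypothetical provider call; simulates a transient provider failure.
    raise TimeoutError('provider timed out')


def completion(prompt: str) -> str:
    try:
        return flaky_completion(prompt)
    except Exception as e:
        # Normalize provider-specific failures into one domain error.
        raise LLMResponseError(e) from e


try:
    completion('hello')
except LLMResponseError as err:
    print(f'LLM call failed: {err!r}')

Note that in the diff the try wraps the attribute lookup itself, so it converts exceptions raised while the property is evaluated; exceptions raised later, when the returned wrapper is actually invoked, propagate unchanged.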