import copy
import os
import time
import warnings
from functools import partial
from typing import Any

import requests

from openhands.core.config import LLMConfig

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    import litellm

from litellm import Message as LiteLLMMessage
from litellm import ModelInfo, PromptTokensDetails
from litellm import completion as litellm_completion
from litellm import completion_cost as litellm_completion_cost
from litellm.exceptions import (
    APIConnectionError,
    APIError,
    InternalServerError,
    RateLimitError,
    ServiceUnavailableError,
)
from litellm.types.utils import CostPerToken, ModelResponse, Usage
from litellm.utils import create_pretrained_tokenizer

from openhands.core.exceptions import CloudFlareBlockageError
from openhands.core.logger import openhands_logger as logger
from openhands.core.message import Message
from openhands.llm.debug_mixin import DebugMixin
from openhands.llm.fn_call_converter import (
    STOP_WORDS,
    convert_fncall_messages_to_non_fncall_messages,
    convert_non_fncall_messages_to_fncall_messages,
)
from openhands.llm.metrics import Metrics
from openhands.llm.retry_mixin import RetryMixin

__all__ = ['LLM']

# tuple of exceptions to retry on
LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (
    APIConnectionError,
    # FIXME: APIError is useful on 502 from a proxy for example,
    # but it also retries on other errors that are permanent
    APIError,
    InternalServerError,
    RateLimitError,
    ServiceUnavailableError,
)
# cache prompt supporting models
# remove this when gemini and deepseek are supported
CACHE_PROMPT_SUPPORTED_MODELS = [
    'claude-3-5-sonnet-20241022',
    'claude-3-5-sonnet-20240620',
    'claude-3-5-haiku-20241022',
    'claude-3-haiku-20240307',
    'claude-3-opus-20240229',
]

# function calling supporting models
FUNCTION_CALLING_SUPPORTED_MODELS = [
    'claude-3-5-sonnet',
    'claude-3-5-sonnet-20240620',
    'claude-3-5-sonnet-20241022',
    'claude-3.5-haiku',
    'claude-3-5-haiku-20241022',
    'gpt-4o-mini',
    'gpt-4o',
]

class LLM(RetryMixin, DebugMixin):
    """The LLM class represents a Language Model instance.

    Attributes:
        config: an LLMConfig object specifying the configuration of the LLM.
    """

    def __init__(
        self,
        config: LLMConfig,
        metrics: Metrics | None = None,
    ):
        """Initializes the LLM. If LLMConfig is passed, its values will be the fallback.

        Passing simple parameters always overrides config.

        Args:
            config: The LLM configuration.
            metrics: The metrics to use.
        """
        self._tried_model_info = False
        self.metrics: Metrics = (
            metrics if metrics is not None else Metrics(model_name=config.model)
        )
        self.cost_metric_supported: bool = True
        self.config: LLMConfig = copy.deepcopy(config)

        # litellm actually uses base Exception here for unknown model
        self.model_info: ModelInfo | None = None

        if self.config.log_completions:
            if self.config.log_completions_folder is None:
                raise RuntimeError(
                    'log_completions_folder is required when log_completions is enabled'
                )
            os.makedirs(self.config.log_completions_folder, exist_ok=True)

        # call init_model_info to initialize config.max_output_tokens,
        # which is used in the partial function below
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self.init_model_info()
        if self.vision_is_active():
            logger.debug('LLM: model has vision enabled')
        if self.is_caching_prompt_active():
            logger.debug('LLM: caching prompt enabled')
        if self.is_function_calling_active():
            logger.debug('LLM: model supports function calling')

        # if using a custom tokenizer, make sure it's loaded and accessible in the format expected by litellm
        if self.config.custom_tokenizer is not None:
            self.tokenizer = create_pretrained_tokenizer(self.config.custom_tokenizer)
        else:
            self.tokenizer = None

        # set up the completion function
        self._completion = partial(
            litellm_completion,
            model=self.config.model,
            api_key=self.config.api_key,
            base_url=self.config.base_url,
            api_version=self.config.api_version,
            custom_llm_provider=self.config.custom_llm_provider,
            max_tokens=self.config.max_output_tokens,
            timeout=self.config.timeout,
            temperature=self.config.temperature,
            top_p=self.config.top_p,
            drop_params=self.config.drop_params,
            modify_params=self.config.modify_params,
        )
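
        # Note: the partial above pre-binds the configured model, credentials and
        # sampling parameters, so callers normally pass only `messages` (and
        # optionally `tools`); positional model overrides are ignored by the
        # wrapper defined below.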

        self._completion_unwrapped = self._completion

        @self.retry_decorator(
            num_retries=self.config.num_retries,
            retry_exceptions=LLM_RETRY_EXCEPTIONS,
            retry_min_wait=self.config.retry_min_wait,
            retry_max_wait=self.config.retry_max_wait,
            retry_multiplier=self.config.retry_multiplier,
        )
        def wrapper(*args, **kwargs):
            """Wrapper for the litellm completion function. Logs the input and output of the completion function."""
            from openhands.core.utils import json

            messages: list[dict[str, Any]] | dict[str, Any] = []
            mock_function_calling = kwargs.pop('mock_function_calling', False)

            # some callers might send the model and messages directly
            # litellm allows positional args, like completion(model, messages, **kwargs)
            if len(args) > 1:
                # ignore the first argument if it's provided (it would be the model)
                # design wise: we don't allow overriding the configured values
                # implementation wise: the partial function sets the model as a kwarg already,
                # as well as the other kwargs
                messages = args[1] if len(args) > 1 else args[0]
                kwargs['messages'] = messages

                # remove the first args, they're sent in kwargs
                args = args[2:]
            elif 'messages' in kwargs:
                messages = kwargs['messages']

            # ensure we work with a list of messages
            messages = messages if isinstance(messages, list) else [messages]

            original_fncall_messages = copy.deepcopy(messages)
            mock_fncall_tools = None
            if mock_function_calling:
                assert (
                    'tools' in kwargs
                ), "'tools' must be in kwargs when mock_function_calling is True"
                messages = convert_fncall_messages_to_non_fncall_messages(
                    messages, kwargs['tools']
                )
                kwargs['messages'] = messages
                kwargs['stop'] = STOP_WORDS
                mock_fncall_tools = kwargs.pop('tools')

            # if we have no messages, something went very wrong
            if not messages:
                raise ValueError(
                    'The messages list is empty. At least one message is required.'
                )

            # log the entire LLM prompt
            self.log_prompt(messages)

            if self.is_caching_prompt_active():
                # Anthropic-specific prompt caching
                if 'claude-3' in self.config.model:
                    kwargs['extra_headers'] = {
                        'anthropic-beta': 'prompt-caching-2024-07-31',
                    }

            try:
                # Record start time for latency measurement
                start_time = time.time()

                # we don't support streaming here, thus we get a ModelResponse
                resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)

                # Calculate and record latency
                latency = time.time() - start_time
                response_id = resp.get('id', 'unknown')
                self.metrics.add_response_latency(latency, response_id)

                non_fncall_response = copy.deepcopy(resp)
                if mock_function_calling:
                    assert len(resp.choices) == 1
                    assert mock_fncall_tools is not None
                    non_fncall_response_message = resp.choices[0].message
                    fn_call_messages_with_response = (
                        convert_non_fncall_messages_to_fncall_messages(
                            messages + [non_fncall_response_message], mock_fncall_tools
                        )
                    )
                    fn_call_response_message = fn_call_messages_with_response[-1]
                    if not isinstance(fn_call_response_message, LiteLLMMessage):
                        fn_call_response_message = LiteLLMMessage(
                            **fn_call_response_message
                        )
                    resp.choices[0].message = fn_call_response_message

                message_back: str = resp['choices'][0]['message']['content'] or ''
                tool_calls = resp['choices'][0]['message'].get('tool_calls', [])
                if tool_calls:
                    for tool_call in tool_calls:
                        fn_name = tool_call.function.name
                        fn_args = tool_call.function.arguments
                        message_back += f'\nFunction call: {fn_name}({fn_args})'

                # log the LLM response
                self.log_response(message_back)

                # post-process the response first to calculate cost
                cost = self._post_completion(resp)

                # log for evals or other scripts that need the raw completion
                if self.config.log_completions:
                    assert self.config.log_completions_folder is not None
                    log_file = os.path.join(
                        self.config.log_completions_folder,
                        # use the metric model name (for draft editor)
                        f'{self.metrics.model_name.replace("/", "__")}-{time.time()}.json',
                    )

                    # set up the dict to be logged
                    _d = {
                        'messages': messages,
                        'response': resp,
                        'args': args,
                        'kwargs': {k: v for k, v in kwargs.items() if k != 'messages'},
                        'timestamp': time.time(),
                        'cost': cost,
                    }

                    # if non-native function calling, save messages/response separately
                    if mock_function_calling:
                        # Overwrite response as non-fncall to be consistent with messages
                        _d['response'] = non_fncall_response
                        # Save fncall_messages/response separately
                        _d['fncall_messages'] = original_fncall_messages
                        _d['fncall_response'] = resp
                    with open(log_file, 'w') as f:
                        f.write(json.dumps(_d))

                return resp
            except APIError as e:
                if 'Attention Required! | Cloudflare' in str(e):
                    raise CloudFlareBlockageError(
                        'Request blocked by CloudFlare'
                    ) from e
                raise

        self._completion = wrapper

    @property
    def completion(self):
        """Decorator for the litellm completion function.

        Check the complete documentation at https://litellm.vercel.app/docs/completion
        """
        return self._completion
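
    # Illustrative only: a minimal sketch of how the wrapped completion is typically
    # invoked, assuming an existing LLM instance named `llm` and OpenAI-style chat
    # message dicts (in practice these come from format_messages_for_llm):
    #
    #     resp = llm.completion(
    #         messages=[{'role': 'user', 'content': 'Hello!'}],
    #     )
    #     print(resp['choices'][0]['message']['content'])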

    def init_model_info(self):
        if self._tried_model_info:
            return
        self._tried_model_info = True
        try:
            if self.config.model.startswith('openrouter'):
                self.model_info = litellm.get_model_info(self.config.model)
        except Exception as e:
            logger.debug(f'Error getting model info: {e}')

        if self.config.model.startswith('litellm_proxy/'):
            # IF we are using LiteLLM proxy, get model info from LiteLLM proxy
            # GET {base_url}/v1/model/info with litellm_model_id as path param
            response = requests.get(
                f'{self.config.base_url}/v1/model/info',
                headers={'Authorization': f'Bearer {self.config.api_key}'},
            )
            resp_json = response.json()
            if 'data' not in resp_json:
                logger.error(
                    f'Error getting model info from LiteLLM proxy: {resp_json}'
                )
            all_model_info = resp_json.get('data', [])
            current_model_info = next(
                (
                    info
                    for info in all_model_info
                    if info['model_name']
                    == self.config.model.removeprefix('litellm_proxy/')
                ),
                None,
            )
            if current_model_info:
                self.model_info = current_model_info['model_info']

        # Last two attempts to get model info from NAME
        if not self.model_info:
            try:
                self.model_info = litellm.get_model_info(
                    self.config.model.split(':')[0]
                )
            # noinspection PyBroadException
            except Exception:
                pass
        if not self.model_info:
            try:
                self.model_info = litellm.get_model_info(
                    self.config.model.split('/')[-1]
                )
            # noinspection PyBroadException
            except Exception:
                pass
        logger.debug(f'Model info: {self.model_info}')

        if self.config.model.startswith('huggingface'):
            # HF doesn't support the OpenAI default value for top_p (1)
            logger.debug(
                f'Setting top_p to 0.9 for Hugging Face model: {self.config.model}'
            )
            self.config.top_p = 0.9 if self.config.top_p == 1 else self.config.top_p

        # Set the max tokens in an LM-specific way if not set
        if self.config.max_input_tokens is None:
            if (
                self.model_info is not None
                and 'max_input_tokens' in self.model_info
                and isinstance(self.model_info['max_input_tokens'], int)
            ):
                self.config.max_input_tokens = self.model_info['max_input_tokens']
            else:
                # Safe fallback for any potentially viable model
                self.config.max_input_tokens = 4096

        if self.config.max_output_tokens is None:
            # Safe default for any potentially viable model
            self.config.max_output_tokens = 4096
            if self.model_info is not None:
                # max_output_tokens has precedence over max_tokens, if either exists.
                # litellm has models with both, one or none of these 2 parameters!
                if 'max_output_tokens' in self.model_info and isinstance(
                    self.model_info['max_output_tokens'], int
                ):
                    self.config.max_output_tokens = self.model_info['max_output_tokens']
                elif 'max_tokens' in self.model_info and isinstance(
                    self.model_info['max_tokens'], int
                ):
                    self.config.max_output_tokens = self.model_info['max_tokens']

    def vision_is_active(self) -> bool:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return not self.config.disable_vision and self._supports_vision()

    def _supports_vision(self) -> bool:
        """Acquire from litellm if model is vision capable.

        Returns:
            bool: True if model is vision capable. Return False if model not supported by litellm.
        """
        # litellm.supports_vision currently returns False for 'openai/gpt-...' or 'anthropic/claude-...' (with prefixes)
        # but model_info will have the correct value for some reason.
        # we can go with it, but we will need to keep an eye on whether model_info is correct for Vertex or other providers
        # remove when litellm is updated to fix https://github.com/BerriAI/litellm/issues/5608
        # Check both the full model name and the name after the proxy prefix for vision support
        return (
            litellm.supports_vision(self.config.model)
            or litellm.supports_vision(self.config.model.split('/')[-1])
            or (
                self.model_info is not None
                and self.model_info.get('supports_vision', False)
            )
        )

    def is_caching_prompt_active(self) -> bool:
        """Check if prompt caching is supported and enabled for the current model.

        Returns:
            boolean: True if prompt caching is supported and enabled for the given model.
        """
        return (
            self.config.caching_prompt is True
            and (
                self.config.model in CACHE_PROMPT_SUPPORTED_MODELS
                or self.config.model.split('/')[-1] in CACHE_PROMPT_SUPPORTED_MODELS
            )
            # We don't need to look up model_info, because only Anthropic models need the explicit caching breakpoint
        )
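
    # Illustrative example: with a hypothetical config.model of
    # 'anthropic/claude-3-5-sonnet-20241022', the full name is not in
    # CACHE_PROMPT_SUPPORTED_MODELS verbatim, but its split('/')[-1] suffix
    # 'claude-3-5-sonnet-20241022' is, so is_caching_prompt_active() returns True
    # when config.caching_prompt is enabled.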

    def is_function_calling_active(self) -> bool:
        # Check if model name is in supported list before checking model_info
        model_name_supported = (
            self.config.model in FUNCTION_CALLING_SUPPORTED_MODELS
            or self.config.model.split('/')[-1] in FUNCTION_CALLING_SUPPORTED_MODELS
            or any(m in self.config.model for m in FUNCTION_CALLING_SUPPORTED_MODELS)
        )
        return model_name_supported

    def _post_completion(self, response: ModelResponse) -> float:
        """Post-process the completion response.

        Logs the cost and usage stats of the completion call.
        """
        try:
            cur_cost = self._completion_cost(response)
        except Exception:
            cur_cost = 0

        stats = ''
        if self.cost_metric_supported:
            # keep track of the cost
            stats = 'Cost: %.2f USD | Accumulated Cost: %.2f USD\n' % (
                cur_cost,
                self.metrics.accumulated_cost,
            )

        # Add latency to stats if available
        if self.metrics.response_latencies:
            latest_latency = self.metrics.response_latencies[-1]
            stats += 'Response Latency: %.3f seconds\n' % latest_latency.latency

        usage: Usage | None = response.get('usage')

        if usage:
            # keep track of the input and output tokens
            input_tokens = usage.get('prompt_tokens')
            output_tokens = usage.get('completion_tokens')

            if input_tokens:
                stats += 'Input tokens: ' + str(input_tokens)

            if output_tokens:
                stats += (
                    (' | ' if input_tokens else '')
                    + 'Output tokens: '
                    + str(output_tokens)
                    + '\n'
                )

            # read the prompt cache hit, if any
            prompt_tokens_details: PromptTokensDetails = usage.get(
                'prompt_tokens_details'
            )
            cache_hit_tokens = (
                prompt_tokens_details.cached_tokens if prompt_tokens_details else None
            )
            if cache_hit_tokens:
                stats += 'Input tokens (cache hit): ' + str(cache_hit_tokens) + '\n'

            # For Anthropic, the cache writes have a different cost than regular input tokens
            # but litellm doesn't separate them in the usage stats
            # so we can read it from the provider-specific extra field
            model_extra = usage.get('model_extra', {})
            cache_write_tokens = model_extra.get('cache_creation_input_tokens')
            if cache_write_tokens:
                stats += 'Input tokens (cache write): ' + str(cache_write_tokens) + '\n'

        # log the stats
        if stats:
            logger.debug(stats)

        return cur_cost
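
    # Example of the debug stats assembled above (values are illustrative only):
    #     Cost: 0.01 USD | Accumulated Cost: 0.05 USD
    #     Response Latency: 1.234 seconds
    #     Input tokens: 1200 | Output tokens: 300
    #     Input tokens (cache hit): 800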

    def get_token_count(self, messages: list[dict] | list[Message]) -> int:
        """Get the number of tokens in a list of messages. Use dicts for better token counting.

        Args:
            messages (list): A list of messages, either as a list of dicts or as a list of Message objects.

        Returns:
            int: The number of tokens.
        """
        # attempt to convert Message objects to dicts, litellm expects dicts
        if (
            isinstance(messages, list)
            and len(messages) > 0
            and isinstance(messages[0], Message)
        ):
            logger.info(
                'Message objects now include serialized tool calls in token counting'
            )
            messages = self.format_messages_for_llm(messages)  # type: ignore

        # try to get the token count with the default litellm tokenizers
        # or the custom tokenizer if set for this LLM configuration
        try:
            return litellm.token_counter(
                model=self.config.model,
                messages=messages,
                custom_tokenizer=self.tokenizer,
            )
        except Exception as e:
            # limit logspam in case token count is not supported
            logger.error(
                f'Error getting token count for\n model {self.config.model}\n{e}'
                + (
                    f'\ncustom_tokenizer: {self.config.custom_tokenizer}'
                    if self.config.custom_tokenizer is not None
                    else ''
                )
            )
            return 0
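
    # Illustrative only: token counting can be exercised with plain chat dicts, e.g.
    #     n = llm.get_token_count([{'role': 'user', 'content': 'How many tokens is this?'}])
    # If litellm has no tokenizer for the configured model and no custom_tokenizer is
    # set, the error is logged and 0 is returned rather than raising.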

    def _is_local(self) -> bool:
        """Determines if the system is using a locally running LLM.

        Returns:
            boolean: True if executing a local model.
        """
        if self.config.base_url is not None:
            for substring in ['localhost', '127.0.0.1', '0.0.0.0']:
                if substring in self.config.base_url:
                    return True
        elif self.config.model is not None:
            if self.config.model.startswith('ollama'):
                return True
        return False

    def _completion_cost(self, response) -> float:
        """Calculate the cost of a completion response based on the model. Local models are treated as free.

        Add the current cost into total cost in metrics.

        Args:
            response: A response from a model invocation.

        Returns:
            number: The cost of the response.
        """
        if not self.cost_metric_supported:
            return 0.0

        extra_kwargs = {}
        if (
            self.config.input_cost_per_token is not None
            and self.config.output_cost_per_token is not None
        ):
            cost_per_token = CostPerToken(
                input_cost_per_token=self.config.input_cost_per_token,
                output_cost_per_token=self.config.output_cost_per_token,
            )
            logger.debug(f'Using custom cost per token: {cost_per_token}')
            extra_kwargs['custom_cost_per_token'] = cost_per_token

        try:
            # try to get response_cost directly from the response
            cost = getattr(response, '_hidden_params', {}).get('response_cost', None)
            if cost is None:
                cost = litellm_completion_cost(
                    completion_response=response, **extra_kwargs
                )
            self.metrics.add_cost(cost)
            return cost
        except Exception:
            self.cost_metric_supported = False
            logger.debug('Cost calculation not supported for this model.')
        return 0.0

    def __str__(self):
        if self.config.api_version:
            return f'LLM(model={self.config.model}, api_version={self.config.api_version}, base_url={self.config.base_url})'
        elif self.config.base_url:
            return f'LLM(model={self.config.model}, base_url={self.config.base_url})'
        return f'LLM(model={self.config.model})'

    def __repr__(self):
        return str(self)

    def reset(self) -> None:
        self.metrics.reset()

    def format_messages_for_llm(self, messages: Message | list[Message]) -> list[dict]:
        if isinstance(messages, Message):
            messages = [messages]

        # set flags to know how to serialize the messages
        for message in messages:
            message.cache_enabled = self.is_caching_prompt_active()
            message.vision_enabled = self.vision_is_active()
            message.function_calling_enabled = self.is_function_calling_active()

        # let pydantic handle the serialization
        return [message.model_dump() for message in messages]
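

# A minimal usage sketch, not part of the module's public surface. It assumes a
# valid API key is available and that LLMConfig accepts the `model` and `api_key`
# fields as shown (the env var name 'LLM_API_KEY' is illustrative); adjust to the
# actual LLMConfig fields in your checkout before running.
if __name__ == '__main__':
    _config = LLMConfig(model='gpt-4o-mini', api_key=os.environ.get('LLM_API_KEY'))
    _llm = LLM(_config)
    # the wrapped completion returns a litellm ModelResponse
    _response = _llm.completion(
        messages=[{'role': 'user', 'content': 'Say hello in one word.'}]
    )
    print(_response['choices'][0]['message']['content'])
    print('tokens:', _llm.get_token_count([{'role': 'user', 'content': 'hi'}]))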