memory.py

import threading

from openai._exceptions import APIConnectionError, InternalServerError, RateLimitError
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)

from openhands.core.config import LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.core.utils import json

try:
    import chromadb
    import llama_index.embeddings.openai.base as llama_openai
    from llama_index.core import Document, VectorStoreIndex
    from llama_index.core.retrievers import VectorIndexRetriever
    from llama_index.vector_stores.chroma import ChromaVectorStore

    LLAMA_INDEX_AVAILABLE = True
except ImportError:
    LLAMA_INDEX_AVAILABLE = False

if LLAMA_INDEX_AVAILABLE:
    # TODO: this could be made configurable
    num_retries: int = 10
    retry_min_wait: int = 3
    retry_max_wait: int = 300

    # llama-index wraps openai.get_embeddings() in its own retry decorator,
    # initialized with hard-coded attempt counts, wait times, and error types.
    # That non-customizable behavior causes problems when it retries faster
    # than a provider's rate limits allow. This block strips the built-in
    # decorator and replaces it with ours, so users can set their own limits.
    if hasattr(llama_openai.get_embeddings, '__wrapped__'):
        original_get_embeddings = llama_openai.get_embeddings.__wrapped__
    else:
        logger.warning('Cannot set custom retry limits.')
        num_retries = 1
        original_get_embeddings = llama_openai.get_embeddings

    def attempt_on_error(retry_state):
        logger.error(
            f'{retry_state.outcome.exception()}. Attempt #{retry_state.attempt_number} | You can customize these settings in the configuration.',
            exc_info=False,
        )
        return None

    @retry(
        reraise=True,
        stop=stop_after_attempt(num_retries),
        wait=wait_random_exponential(min=retry_min_wait, max=retry_max_wait),
        retry=retry_if_exception_type(
            (RateLimitError, APIConnectionError, InternalServerError)
        ),
        after=attempt_on_error,
    )
    def wrapper_get_embeddings(*args, **kwargs):
        return original_get_embeddings(*args, **kwargs)

    llama_openai.get_embeddings = wrapper_get_embeddings
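
    # Note: after this assignment, every llama-index code path that calls
    # llama_openai.get_embeddings() transparently picks up the retry policy
    # above; no individual call sites need to change.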


class EmbeddingsLoader:
    """Loader for embedding model initialization."""

    @staticmethod
    def get_embedding_model(strategy: str, llm_config: LLMConfig):
        supported_ollama_embed_models = [
            'llama2',
            'mxbai-embed-large',
            'nomic-embed-text',
            'all-minilm',
            'stable-code',
        ]
        if strategy in supported_ollama_embed_models:
            from llama_index.embeddings.ollama import OllamaEmbedding

            return OllamaEmbedding(
                model_name=strategy,
                base_url=llm_config.embedding_base_url,
                ollama_additional_kwargs={'mirostat': 0},
            )
        elif strategy == 'openai':
            from llama_index.embeddings.openai import OpenAIEmbedding

            return OpenAIEmbedding(
                model='text-embedding-ada-002',
                api_key=llm_config.api_key,
            )
        elif strategy == 'azureopenai':
            from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

            return AzureOpenAIEmbedding(
                model='text-embedding-ada-002',
                deployment_name=llm_config.embedding_deployment_name,
                api_key=llm_config.api_key,
                azure_endpoint=llm_config.base_url,
                api_version=llm_config.api_version,
            )
        elif (strategy is not None) and (strategy.lower() == 'none'):
            # TODO: this works but is not elegant. The motivation is that when
            # no agent that uses embeddings is running, there is no reason to
            # initialize an embedding model at all.
            return None
        else:
            from llama_index.embeddings.huggingface import HuggingFaceEmbedding

            return HuggingFaceEmbedding(model_name='BAAI/bge-small-en-v1.5')
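

# A minimal usage sketch (an illustration, not part of this module; it assumes
# an LLMConfig whose embedding_model is one of the strategies handled above):
#
#   config = LLMConfig(embedding_model='openai', api_key='...')
#   embed_model = EmbeddingsLoader.get_embedding_model(
#       config.embedding_model, config
#   )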


class LongTermMemory:
    """Handles storing information for the agent to access later, using chromadb."""

    def __init__(self, llm_config: LLMConfig, memory_max_threads: int = 1):
        """Initialize the chromadb and set up ChromaVectorStore for later use."""
        if not LLAMA_INDEX_AVAILABLE:
            raise ImportError(
                'llama_index and its dependencies are not installed. '
                'To use LongTermMemory, please run: poetry install --with llama-index'
            )
        db = chromadb.Client(chromadb.Settings(anonymized_telemetry=False))
        self.collection = db.get_or_create_collection(name='memories')
        vector_store = ChromaVectorStore(chroma_collection=self.collection)
        embedding_strategy = llm_config.embedding_model
        embed_model = EmbeddingsLoader.get_embedding_model(
            embedding_strategy, llm_config
        )
        self.index = VectorStoreIndex.from_vector_store(vector_store, embed_model)
        self.sema = threading.Semaphore(value=memory_max_threads)
        self.thought_idx = 0
        self._add_threads: list[threading.Thread] = []

    def add_event(self, event: dict):
        """Adds a new event to the long term memory with a unique id.

        Parameters:
        - event (dict): The new event to be added to memory
        """
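        # Illustrative event shapes (hypothetical payloads; only the presence
        # of an 'action' or 'observation' key matters below):
        #   {'action': 'run', 'args': {'command': 'ls'}}
        #   {'observation': 'run', 'content': '...'}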
        id = ''
        t = ''
        if 'action' in event:
            t = 'action'
            id = event['action']
        elif 'observation' in event:
            t = 'observation'
            id = event['observation']
        doc = Document(
            text=json.dumps(event),
            doc_id=str(self.thought_idx),
            extra_info={
                'type': t,
                'id': id,
                'idx': self.thought_idx,
            },
        )
        self.thought_idx += 1
        logger.debug('Adding %s event to memory: %d', t, self.thought_idx)
        # Add the doc on a background thread so callers don't have to wait
        # ~500ms for the insert.
        thread = threading.Thread(target=self._add_doc, args=(doc,))
        self._add_threads.append(thread)
        thread.start()

    def _add_doc(self, doc):
        # The semaphore caps concurrent inserts at memory_max_threads.
        with self.sema:
            self.index.insert(doc)

    def search(self, query: str, k: int = 10):
        """Searches through the current memory using VectorIndexRetriever.

        Parameters:
        - query (str): A query to match search results to
        - k (int): Number of top results to return

        Returns:
        - list[str]: list of top k results found in current memory
        """
        retriever = VectorIndexRetriever(
            index=self.index,
            similarity_top_k=k,
        )
        results = retriever.retrieve(query)
        return [r.get_text() for r in results]
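

if __name__ == '__main__':
    # A minimal end-to-end sketch, not part of the original module: it assumes
    # LLMConfig accepts an embedding_model keyword and that the optional
    # llama-index dependencies are installed ('local' falls through to the
    # HuggingFace BGE model above).
    sample_config = LLMConfig(embedding_model='local')
    memory = LongTermMemory(sample_config, memory_max_threads=2)
    memory.add_event({'action': 'run', 'args': {'command': 'ls'}})
    for t in memory._add_threads:
        t.join()  # wait for the background insert to land before searching
    print(memory.search('shell commands', k=5))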