
simplified get (#962)

* simplified get

* resolved merge conflicts

* removed default param for get

* Update opendevin/config.py

---------

Co-authored-by: Robert Brennan <accounts@rbren.io>
மனோஜ்குமார் பழனிச்சாமி 1 year ago
parent
commit
70534f203e
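The change in a nutshell: the four config accessors (_get, get_or_error, get_or_default, get_or_none) collapse into a single get(key, required=False). A rough migration map, inferred from the hunks below:

    # before this commit                                       # after
    config.get_or_error('LLM_API_KEY')                         config.get('LLM_API_KEY', required=True)
    config.get_or_none('SANDBOX_USER_ID')                      config.get('SANDBOX_USER_ID')
    config.get_or_default('LLM_MODEL', 'gpt-3.5-turbo-1106')   config.get('LLM_MODEL')  # default presumably moves into DEFAULT_CONFIG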

+ 32 - 32
agenthub/monologue_agent/utils/memory.py

@@ -7,36 +7,37 @@ from llama_index.vector_stores.chroma import ChromaVectorStore
 from opendevin import config
 from . import json
 
-embedding_strategy = config.get("LLM_EMBEDDING_MODEL")
+embedding_strategy = config.get('LLM_EMBEDDING_MODEL')
 
 # TODO: More embeddings: https://docs.llamaindex.ai/en/stable/examples/embeddings/OpenAI/
 # There's probably a more programmatic way to do this.
-if embedding_strategy == "llama2":
+if embedding_strategy == 'llama2':
     from llama_index.embeddings.ollama import OllamaEmbedding
     embed_model = OllamaEmbedding(
-        model_name="llama2",
-        base_url=config.get_or_error("LLM_BASE_URL"),
-        ollama_additional_kwargs={"mirostat": 0},
+        model_name='llama2',
+        base_url=config.get('LLM_BASE_URL', required=True),
+        ollama_additional_kwargs={'mirostat': 0},
     )
-elif embedding_strategy == "openai":
+elif embedding_strategy == 'openai':
     from llama_index.embeddings.openai import OpenAIEmbedding
     embed_model = OpenAIEmbedding(
-        model="text-embedding-ada-002",
-        api_key=config.get_or_error("LLM_API_KEY")
+        model='text-embedding-ada-002',
+        api_key=config.get('LLM_API_KEY', required=True)
     )
-elif embedding_strategy == "azureopenai":
-    from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding  # Need to instruct to set these env variables in documentation
+elif embedding_strategy == 'azureopenai':
+    # TODO: document that these env variables must be set
+    from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
     embed_model = AzureOpenAIEmbedding(
-        model="text-embedding-ada-002",
-        deployment_name=config.get_or_error("LLM_DEPLOYMENT_NAME"),
-        api_key=config.get_or_error("LLM_API_KEY"),
-        azure_endpoint=config.get_or_error("LLM_BASE_URL"),
-        api_version=config.get_or_error("LLM_API_VERSION"),
+        model='text-embedding-ada-002',
+        deployment_name=config.get('LLM_DEPLOYMENT_NAME', required=True),
+        api_key=config.get('LLM_API_KEY', required=True),
+        azure_endpoint=config.get('LLM_BASE_URL', required=True),
+        api_version=config.get('LLM_API_VERSION', required=True),
     )
 else:
     from llama_index.embeddings.huggingface import HuggingFaceEmbedding
     embed_model = HuggingFaceEmbedding(
-        model_name="BAAI/bge-small-en-v1.5"
+        model_name='BAAI/bge-small-en-v1.5'
     )
 
 
@@ -51,9 +52,10 @@ class LongTermMemory:
         Initialize the chromadb and set up ChromaVectorStore for later use.
         """
         db = chromadb.Client()
-        self.collection = db.get_or_create_collection(name="memories")
+        self.collection = db.get_or_create_collection(name='memories')
         vector_store = ChromaVectorStore(chroma_collection=self.collection)
-        self.index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
+        self.index = VectorStoreIndex.from_vector_store(
+            vector_store, embed_model=embed_model)
         self.thought_idx = 0
 
     def add_event(self, event: dict):
@@ -63,27 +65,27 @@ class LongTermMemory:
         Parameters:
         - event (dict): The new event to be added to memory
         """
-        id = ""
-        t = ""
-        if "action" in event:
-            t = "action"
-            id = event["action"]
-        elif "observation" in event:
-            t = "observation"
-            id = event["observation"]
+        id = ''
+        t = ''
+        if 'action' in event:
+            t = 'action'
+            id = event['action']
+        elif 'observation' in event:
+            t = 'observation'
+            id = event['observation']
         doc = Document(
             text=json.dumps(event),
             doc_id=str(self.thought_idx),
             extra_info={
-                "type": t,
-                "id": id,
-                "idx": self.thought_idx,
+                'type': t,
+                'id': id,
+                'idx': self.thought_idx,
             },
         )
         self.thought_idx += 1
         self.index.insert(doc)
 
-    def search(self, query: str, k: int=10):
+    def search(self, query: str, k: int = 10):
         """
         Searches through the current memory using VectorIndexRetriever
 
@@ -100,5 +102,3 @@ class LongTermMemory:
         )
         results = retriever.retrieve(query)
         return [r.get_text() for r in results]
-
-
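For orientation, a minimal usage sketch of the class edited above; the 'action'/'observation' keys mirror add_event's own branching, while the remaining event fields are illustrative assumptions:

    memory = LongTermMemory()  # requires one of the embedding backends configured above
    memory.add_event({'action': 'run', 'args': {'command': 'ls'}})    # extra fields assumed
    memory.add_event({'observation': 'run', 'content': 'README.md'})  # extra fields assumed
    matches = memory.search('list files', k=5)  # list of up to k stored event texts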

+ 8 - 33
opendevin/config.py

@@ -23,7 +23,8 @@ DEFAULT_CONFIG: dict = {
     ConfigType.DIRECTORY_REWRITE: '',
     ConfigType.MAX_ITERATIONS: 100,
     ConfigType.AGENT: 'MonologueAgent',
-    ConfigType.SANDBOX_TYPE: 'ssh'
+    ConfigType.SANDBOX_TYPE: 'ssh',
+    ConfigType.DISABLE_COLOR: 'false',
 }
 
 config_str = ''
@@ -40,47 +41,21 @@ for k, v in config.items():
         config[k] = tomlConfig[k]
 
 
-def _get(key: str, default):
-    value = config.get(key, default)
-    if not value:
-        value = os.environ.get(key, default)
-    return value
-
-
-def get_or_error(key: str):
+def get(key: str, required: bool = False):
     """
-    Get a key from the config, or raise an error if it doesn't exist.
+    Get a key from the environment variables, config.toml, or the default configs.
     """
-    value = get_or_none(key)
+    value = os.environ.get(key)
     if not value:
+        value = config.get(key)
+    if not value and required:
         raise KeyError(f"Please set '{key}' in `config.toml` or `.env`.")
     return value
 
 
-def get_or_default(key: str, default):
-    """
-    Get a key from the config, or return a default value if it doesn't exist.
-    """
-    return _get(key, default)
-
-
-def get_or_none(key: str):
-    """
-    Get a key from the config, or return None if it doesn't exist.
-    """
-    return _get(key, None)
-
-
-def get(key: str):
-    """
-    Get a key from the config, please make sure it exists.
-    """
-    return config.get(key)
-
-
 def get_fe_config() -> dict:
     """
-    Get all the configuration values by performing a deep copy.
+    Get all the frontend configuration values by performing a deep copy.
     """
     fe_config = copy.deepcopy(config)
     del fe_config['LLM_API_KEY']
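The resulting lookup order, as the new get() reads: environment variable first, then the merged config dict (config.toml values over DEFAULT_CONFIG), and a KeyError only when required=True. A small usage sketch:

    from opendevin import config

    model = config.get('LLM_MODEL')                     # None if the key is set nowhere
    api_key = config.get('LLM_API_KEY', required=True)  # KeyError if unset anywhere

Note that the 'if not value' checks are truthiness tests, so an empty string in the environment falls through to config.toml/defaults rather than winning the lookup.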

+ 3 - 3
opendevin/controller/agent_controller.py

@@ -39,7 +39,7 @@ ColorType = Literal[
 ]
 
 DISABLE_COLOR_PRINTING = (
-    config.get_or_default("DISABLE_COLOR", "false").lower() == "true"
+    config.get('DISABLE_COLOR').lower() == 'true'
 )
 MAX_ITERATIONS = config.get("MAX_ITERATIONS")
 
@@ -55,10 +55,10 @@ def print_with_color(text: Any, print_type: str = "INFO"):
     }
     color = TYPE_TO_COLOR.get(print_type.upper(), TYPE_TO_COLOR["INFO"])
     if DISABLE_COLOR_PRINTING:
-        print(f"\n{print_type.upper()}:\n{str(text)}", flush=True)
+        print(f'\n{print_type.upper()}:\n{str(text)}', flush=True)
     else:
         print(
-            colored(f"\n{print_type.upper()}:\n", color, attrs=["bold"])
+            colored(f'\n{print_type.upper()}:\n', color, attrs=['bold'])
             + colored(str(text), color),
             flush=True,
         )
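This call site is why the config.py hunk adds DISABLE_COLOR: 'false' to DEFAULT_CONFIG: without that default, config.get('DISABLE_COLOR') could return None and the .lower() above would raise AttributeError. With the default in place, the flag parses safely:

    # DEFAULT_CONFIG supplies 'false', so get() never returns None for this key
    DISABLE_COLOR_PRINTING = config.get('DISABLE_COLOR').lower() == 'true'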

+ 24 - 22
opendevin/main.py

@@ -12,7 +12,7 @@ from opendevin.llm.llm import LLM
 
 def read_task_from_file(file_path: str) -> str:
     """Read task from the specified file."""
-    with open(file_path, "r", encoding="utf-8") as file:
+    with open(file_path, 'r', encoding='utf-8') as file:
         return file.read()
 
 
@@ -23,43 +23,44 @@ def read_task_from_stdin() -> str:
 
 def parse_arguments():
     """Parse command-line arguments."""
-    parser = argparse.ArgumentParser(description="Run an agent with a specific task")
+    parser = argparse.ArgumentParser(
+        description='Run an agent with a specific task')
     parser.add_argument(
-        "-d",
-        "--directory",
+        '-d',
+        '--directory',
         required=True,
         type=str,
-        help="The working directory for the agent",
+        help='The working directory for the agent',
     )
     parser.add_argument(
-        "-t", "--task", type=str, default="", help="The task for the agent to perform"
+        '-t', '--task', type=str, default='', help='The task for the agent to perform'
     )
     parser.add_argument(
-        "-f",
-        "--file",
+        '-f',
+        '--file',
         type=str,
-        help="Path to a file containing the task. Overrides -t if both are provided.",
+        help='Path to a file containing the task. Overrides -t if both are provided.',
     )
     parser.add_argument(
-        "-c",
-        "--agent-cls",
-        default="MonologueAgent",
+        '-c',
+        '--agent-cls',
+        default='MonologueAgent',
         type=str,
-        help="The agent class to use",
+        help='The agent class to use',
     )
     parser.add_argument(
-        "-m",
-        "--model-name",
-        default=config.get_or_default("LLM_MODEL", "gpt-3.5-turbo-1106"),
+        '-m',
+        '--model-name',
+        default=config.get('LLM_MODEL'),
         type=str,
-        help="The (litellm) model name to use",
+        help='The (litellm) model name to use',
     )
     parser.add_argument(
-        "-i",
-        "--max-iterations",
+        '-i',
+        '--max-iterations',
         default=100,
         type=int,
-        help="The maximum number of iterations to run the agent",
+        help='The maximum number of iterations to run the agent',
     )
     return parser.parse_args()
 
@@ -77,7 +78,8 @@ async def main():
         task = args.task
 
     if not task:
-        raise ValueError("No task provided. Please specify a task through -t, -f.")
+        raise ValueError(
+            'No task provided. Please specify a task through -t, -f.')
 
     print(
         f'Running agent {args.agent_cls} (model: {args.model_name}, directory: {args.directory}) with task: "{task}"'
@@ -92,5 +94,5 @@ async def main():
     await controller.start_loop(task)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     asyncio.run(main())
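Example invocations based on the flags defined above (directory, task, and model are illustrative):

    python opendevin/main.py -d ./workspace -t 'fix the failing test' -i 50
    python opendevin/main.py -d ./workspace -f task.txt -m gpt-4 -c MonologueAgent

-f overrides -t when both are given, and -m now defaults to config.get('LLM_MODEL') rather than the hard-coded 'gpt-3.5-turbo-1106'.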

+ 2 - 2
opendevin/sandbox/exec_box.py

@@ -25,8 +25,8 @@ CONTAINER_IMAGE = config.get(ConfigType.SANDBOX_CONTAINER_IMAGE)
 # How do we make this more flexible?
 RUN_AS_DEVIN = config.get('RUN_AS_DEVIN').lower() != 'false'
 USER_ID = 1000
-if config.get_or_none('SANDBOX_USER_ID') is not None:
-    USER_ID = int(config.get_or_default('SANDBOX_USER_ID', ''))
+if SANDBOX_USER_ID := config.get('SANDBOX_USER_ID'):
+    USER_ID = int(SANDBOX_USER_ID)
 elif hasattr(os, 'getuid'):
     USER_ID = os.getuid()
 

+ 2 - 2
opendevin/sandbox/ssh_box.py

@@ -27,8 +27,8 @@ CONTAINER_IMAGE = config.get(ConfigType.SANDBOX_CONTAINER_IMAGE)
 # How do we make this more flexible?
 RUN_AS_DEVIN = config.get('RUN_AS_DEVIN').lower() != 'false'
 USER_ID = 1000
-if config.get_or_none('SANDBOX_USER_ID') is not None:
-    USER_ID = int(config.get_or_default('SANDBOX_USER_ID', ''))
+if SANDBOX_USER_ID := config.get('SANDBOX_USER_ID'):
+    USER_ID = int(SANDBOX_USER_ID)
 elif hasattr(os, 'getuid'):
     USER_ID = os.getuid()
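exec_box.py and ssh_box.py receive the same two-line change: the walrus operator (:=) binds the lookup and tests it in one expression. An equivalent long form, for clarity:

    SANDBOX_USER_ID = config.get('SANDBOX_USER_ID')
    if SANDBOX_USER_ID:                 # truthy, i.e. a non-empty string
        USER_ID = int(SANDBOX_USER_ID)
    elif hasattr(os, 'getuid'):         # POSIX only; Windows keeps the 1000 default
        USER_ID = os.getuid()

One subtlety: the old guard was 'is not None' while the new one is truthiness, so an empty SANDBOX_USER_ID now falls through to os.getuid() instead of risking int('').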
 

+ 1 - 0
opendevin/schema/config.py

@@ -17,3 +17,4 @@ class ConfigType(str, Enum):
     MAX_ITERATIONS = 'MAX_ITERATIONS'
     AGENT = 'AGENT'
     SANDBOX_TYPE = 'SANDBOX_TYPE'
+    DISABLE_COLOR = 'DISABLE_COLOR'
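Because ConfigType subclasses str, the new member and the bare string are interchangeable lookup keys; call sites elsewhere in this diff already mix both styles:

    config.get(ConfigType.DISABLE_COLOR)  # equivalent to config.get('DISABLE_COLOR')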