Explorar el Código

increase the default retries for LLM (#2986)

Xingyao Wang hace 1 año
commit padre: 135da0ea2b
Se han modificado 2 ficheros con 7 adiciones y 3 borrados
  1. opendevin/core/config.py (+4 −2)
  2. opendevin/llm/llm.py (+3 −1)

+ 4 - 2
opendevin/core/config.py

@@ -33,6 +33,7 @@ class LLMConfig:
         aws_secret_access_key: The AWS secret access key.
         aws_region_name: The AWS region name.
         num_retries: The number of retries to attempt.
+        retry_multiplier: The multiplier for the exponential backoff.
         retry_min_wait: The minimum time to wait between retries, in seconds. This is exponential backoff minimum. For models with very low limits, this can be set to 15-20.
         retry_max_wait: The maximum time to wait between retries, in seconds. This is exponential backoff maximum.
         timeout: The timeout for the API.
@@ -57,9 +58,10 @@ class LLMConfig:
     aws_access_key_id: str | None = None
     aws_secret_access_key: str | None = None
     aws_region_name: str | None = None
-    num_retries: int = 5
+    num_retries: int = 10
+    retry_multiplier: float = 2
     retry_min_wait: int = 3
-    retry_max_wait: int = 60
+    retry_max_wait: int = 300
     timeout: int | None = None
     max_message_chars: int = 10_000  # maximum number of characters in an observation's content when sent to the llm
     temperature: float = 0

+ 3 - 1
opendevin/llm/llm.py

@@ -117,7 +117,9 @@ class LLM:
             reraise=True,
             stop=stop_after_attempt(config.num_retries),
             wait=wait_random_exponential(
-                min=config.retry_min_wait, max=config.retry_max_wait
+                multiplier=config.retry_multiplier,
+                min=config.retry_min_wait,
+                max=config.retry_max_wait,
             ),
             retry=retry_if_exception_type(
                 (