Browse Source

fix typos (#2187)

* fix typos

no functional change

* fix typos
RainRat 1 year ago
parent
commit
ed6dcc8381

+ 1 - 1
agenthub/browsing_agent/prompt.py

@@ -146,7 +146,7 @@ class Shrinkable(PromptElement, abc.ABC):
         """Implement shrinking of this prompt element.
 
         You need to recursively call all shrinkable elements that are part of
-        this prompt. You can also implement a shriking startegy for this prompt.
+        this prompt. You can also implement a shrinking strategy for this prompt.
         Shrinking can be called multiple times to progressively shrink the
         prompt until it fits max_tokens. Default max shrink iterations is 20.
         """

+ 2 - 2
evaluation/EDA/run_infer.py

@@ -125,7 +125,7 @@ def process_instance(instance, agent_class, metadata, reset_logger: bool = True)
         )
     )
     # ======= Attempt to evaluate the agent's edits =======
-    # If you are working on simplier benchmark that only evaluates the final model output (e.g., in a MessageAction)
+    # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
     # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
 
     if state is None:
@@ -235,7 +235,7 @@ if __name__ == '__main__':
         'max_iterations': max_iterations,
         'eval_output_dir': eval_output_dir,
         'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
-        # get the commit id of current repo for reproduciblity
+        # get the commit id of current repo for reproducibility
         'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
         .decode('utf-8')
         .strip(),

+ 2 - 2
evaluation/gaia/run_infer.py

@@ -142,7 +142,7 @@ def process_instance(instance, agent_class, metadata, reset_logger: bool = True)
         )
     )
     # ======= Attempt to evaluate the agent's edits =======
-    # If you are working on simplier benchmark that only evaluates the final model output (e.g., in a MessageAction)
+    # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
     # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
 
     if state is None:
@@ -264,7 +264,7 @@ if __name__ == '__main__':
         'max_iterations': max_iterations,
         'eval_output_dir': eval_output_dir,
         'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
-        # get the commit id of current repo for reproduciblity
+        # get the commit id of current repo for reproducibility
         'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
         .decode('utf-8')
         .strip(),

+ 1 - 1
evaluation/humanevalfix/run_infer.py

@@ -284,7 +284,7 @@ if __name__ == '__main__':
         'max_iterations': max_iterations,
         'eval_output_dir': eval_output_dir,
         'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
-        # get the commit id of current repo for reproduciblity
+        # get the commit id of current repo for reproducibility
         'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
         .decode('utf-8')
         .strip(),

+ 1 - 1
evaluation/logic_reasoning/run_infer.py

@@ -215,7 +215,7 @@ def process_instance(
         )
     )
     # ======= Attempt to evaluate the agent's edits =======
-    # If you are working on simplier benchmark that only evaluates the final model output (e.g., in a MessageAction)
+    # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
     # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
 
     if state is None:

+ 2 - 2
evaluation/mint/run_infer.py

@@ -104,7 +104,7 @@ def process_instance(
         # add back the console handler to print ONE line
         logger.addHandler(get_console_handler())
         logger.info(
-            f'Starting evaluation for instance {instance.task_id}.\nHint: run "tail -f {log_file}" to see live logs in a seperate shell'
+            f'Starting evaluation for instance {instance.task_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
         )
         # Remove all existing handlers from logger
         for handler in logger.handlers[:]:
@@ -256,7 +256,7 @@ if __name__ == '__main__':
         'max_propose_solution': args.max_propose_solution,
         'eval_output_dir': eval_output_dir,
         'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
-        # get the commit id of current repo for reproduciblity
+        # get the commit id of current repo for reproducibility
         'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
         .decode('utf-8')
         .strip(),

+ 2 - 2
evaluation/swe_bench/run_infer.py

@@ -217,7 +217,7 @@ def process_instance(
         # add back the console handler to print ONE line
         logger.addHandler(get_console_handler())
         logger.info(
-            f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a seperate shell'
+            f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell'
         )
         # Remove all existing handlers from logger
         for handler in logger.handlers[:]:
@@ -410,7 +410,7 @@ if __name__ == '__main__':
         'max_iterations': max_iterations,
         'eval_output_dir': eval_output_dir,
         'start_time': time.strftime('%Y-%m-%d %H:%M:%S'),
-        # get the commit id of current repo for reproduciblity
+        # get the commit id of current repo for reproducibility
         'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'])
         .decode('utf-8')
         .strip(),

+ 1 - 1
frontend/src/components/file-explorer/FileExplorer.test.tsx

@@ -130,7 +130,7 @@ describe("FileExplorer", () => {
     });
 
     expect(uploadFiles).rejects.toThrow();
-    // TODO: figure out why spy isnt called to pass test
+    // TODO: figure out why spy isn't called to pass test
     expect(toastSpy).toHaveBeenCalledWith("ws", "Error uploading file");
   });
 });

+ 1 - 1
opendevin/runtime/plugins/agent_skills/agentskills.py

@@ -293,7 +293,7 @@ def edit_file(start: int, end: int, content: str) -> None:
     n_edited_lines = len(edited_content.split('\n'))
     new_lines = lines[: start - 1] + [edited_content] + lines[end:]
 
-    # directly write editted lines to the file
+    # directly write edited lines to the file
     with open(CURRENT_FILE, 'w') as file:
         file.writelines(new_lines)