Explore the code

More json cleaning (#924)

* More json cleaning

* remove redundant check
Author: Engel Nyst — 1 year ago
parent
commit
973a42fd78

+ 7 - 5
agenthub/monologue_agent/utils/json.py

@@ -1,6 +1,7 @@
 import json
 from json_repair import repair_json
 
+
 def my_encoder(obj):
     """
     Encodes objects as dictionaries
@@ -11,9 +12,10 @@ def my_encoder(obj):
     Returns:
     - dict: If the object can be converted it is returned in dict format
     """
-    if hasattr(obj, "to_dict"):
+    if hasattr(obj, 'to_dict'):
         return obj.to_dict()
 
+
 def dumps(obj, **kwargs):
     """
     Serialize an object to str format
@@ -21,15 +23,15 @@ def dumps(obj, **kwargs):
 
     return json.dumps(obj, default=my_encoder, **kwargs)
 
+
 def loads(s, **kwargs):
     """
     Create a JSON object from str
     """
-    json_start = s.find("{")
-    json_end = s.rfind("}") + 1
+    json_start = s.find('{')
+    json_end = s.rfind('}') + 1
     if json_start == -1 or json_end == -1:
-        raise ValueError("Invalid response: no JSON found")
+        raise ValueError('Invalid response: no JSON found')
     s = s[json_start:json_end]
     s = repair_json(s)
     return json.loads(s, **kwargs)
-

+ 5 - 8
agenthub/monologue_agent/utils/monologue.py

@@ -3,6 +3,7 @@ from opendevin.llm.llm import LLM
 import agenthub.monologue_agent.utils.json as json
 import agenthub.monologue_agent.utils.prompts as prompts
 
+
 class Monologue:
     """
     The monologue is a representation for the agent's internal monologue where it can think.
@@ -26,7 +27,7 @@ class Monologue:
         - ValueError: If t is not a dict
         """
         if not isinstance(t, dict):
-            raise ValueError("Event must be a dictionary")
+            raise ValueError('Event must be a dictionary')
         self.thoughts.append(t)
 
     def get_thoughts(self):
@@ -63,17 +64,13 @@ class Monologue:
         Raises:
         - RunTimeError: When the condensing process fails for any reason
         """
-        
+
         try:
             prompt = prompts.get_summarize_monologue_prompt(self.thoughts)
-            messages = [{"content": prompt,"role": "user"}]
+            messages = [{'content': prompt, 'role': 'user'}]
             resp = llm.completion(messages=messages)
             summary_resp = resp['choices'][0]['message']['content']
-            self.thoughts = prompts.parse_summary_response(strip_markdown(summary_resp))
+            self.thoughts = prompts.parse_summary_response(summary_resp)
         except Exception as e:
             traceback.print_exc()
             raise RuntimeError(f"Error condensing thoughts: {e}")
-
-def strip_markdown(markdown_json):
-    # remove markdown code block
-    return markdown_json.replace('```json\n', '').replace('```', '').strip()