@@ -112,7 +112,7 @@ class BaseTranslator:
         return [
             {
                 "role": "system",
-                "content": "You are a professional,authentic machine translation engine.",
+                "content": "You are a professional, authentic machine translation engine. Only output the translated text, do not include any other text.",
             },
             {
                 "role": "user",
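For context, here is a minimal standalone sketch of how a messages list built this way feeds the streaming chat call used further down. The model name and user sentence are placeholder assumptions, not values from the patch; the call shape mirrors the OllamaTranslator code below.

import ollama

# Hypothetical values: "deepseek-r1" and the user text are illustrative only.
messages = [
    {
        "role": "system",
        "content": "You are a professional, authentic machine translation engine. "
        "Only output the translated text, do not include any other text.",
    },
    {"role": "user", "content": "Translate the following text to English: 你好，世界"},
]

stream = ollama.chat(model="deepseek-r1", messages=messages, stream=True)
response = "".join(chunk["message"]["content"] for chunk in stream)
print(response.strip())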
@@ -280,6 +280,7 @@ class OllamaTranslator(BaseTranslator):
 
     def do_translate(self, text):
         maxlen = max(2000, len(text) * 5)
+        print("Prompt:", self.prompt(text, self.prompttext), "Maxlen:", maxlen)
         for model in self.model.split(";"):
             try:
                 response = ""
@@ -289,10 +290,26 @@ class OllamaTranslator(BaseTranslator):
                     messages=self.prompt(text, self.prompttext),
                     stream=True,
                 )
+                in_think_block = False
+                is_deepseek_r1 = model == "deepseek-r1"
                 for chunk in stream:
                     chunk = chunk["message"]["content"]
-                    response += chunk
+                    # Only strip <think> reasoning blocks for the deepseek-r1 model
+                    if is_deepseek_r1:
+                        if "<think>" in chunk:
+                            # Keep any text emitted before the opening tag
+                            before, _, chunk = chunk.partition("<think>")
+                            response += before
+                            in_think_block = True
+                        if "</think>" in chunk:
+                            in_think_block = False
+                            chunk = chunk.split("</think>", 1)[1]
+                        if not in_think_block:
+                            response += chunk
+                    else:
+                        response += chunk
                     if len(response) > maxlen:
+                        print(response)
                         raise Exception("Response too long")
                 return response.strip()
             except Exception as e:
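The streaming filter above is easiest to verify in isolation. A minimal sketch, assuming the tags arrive as whole tokens and chunks may split at arbitrary points between them; the sample chunk strings are made up, not real model output:

def strip_think(chunks):
    # Accumulate streamed text, skipping anything inside <think>...</think>.
    response = ""
    in_think_block = False
    for chunk in chunks:
        if "<think>" in chunk:
            # partition keeps the text before the opening tag, so a chunk
            # like "Hello<think>..." does not lose "Hello".
            before, _, chunk = chunk.partition("<think>")
            response += before
            in_think_block = True
        if "</think>" in chunk:
            in_think_block = False
            chunk = chunk.split("</think>", 1)[1]
        if not in_think_block:
            response += chunk
    return response

# Chunks that split "<think>" itself mid-token are not handled, matching the patch.
assert strip_think(["Hello<think>step 1", " step 2</think> world"]) == "Hello world"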