Skip to content

Commit 985f89f

Browse files
committed
fix: resolve 500 error caused by <think> tags and markdown code fences in content field from reasoning models like MiniMax/GLM
1 parent a1ff79c commit 985f89f

File tree

1 file changed: +15 additions, −3 deletions

backend/app/utils/llm_client.py — 15 additions & 3 deletions

(unified diff reconstructed from the garbled table layout; content lines are verbatim, indentation is reconstructed and should be confirmed against the repository)

@@ -4,6 +4,7 @@
 """
 
 import json
+import re
 from typing import Optional, Dict, Any, List
 from openai import OpenAI
 
@@ -61,7 +62,10 @@ def chat(
             kwargs["response_format"] = response_format
 
         response = self.client.chat.completions.create(**kwargs)
-        return response.choices[0].message.content
+        content = response.choices[0].message.content
+        # 部分模型(如MiniMax M2.5)会在content中包含<think>思考内容,需要移除
+        content = re.sub(r'<think>[\s\S]*?</think>', '', content).strip()
+        return content
 
     def chat_json(
         self,
@@ -86,6 +90,14 @@ def chat_json(
             max_tokens=max_tokens,
             response_format={"type": "json_object"}
         )
-
-        return json.loads(response)
+        # 清理markdown代码块标记
+        cleaned_response = response.strip()
+        cleaned_response = re.sub(r'^```(?:json)?\s*\n?', '', cleaned_response, flags=re.IGNORECASE)
+        cleaned_response = re.sub(r'\n?```\s*$', '', cleaned_response)
+        cleaned_response = cleaned_response.strip()
+
+        try:
+            return json.loads(cleaned_response)
+        except json.JSONDecodeError:
+            raise ValueError(f"LLM返回的JSON格式无效: {cleaned_response}")
 

0 commit comments

Comments (0)