"""
ChatJimmy API 客户端
处理与 ChatJimmy.ai 的通信
"""
import httpx
import json
import re
from typing import AsyncGenerator, Tuple, Optional
from models import ChatJimmyRequest, ChatOptions
import config


class ChatJimmyClient:
    """ChatJimmy API client."""

    def __init__(self):
        self.base_url = config.CHATJIMMY_API_URL
        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
            "Referer": "https://chatjimmy.ai/"
        }
    def convert_request(self, messages: list, model: Optional[str] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None) -> ChatJimmyRequest:
        """
        Convert OpenAI-format messages into the ChatJimmy format.

        Args:
            messages: List of messages in OpenAI format
            model: Model name
            temperature: Controls output randomness; 0.1-0.3 is recommended for translation tasks
            top_p: Nucleus sampling parameter
            max_tokens: Maximum number of output tokens

        Returns:
            A ChatJimmyRequest object
        """
        # Extract the system message to use as systemPrompt
        system_prompt = ""
        chat_messages = []
        for msg in messages:
            if msg["role"] == "system":
                system_prompt = msg["content"]
            else:
                chat_messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

        # Augment the system prompt (if enabled)
        if config.ENHANCE_TRANSLATION and system_prompt:
            system_prompt = system_prompt + config.TRANSLATION_ENHANCEMENT_RULES

        return ChatJimmyRequest(
            messages=chat_messages,
            chatOptions=ChatOptions(
                selectedModel=model or config.CHATJIMMY_DEFAULT_MODEL,
                systemPrompt=system_prompt,
                topK=8,
                temperature=temperature,
                topP=top_p,
                maxTokens=max_tokens
            ),
            attachment=None
        )
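
    # Illustrative conversion (hypothetical values, not from the original
    # source): an OpenAI-style message list such as
    #   [{"role": "system", "content": "Translate to French."},
    #    {"role": "user", "content": "Hello"}]
    # becomes a ChatJimmyRequest whose chatOptions.systemPrompt holds the
    # system message and whose messages list keeps only the user/assistant
    # turns.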
    def parse_response(self, response_text: str) -> Tuple[str, dict]:
        """
        Parse a ChatJimmy response.

        Args:
            response_text: Raw text returned by ChatJimmy

        Returns:
            A (content, stats) tuple
        """
        # Parse the statistics embedded in the <|stats|> tag
        stats = {}
        content = response_text

        stats_match = re.search(r'<\|stats\|>(.*?)<\|/stats\|>', response_text, re.DOTALL)
        if stats_match:
            try:
                stats = json.loads(stats_match.group(1))
                # Drop the stats section to keep only the content
                content = response_text[:stats_match.start()].strip()
            except json.JSONDecodeError:
                pass

        return content, stats
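
    # Sketch of the raw shape parse_response expects (the stats payload keys
    # are an assumption, not a documented ChatJimmy contract):
    #   'Bonjour<|stats|>{"total_tokens": 7}<|/stats|>'
    #   -> ("Bonjour", {"total_tokens": 7})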
    async def chat(self, messages: list, model: Optional[str] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_tokens: Optional[int] = None) -> Tuple[str, dict]:
        """
        Send a chat request (non-streaming).

        Args:
            messages: List of messages in OpenAI format
            model: Model name
            temperature: Controls output randomness; 0.1-0.3 is recommended for translation tasks
            top_p: Nucleus sampling parameter
            max_tokens: Maximum number of output tokens

        Returns:
            A (content, stats) tuple
        """
        request = self.convert_request(messages, model, temperature, top_p, max_tokens)
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                self.base_url,
                content=request.model_dump_json(),
                headers=self.headers
            )
            # Raises httpx.HTTPStatusError on 4xx/5xx responses
            response.raise_for_status()
            return self.parse_response(response.text)
    async def chat_stream(self, messages: list, model: Optional[str] = None) -> AsyncGenerator[str, None]:
        """
        Send a chat request (streaming).

        Because the ChatJimmy API returns a complete response in one shot, we
        simulate streaming output to stay compatible with the OpenAI
        streaming API.

        Args:
            messages: List of messages in OpenAI format
            model: Model name

        Yields:
            Character fragments
        """
        content, stats = await self.chat(messages, model)
        # Yield character by character to simulate streaming output
        for char in content:
            yield char
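
    # Consumption sketch (hypothetical caller, not part of the original
    # module): `async for chunk in client.chat_stream(messages)` yields one
    # character at a time; a caller exposing an OpenAI-compatible endpoint
    # would wrap each chunk in an SSE "delta" event.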


# Global client instance
client = ChatJimmyClient()
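

if __name__ == "__main__":
    # Minimal smoke-test sketch (not in the original module): assumes config
    # points at a reachable ChatJimmy endpoint and that messages follow the
    # OpenAI chat format. The sample message content is hypothetical.
    import asyncio

    async def _demo() -> None:
        content, stats = await client.chat(
            [
                {"role": "system", "content": "You are a helpful translator."},
                {"role": "user", "content": "Hello"},
            ],
            temperature=0.2,
        )
        print("content:", content)
        print("stats:", stats)

    asyncio.run(_demo())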