Skip to content

Commit 1e6e70b

Browse files
brauliopf authored and KernelDeimos committed
add temperature and max_tokens parameters to complete method
1 parent 2e90919 commit 1e6e70b

File tree

6 files changed

+17
-9
lines changed

6 files changed

+17
-9
lines changed

src/backend/src/modules/puterai/ClaudeService.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -128,8 +128,8 @@ class ClaudeService extends BaseService {
128128
const init_chat_stream = async ({ chatStream }) => {
129129
const completion = await this.anthropic.messages.stream({
130130
model: model ?? this.get_default_model(),
131-
max_tokens: max_tokens || (model === 'claude-3-5-sonnet-20241022' || model === 'claude-3-5-sonnet-20240620') ? 8192 : 4096,
132-
temperature: temperature || 0,
131+
max_tokens: max_tokens || (model === 'claude-3-5-sonnet-20241022' || model === 'claude-3-5-sonnet-20240620') ? 8192 : 4096, //required
132+
temperature: temperature || 0, // required
133133
system: PUTER_PROMPT + JSON.stringify(system_prompts),
134134
messages,
135135
...(tools ? { tools } : {}),

src/backend/src/modules/puterai/DeepSeekService.js

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ class DeepSeekService extends BaseService {
119119
* AI Chat completion method.
120120
* See AIChatService for more details.
121121
*/
122-
async complete ({ messages, stream, model, tools }) {
122+
async complete ({ messages, stream, model, tools, max_tokens, temperature }) {
123123
model = this.adapt_model(model);
124124

125125
messages = await OpenAIUtil.process_input_messages(messages);
@@ -169,7 +169,8 @@ class DeepSeekService extends BaseService {
169169
messages,
170170
model: model ?? this.get_default_model(),
171171
...(tools ? { tools } : {}),
172-
max_tokens: 1000,
172+
max_tokens: max_tokens || 1000,
173+
temperature, // the default temperature is 1.0. suggested 0 for math/coding and 1.5 for creative poetry
173174
stream,
174175
...(stream ? {
175176
stream_options: { include_usage: true },

src/backend/src/modules/puterai/GroqAIService.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ class GroqAIService extends BaseService {
9797
* @param {boolean} [options.stream] - Whether to stream the response
9898
* @returns {TypedValue|Object} Returns either a TypedValue with streaming response or completion object with usage stats
9999
*/
100-
async complete ({ messages, model, stream, tools }) {
100+
async complete ({ messages, model, stream, tools, max_tokens, temperature }) {
101101
model = model ?? this.get_default_model();
102102

103103
messages = await OpenAIUtil.process_input_messages(messages);
@@ -113,6 +113,8 @@ class GroqAIService extends BaseService {
113113
model,
114114
stream,
115115
tools,
116+
max_completion_tokens: max_tokens, // max_tokens has been deprecated
117+
temperature
116118
});
117119

118120
return OpenAIUtil.handle_completion_output({

src/backend/src/modules/puterai/MistralAIService.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,7 @@ class MistralAIService extends BaseService {
216216
* AI Chat completion method.
217217
* See AIChatService for more details.
218218
*/
219-
async complete ({ messages, stream, model, tools }) {
219+
async complete ({ messages, stream, model, tools, max_tokens, temperature }) {
220220

221221
messages = await OpenAIUtil.process_input_messages(messages);
222222
for ( const message of messages ) {
@@ -238,6 +238,8 @@ class MistralAIService extends BaseService {
238238
model: model ?? this.get_default_model(),
239239
...(tools ? { tools } : {}),
240240
messages,
241+
max_tokens: max_tokens,
242+
temperature
241243
});
242244

243245
return await OpenAIUtil.handle_completion_output({

src/backend/src/modules/puterai/OpenAICompletionService.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,12 +163,14 @@ class OpenAICompletionService extends BaseService {
163163
return model_names;
164164
},
165165

166-
async complete ({ messages, test_mode, stream, model, tools }) {
166+
async complete ({ messages, test_mode, stream, model, tools, max_tokens, temperature }) {
167167
return await this.complete(messages, {
168168
model: model,
169169
tools,
170170
moderation: true,
171171
stream,
172+
max_tokens,
173+
temperature
172174
});
173175
}
174176
}

src/backend/src/modules/puterai/OpenRouterService.js

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ class OpenRouterService extends BaseService {
116116
* AI Chat completion method.
117117
* See AIChatService for more details.
118118
*/
119-
async complete ({ messages, stream, model, tools }) {
119+
async complete ({ messages, stream, model, tools, max_tokens, temperature }) {
120120
model = this.adapt_model(model);
121121

122122
if ( model.startsWith('openrouter:') ) {
@@ -134,7 +134,8 @@ class OpenRouterService extends BaseService {
134134
messages,
135135
model: model ?? this.get_default_model(),
136136
...(tools ? { tools } : {}),
137-
max_tokens: 1000,
137+
max_tokens: max_tokens || 1000,
138+
temperature: temperature, // default to 1.0
138139
stream,
139140
...(stream ? {
140141
stream_options: { include_usage: true },

0 commit comments

Comments (0)