@@ -19,12 +19,15 @@ export default class ChatGPTClient {
             ...modelOptions,
             // set some good defaults (check for undefined in some cases because they may be 0)
             model: modelOptions.model || CHATGPT_MODEL,
-            temperature: typeof modelOptions.temperature === 'undefined' ? 0.9 : modelOptions.temperature,
+            temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
             top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
             presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 0.6 : modelOptions.presence_penalty,
             stop: modelOptions.stop,
         };

+        this.userLabel = this.options.userLabel || 'User';
+        this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
+
         if (this.modelOptions.model.startsWith('text-chat')) {
             this.endToken = '<|im_end|>';
             this.separatorToken = '<|im_sep|>';
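A note on the `typeof ... === 'undefined'` guards above: a plain `modelOptions.temperature || 0.8` would silently discard a legitimate `0`. A minimal standalone sketch of the merge behavior (with `CHATGPT_MODEL` stubbed as a placeholder value, since the real constant is defined elsewhere in the file):

```js
const CHATGPT_MODEL = 'text-chat-model'; // stand-in; the actual default lives elsewhere in the file

function buildModelOptions(modelOptions = {}) {
    return {
        ...modelOptions,
        model: modelOptions.model || CHATGPT_MODEL,
        // `typeof` checks keep an explicit 0 from being replaced by the default
        temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
        top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
        presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 0.6 : modelOptions.presence_penalty,
    };
}

console.log(buildModelOptions({ temperature: 0 }).temperature); // 0, not 0.8 — `||` would have broken this
```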
@@ -39,6 +42,9 @@ export default class ChatGPTClient {
             } else {
                 this.modelOptions.stop = [this.endToken];
             }
+            this.modelOptions.stop.push(`\n\n${this.userLabel}:`);
+            this.modelOptions.stop.push(`\n\nInstructions:`);
+            // I chose not to do one for `chatGptLabel` because I've never seen it happen, plus there's a max of 4 stops
         }

         cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
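The practical effect of the two new stop sequences: the completions endpoint halts generation before the model can write a fake next user turn or a fake `Instructions:` block. A rough sketch of the resulting array for a `text-chat-*` model (labels assume the defaults added above):

```js
const endToken = '<|im_end|>';
const userLabel = 'User'; // default from this diff; configurable via options.userLabel

const stop = [endToken];
stop.push(`\n\n${userLabel}:`);
stop.push(`\n\nInstructions:`);

console.log(stop);
// [ '<|im_end|>', '\n\nUser:', '\n\nInstructions:' ]
// The completions API allows at most 4 stop sequences,
// which is why no stop is added for `chatGptLabel`.
```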
@@ -100,6 +106,7 @@ export default class ChatGPTClient {
         const result = await this.getCompletion(prompt);
         if (this.options.debug) {
             console.debug(JSON.stringify(result));
+            console.debug();
         }

         const reply = result.choices[0].text.trim();
@@ -138,24 +145,22 @@ export default class ChatGPTClient {

         let promptPrefix;
         if (this.options.promptPrefix) {
-            promptPrefix = this.options.promptPrefix;
+            promptPrefix = this.options.promptPrefix.trim();
             // If the prompt prefix doesn't end with the separator token, add it.
             if (!promptPrefix.endsWith(`${this.separatorToken}\n\n`)) {
                 promptPrefix = `${promptPrefix.trim()}${this.separatorToken}\n\n`;
             }
+            promptPrefix = `\nInstructions:\n${promptPrefix}`;
         } else {
             const currentDateString = new Date().toLocaleDateString(
                 'en-us',
                 { year: 'numeric', month: 'long', day: 'numeric' },
             );

-            promptPrefix = `You are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.endToken}\n\n`
+            promptPrefix = `\nInstructions:\nYou are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.separatorToken}\n\n`
         }

-        const userLabel = this.options.userLabel || 'User';
-        const chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
-
-        const promptSuffix = `${chatGptLabel}:\n`; // Prompt ChatGPT to respond.
+        const promptSuffix = `${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.

         let currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
         let promptBody = '';
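Both branches now emit an `Instructions:` header, and the default prefix ends with the separator token instead of the end token, matching the custom-prefix branch. A rough sketch of what the default (no `promptPrefix`) case produces, with the separator token hardcoded for illustration:

```js
const separatorToken = '<|im_sep|>'; // value for text-chat models, per the constructor above
const currentDateString = new Date().toLocaleDateString(
    'en-us',
    { year: 'numeric', month: 'long', day: 'numeric' },
);

const promptPrefix = `\nInstructions:\nYou are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${separatorToken}\n\n`;
console.log(promptPrefix);
// Instructions:
// You are ChatGPT, a large language model trained by OpenAI.
// Current date: <today's date><|im_sep|>
```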
@@ -164,9 +169,18 @@ export default class ChatGPTClient {
         // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
         while (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
             const message = orderedMessages.pop();
-            const roleLabel = message.role === 'User' ? userLabel : chatGptLabel;
+            const roleLabel = message.role === 'User' ? this.userLabel : this.chatGptLabel;
             const messageString = `${roleLabel}:\n${message.message}${this.separatorToken}\n`;
-            const newPromptBody = `${messageString}${promptBody}`;
+            let newPromptBody;
+            if (promptBody) {
+                newPromptBody = `${messageString}${promptBody}`;
+            } else {
+                // Always insert prompt prefix before the last user message.
+                // This makes the AI obey the prompt instructions better, which is important for custom instructions.
+                // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
+                // like "what's the last thing I wrote?".
+                newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
+            }

             // The reason I don't simply get the token count of the messageString and add it to currentTokenCount is because
             // joined words may combine into a single token. Actually, that isn't really applicable here, but I can't
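Since `orderedMessages.pop()` walks the history newest-first, `promptBody` is empty only on the first iteration, so the prefix lands immediately before the most recent message. A simplified standalone sketch of the assembly order, ignoring token counting (`PREFIX>>` is just a visual marker):

```js
const messages = [
    { role: 'User', message: 'Hello!' },
    { role: 'ChatGPT', message: 'Hi! How can I help?' },
    { role: 'User', message: 'Tell me a joke.' }, // newest message
];

let promptBody = '';
while (messages.length > 0) {
    const message = messages.pop(); // newest first
    const messageString = `${message.role}:\n${message.message}<|im_sep|>\n`;
    // Empty promptBody means this is the newest message: splice the prefix in here.
    promptBody = promptBody
        ? `${messageString}${promptBody}`
        : `PREFIX>>${messageString}`;
}
console.log(promptBody);
// User:
// Hello!<|im_sep|>
// ChatGPT:
// Hi! How can I help?<|im_sep|>
// PREFIX>>User:
// Tell me a joke.<|im_sep|>
```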
@@ -182,7 +196,7 @@ export default class ChatGPTClient {
             currentTokenCount = newTokenCount;
         }

-        const prompt = `${promptPrefix}${promptBody}${promptSuffix}`;
+        const prompt = `${promptBody}${promptSuffix}`;

         const numTokens = this.getTokenCount(prompt);
         // Use up to 4097 tokens (prompt + response), but try to leave 1000 tokens for the response.
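As a sanity check on the budget math that comment describes — the actual `max_tokens` computation sits below this hunk and isn't shown, so this hypothetical helper mirrors the stated intent rather than the file's exact code:

```js
const MAX_CONTEXT_TOKENS = 4097;       // prompt + completion budget, per the comment above
const DESIRED_RESPONSE_TOKENS = 1000;  // tokens we'd like to reserve for the response

// Hypothetical helper: whatever the prompt doesn't use is available
// for the response, capped at the desired reservation.
function computeMaxResponseTokens(numPromptTokens) {
    return Math.min(MAX_CONTEXT_TOKENS - numPromptTokens, DESIRED_RESPONSE_TOKENS);
}

console.log(computeMaxResponseTokens(3500)); // 597 — a long prompt eats into the response budget
console.log(computeMaxResponseTokens(2000)); // 1000 — full response budget preserved
```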