Skip to content

Commit 1833c6f

Browse files
feat(client): obey custom instructions better
1 parent 15c790a commit 1833c6f

File tree

3 files changed

+30
-16
lines changed

3 files changed

+30
-16
lines changed

README.md

+4-4
Original file line number | Diff line number | Diff line change
@@ -63,8 +63,8 @@ const clientOptions = {
6363
// it and any other parameters here
6464
model: 'text-chat-davinci-002-20221122',
6565
},
66-
// (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
67-
// promptPrefix: 'You are not ChatGPT...\n\n',
66+
// (Optional) Set custom instructions instead of "You are ChatGPT...".
67+
// promptPrefix: 'You are Bob, a cowboy in Western times...',
6868
// (Optional) Set a custom name for the user
6969
// userLabel: 'User',
7070
// (Optional) Set a custom name for ChatGPT
@@ -111,8 +111,8 @@ module.exports = {
111111
// it and any other parameters here
112112
model: 'text-chat-davinci-002-20221122',
113113
},
114-
// (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
115-
// promptPrefix: 'You are not ChatGPT...\n\n',
114+
// (Optional) Set custom instructions instead of "You are ChatGPT...".
115+
// promptPrefix: 'You are Bob, a cowboy in Western times...',
116116
// (Optional) Set a custom name for the user
117117
// userLabel: 'User',
118118
// (Optional) Set a custom name for ChatGPT

settings.example.js

+2-2
Original file line number | Diff line number | Diff line change
@@ -8,8 +8,8 @@ export default {
88
// it and any other parameters here
99
model: 'text-chat-davinci-002-20221122',
1010
},
11-
// (Optional) Set a custom prompt prefix. As per my testing it should work with two newlines
12-
// promptPrefix: 'You are not ChatGPT...\n\n',
11+
// (Optional) Set custom instructions instead of "You are ChatGPT...".
12+
// promptPrefix: 'You are Bob, a cowboy in Western times...',
1313
// (Optional) Set a custom name for the user
1414
// userLabel: 'User',
1515
// (Optional) Set a custom name for ChatGPT

src/ChatGPTClient.js

+24-10
Original file line number | Diff line number | Diff line change
@@ -19,12 +19,15 @@ export default class ChatGPTClient {
1919
...modelOptions,
2020
// set some good defaults (check for undefined in some cases because they may be 0)
2121
model: modelOptions.model || CHATGPT_MODEL,
22-
temperature: typeof modelOptions.temperature === 'undefined' ? 0.9 : modelOptions.temperature,
22+
temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
2323
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
2424
presence_penalty: typeof modelOptions.presence_penalty === 'undefined' ? 0.6 : modelOptions.presence_penalty,
2525
stop: modelOptions.stop,
2626
};
2727

28+
this.userLabel = this.options.userLabel || 'User';
29+
this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
30+
2831
if (this.modelOptions.model.startsWith('text-chat')) {
2932
this.endToken = '<|im_end|>';
3033
this.separatorToken = '<|im_sep|>';
@@ -39,6 +42,9 @@ export default class ChatGPTClient {
3942
} else {
4043
this.modelOptions.stop = [this.endToken];
4144
}
45+
this.modelOptions.stop.push(`\n\n${this.userLabel}:`);
46+
this.modelOptions.stop.push(`\n\nInstructions:`);
47+
// I chose not to do one for `chatGptLabel` because I've never seen it happen, plus there's a max of 4 stops
4248
}
4349

4450
cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
@@ -100,6 +106,7 @@ export default class ChatGPTClient {
100106
const result = await this.getCompletion(prompt);
101107
if (this.options.debug) {
102108
console.debug(JSON.stringify(result));
109+
console.debug();
103110
}
104111

105112
const reply = result.choices[0].text.trim();
@@ -138,24 +145,22 @@ export default class ChatGPTClient {
138145

139146
let promptPrefix;
140147
if (this.options.promptPrefix) {
141-
promptPrefix = this.options.promptPrefix;
148+
promptPrefix = this.options.promptPrefix.trim();
142149
// If the prompt prefix doesn't end with the separator token, add it.
143150
if (!promptPrefix.endsWith(`${this.separatorToken}\n\n`)) {
144151
promptPrefix = `${promptPrefix.trim()}${this.separatorToken}\n\n`;
145152
}
153+
promptPrefix = `\nInstructions:\n${promptPrefix}`;
146154
} else {
147155
const currentDateString = new Date().toLocaleDateString(
148156
'en-us',
149157
{ year: 'numeric', month: 'long', day: 'numeric' },
150158
);
151159

152-
promptPrefix = `You are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.endToken}\n\n`
160+
promptPrefix = `\nInstructions:\nYou are ChatGPT, a large language model trained by OpenAI.\nCurrent date: ${currentDateString}${this.separatorToken}\n\n`
153161
}
154162

155-
const userLabel = this.options.userLabel || 'User';
156-
const chatGptLabel = this.options.chatGptLabel || 'ChatGPT';
157-
158-
const promptSuffix = `${chatGptLabel}:\n`; // Prompt ChatGPT to respond.
163+
const promptSuffix = `${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.
159164

160165
let currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
161166
let promptBody = '';
@@ -164,9 +169,18 @@ export default class ChatGPTClient {
164169
// Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
165170
while (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
166171
const message = orderedMessages.pop();
167-
const roleLabel = message.role === 'User' ? userLabel : chatGptLabel;
172+
const roleLabel = message.role === 'User' ? this.userLabel : this.chatGptLabel;
168173
const messageString = `${roleLabel}:\n${message.message}${this.separatorToken}\n`;
169-
const newPromptBody = `${messageString}${promptBody}`;
174+
let newPromptBody;
175+
if (promptBody) {
176+
newPromptBody = `${messageString}${promptBody}`;
177+
} else {
178+
// Always insert prompt prefix before the last user message.
179+
// This makes the AI obey the prompt instructions better, which is important for custom instructions.
180+
// After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
181+
// like "what's the last thing I wrote?".
182+
newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
183+
}
170184

171185
// The reason I don't simply get the token count of the messageString and add it to currentTokenCount is because
172186
// joined words may combine into a single token. Actually, that isn't really applicable here, but I can't
@@ -182,7 +196,7 @@ export default class ChatGPTClient {
182196
currentTokenCount = newTokenCount;
183197
}
184198

185-
const prompt = `${promptPrefix}${promptBody}${promptSuffix}`;
199+
const prompt = `${promptBody}${promptSuffix}`;
186200

187201
const numTokens = this.getTokenCount(prompt);
188202
// Use up to 4097 tokens (prompt + response), but try to leave 1000 tokens for the response.

0 commit comments

Comments (0)