
Commit 1d2804d

Merge pull request #372 from harmony-one/prompt-command

add prompt command and refactor context logic

2 parents: 4871857 + a90ae62

14 files changed: 69 additions & 29 deletions


src/bot.ts

Lines changed: 12 additions & 0 deletions
@@ -516,12 +516,24 @@ const logErrorHandler = (ex: any): void => {
 bot.command('new', async (ctx) => {
   writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
   await openAiBot.onStop(ctx as OnMessageContext)
+  await claudeBot.onStop(ctx as OnMessageContext) // any Bot with 'llms' as sessionKey works.
   return await ctx.reply('Chat history reseted', {
     parse_mode: 'Markdown',
     message_thread_id: ctx.message?.message_thread_id
   })
 })
 
+bot.command('prompt', async (ctx) => {
+  const context = ctx.match
+  if (context) {
+    ctx.session.currentPrompt = context
+  }
+  await ctx.reply(`Prompt set to: _${ctx.session.currentPrompt}_`, {
+    parse_mode: 'Markdown',
+    message_thread_id: ctx.message?.message_thread_id
+  })
+})
+
 bot.command('more', async (ctx) => {
   writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
   return await ctx.reply(commandsHelpText.more, {
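
Usage sketch for the new command (the prompt text is an invented example; the reply wording comes from the handler above). Sending /prompt <text> overwrites the per-chat system prompt, while a bare /prompt leaves ctx.session.currentPrompt unchanged, so the bot simply echoes the prompt that is currently active:

/prompt Answer as briefly as possible.
Bot: Prompt set to: Answer as briefly as possible.

/prompt
Bot: Prompt set to: Answer as briefly as possible.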

src/helpers.ts

Lines changed: 1 addition & 0 deletions
@@ -56,6 +56,7 @@ export function createInitialSessionData (): BotSessionData {
       isVoiceForwardingEnabled: config.voiceMemo.isVoiceForwardingEnabled
     },
     currentModel: LlmModelsEnum.GPT_4O,
+    currentPrompt: config.openAi.chatGpt.chatCompletionContext,
     lastBroadcast: ''
   }
 }
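
For orientation, a minimal sketch of the pieces this initializer plugs into. The session type and middleware wiring live in files not shown in this diff, so the shape of BotSessionData and the grammY session() call below are assumptions, not the repository's exact code:

import { Bot, type Context, session, type SessionFlavor } from 'grammy'
import { createInitialSessionData } from './helpers'

// Assumed shape: the real BotSessionData is declared elsewhere in the repo.
interface BotSessionData {
  currentModel: string
  currentPrompt: string // new field: the active per-chat system prompt
  lastBroadcast: string
  // ...other per-chat state omitted
}

type BotContext = Context & SessionFlavor<BotSessionData>

const bot = new Bot<BotContext>(process.env.TELEGRAM_BOT_TOKEN ?? '')

// ctx.session is created lazily per chat from createInitialSessionData, so
// currentPrompt starts out as config.openAi.chatGpt.chatCompletionContext and
// keeps that value until a user runs /prompt to overwrite it.
bot.use(session({ initial: createInitialSessionData }))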

src/modules/llms/api/athropic.ts

Lines changed: 7 additions & 4 deletions
@@ -24,11 +24,12 @@ const API_ENDPOINT = config.llms.apiEndpoint // 'http://127.0.0.1:5000' // confi
 export const anthropicCompletion = async (
   conversation: ChatConversation[],
   model = LlmModelsEnum.CLAUDE_3_OPUS,
+  ctx: OnMessageContext | OnCallBackQueryData,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} completion`)
   parameters = parameters ?? {
-    system: config.openAi.chatGpt.chatCompletionContext,
+    system: ctx.session.currentPrompt,
     max_tokens: +config.openAi.chatGpt.maxTokens
   }
   const data = {
@@ -68,11 +69,12 @@ export const anthropicCompletion = async (
 export const xaiCompletion = async (
   conversation: ChatConversation[],
   model = LlmModelsEnum.GROK,
+  ctx: OnMessageContext | OnCallBackQueryData,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} completion`)
   parameters = parameters ?? {
-    system: config.openAi.chatGpt.chatCompletionContext,
+    system: ctx.session.currentPrompt,
     max_tokens: +config.openAi.chatGpt.maxTokens
   }
   const data = {
@@ -119,7 +121,7 @@ export const anthropicStreamCompletion = async (
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} stream completion`)
   parameters = parameters ?? {
-    system: config.openAi.chatGpt.chatCompletionContext,
+    system: ctx.session.currentPrompt,
     max_tokens: +config.openAi.chatGpt.maxTokens
   }
   const data = {
@@ -217,11 +219,12 @@ export const anthropicStreamCompletion = async (
 export const toolsChatCompletion = async (
   conversation: ChatConversation[],
   model = LlmModelsEnum.CLAUDE_3_OPUS,
+  ctx: OnMessageContext | OnCallBackQueryData,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   logger.info(`Handling ${model} completion`)
   parameters = parameters ?? {
-    system: config.openAi.chatGpt.chatCompletionContext,
+    system: ctx.session.currentPrompt,
     max_tokens: +config.openAi.chatGpt.maxTokens
   }
   const input = {

src/modules/llms/api/openai.ts

Lines changed: 8 additions & 7 deletions
@@ -81,25 +81,26 @@ export async function alterGeneratedImg (
 
 type ConversationOutput = Omit<ChatConversation, 'timestamp' | 'model' | 'id' | 'author' | 'numSubAgents'>
 
-const prepareConversation = (conversation: ChatConversation[], model: string): ConversationOutput[] => {
+const prepareConversation = (conversation: ChatConversation[], model: string, ctx: OnMessageContext | OnCallBackQueryData): ConversationOutput[] => {
   const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } })
   if (messages.length !== 1 || model === LlmModelsEnum.O1) {
     return messages
   }
   const systemMessage = {
     role: 'system',
-    content: config.openAi.chatGpt.chatCompletionContext
+    content: ctx.session.currentPrompt
   }
   return [systemMessage, ...messages]
 }
 
 export async function chatCompletion (
   conversation: ChatConversation[],
   model = config.openAi.chatGpt.model,
+  ctx: OnMessageContext | OnCallBackQueryData,
   limitTokens = true,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> {
-  const messages = prepareConversation(conversation, model)
+  const messages = prepareConversation(conversation, model, ctx)
   parameters = parameters ?? {
     max_completion_tokens: config.openAi.chatGpt.maxTokens,
     temperature: config.openAi.dalle.completions.temperature
@@ -139,15 +140,15 @@ export async function chatCompletion (
 
 export const streamChatCompletion = async (
   conversation: ChatConversation[],
-  ctx: OnMessageContext | OnCallBackQueryData,
   model = LlmModelsEnum.GPT_4,
+  ctx: OnMessageContext | OnCallBackQueryData,
   msgId: number,
   limitTokens = true,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   let completion = ''
   let wordCountMinimum = 2
-  const messages = prepareConversation(conversation, model)
+  const messages = prepareConversation(conversation, model, ctx)
   parameters = parameters ?? {
     max_completion_tokens: config.openAi.chatGpt.maxTokens,
     temperature: config.openAi.dalle.completions.temperature || 0.8
@@ -322,10 +323,10 @@ export const streamChatVisionCompletion = async (
   }
 }
 
-export async function improvePrompt (promptText: string, model: string): Promise<string> {
+export async function improvePrompt (promptText: string, model: string, ctx: OnMessageContext | OnCallBackQueryData): Promise<string> {
   const prompt = `Improve this picture description using max 100 words and don't add additional text to the image: ${promptText} `
   const conversation = [{ role: 'user', content: prompt, timestamp: Date.now() }]
-  const response = await chatCompletion(conversation, model)
+  const response = await chatCompletion(conversation, model, ctx)
   return response.completion?.content as string ?? ''
 }
 
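
To make the refactor concrete, here is what prepareConversation now produces for a fresh single-message conversation. This is a sketch only: prepareConversation is module-private, and the literal values and the 'gpt-4o' model string are invented for the example.

// Assume ctx is an OnMessageContext whose user previously ran
// "/prompt You are a concise assistant."
ctx.session.currentPrompt = 'You are a concise assistant.'

const messages = prepareConversation(
  [{ role: 'user', content: 'hi', model: 'gpt-4o', timestamp: Date.now() }],
  'gpt-4o',
  ctx
)
// messages ===
// [
//   { role: 'system', content: 'You are a concise assistant.' },
//   { role: 'user', content: 'hi' }
// ]
// With more than one message in the history, or for the O1 model, no system
// message is prepended; the guard conditions are unchanged, only the source
// of the system text moved from config to the per-chat session.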

src/modules/llms/api/vertex.ts

Lines changed: 3 additions & 2 deletions
@@ -22,10 +22,12 @@ const logger = pino({
 export const vertexCompletion = async (
   conversation: ChatConversation[],
   model = config.llms.model,
+  ctx: OnMessageContext | OnCallBackQueryData,
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   const data = {
     model,
+    system: ctx.session.currentPrompt,
     stream: false,
     messages: conversation.filter(c => c.model === model)
       .map((msg) => {
@@ -71,10 +73,9 @@ export const vertexStreamCompletion = async (
   parameters?: ModelParameters
 ): Promise<LlmCompletion> => {
   parameters = parameters ?? {
-    system: config.openAi.chatGpt.chatCompletionContext,
+    system: ctx.session.currentPrompt,
     max_tokens: +config.openAi.chatGpt.maxTokens
   }
-
   const data = {
     model,
     stream: true, // Set stream to true to receive the completion as a stream

src/modules/llms/claudeBot.ts

Lines changed: 9 additions & 2 deletions
@@ -42,6 +42,9 @@ export class ClaudeBot extends LlmsBase {
     msgId: number,
     limitTokens: boolean,
     parameters?: ModelParameters): Promise<LlmCompletion> {
+    if (parameters) {
+      parameters.system = ctx.session.currentPrompt
+    }
     return await anthropicStreamCompletion(
       conversation,
       model,
@@ -55,13 +58,17 @@ export class ClaudeBot extends LlmsBase {
   async chatCompletion (
     conversation: ChatConversation[],
     model: ModelVersion,
+    ctx: OnMessageContext | OnCallBackQueryData,
     hasTools: boolean,
     parameters?: ModelParameters
   ): Promise<LlmCompletion> {
+    if (parameters) {
+      parameters.system = ctx.session.currentPrompt
+    }
     if (hasTools) {
-      return await toolsChatCompletion(conversation, model, parameters)
+      return await toolsChatCompletion(conversation, model, ctx, parameters)
     }
-    return await anthropicCompletion(conversation, model, parameters)
+    return await anthropicCompletion(conversation, model, ctx, parameters)
   }
 
   public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {

src/modules/llms/dalleBot.ts

Lines changed: 4 additions & 3 deletions
@@ -133,18 +133,19 @@ export class DalleBot extends LlmsBase {
   ): Promise<LlmCompletion> {
     return await streamChatCompletion(
       conversation,
-      ctx,
       model,
+      ctx,
       msgId,
       true // telegram messages has a character limit
     )
   }
 
   async chatCompletion (
     conversation: ChatConversation[],
-    model: ModelVersion
+    model: ModelVersion,
+    ctx: OnMessageContext | OnCallBackQueryData
   ): Promise<LlmCompletion> {
-    return await chatCompletion(conversation, model)
+    return await chatCompletion(conversation, model, ctx)
   }
 
   hasPrefix (prompt: string): string {

src/modules/llms/llmsBase.ts

Lines changed: 3 additions & 2 deletions
@@ -120,6 +120,7 @@ export abstract class LlmsBase implements PayableBot {
   protected abstract chatCompletion (
     conversation: ChatConversation[],
     model: ModelVersion,
+    ctx: OnMessageContext | OnCallBackQueryData,
     usesTools: boolean,
     parameters?: ModelParameters
   ): Promise<LlmCompletion>
@@ -373,7 +374,7 @@ export abstract class LlmsBase implements PayableBot {
       }
     } else {
       const parameters = this.modelManager.getModelParameters(model)
-      const response = await this.chatCompletion(conversation, model, usesTools, parameters)
+      const response = await this.chatCompletion(conversation, model, ctx, usesTools, parameters)
       conversation.push({
         role: 'assistant',
         content: response.completion?.content ?? '',
@@ -406,7 +407,7 @@ export abstract class LlmsBase implements PayableBot {
       ).message_id
       ctx.chatAction = 'typing'
       const parameters = this.modelManager.getModelParameters(model)
-      const response = await this.chatCompletion(conversation, model, usesTools, parameters)
+      const response = await this.chatCompletion(conversation, model, ctx, usesTools, parameters)
       if (response.completion) {
         if (model === this.modelsEnum.O1) {
           const msgs = splitTelegramMessage(response.completion.content as string)

src/modules/llms/openaiBot.ts

Lines changed: 3 additions & 2 deletions
@@ -73,8 +73,8 @@ export class OpenAIBot extends LlmsBase {
   ): Promise<LlmCompletion> {
     return await streamChatCompletion(
       conversation,
-      ctx,
       model,
+      ctx,
       msgId,
       true, // telegram messages has a character limit
       parameters
@@ -84,10 +84,11 @@ export class OpenAIBot extends LlmsBase {
   async chatCompletion (
     conversation: ChatConversation[],
     model: ModelVersion,
+    ctx: OnMessageContext | OnCallBackQueryData,
     usesTools: boolean,
     parameters?: ModelParameters
   ): Promise<LlmCompletion> {
-    return await chatCompletion(conversation, model, model !== this.modelsEnum.O1, parameters) // limitTokens doesn't apply for o1-preview
+    return await chatCompletion(conversation, model, ctx, model !== this.modelsEnum.O1, parameters) // limitTokens doesn't apply for o1-preview
   }
 
   hasPrefix (prompt: string): string {

src/modules/llms/utils/llmsData.ts

Lines changed: 4 additions & 4 deletions
@@ -223,25 +223,25 @@ export const llmData: LLMData = {
   },
   claude: {
     defaultParameters: {
-      system: config.openAi.chatGpt.chatCompletionContext,
+      // system: config.openAi.chatGpt.chatCompletionContext,
       max_tokens: +config.openAi.chatGpt.maxTokens
     }
   },
   xai: {
     defaultParameters: {
-      system: config.openAi.chatGpt.chatCompletionContext,
+      // system: config.openAi.chatGpt.chatCompletionContext,
      max_tokens: +config.openAi.chatGpt.maxTokens
     }
   },
   vertex: {
     defaultParameters: {
-      system: config.openAi.chatGpt.chatCompletionContext,
+      // system: config.openAi.chatGpt.chatCompletionContext,
      max_tokens: +config.openAi.chatGpt.maxTokens
     }
   },
   luma: {
     defaultParameters: {
-      system: config.openAi.chatGpt.chatCompletionContext,
+      // system: config.openAi.chatGpt.chatCompletionContext,
      max_tokens: +config.openAi.chatGpt.maxTokens
     }
   }
