Skip to content

Commit 8d25017

Browse files
authored
Merge pull request #31 from A-Baji/dev
chatgpt command and custom emoji support
2 parents 13c0109 + 31f740a commit 8d25017

File tree

9 files changed

+190
-22
lines changed

9 files changed

+190
-22
lines changed

.github/workflows/package.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -69,18 +69,18 @@ jobs:
6969
- name: Windows CLI Package
7070
if: ${{matrix.os == 'windows-latest'}}
7171
run: |
72-
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-binary='discordai/bot/cogs;discordai/bot/cogs' --hidden-import=openai --collect-data=discordai_modelizer
72+
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-binary='discordai/bot/cogs;discordai/bot/cogs' --hidden-import=openai --hidden-import=tiktoken --collect-data=discordai_modelizer
7373
Compress-Archive -Path dist\*discordai* -DestinationPath discordai-windows.zip
7474
- name: Mac CLI Package
7575
if: ${{matrix.os == 'macos-latest'}}
7676
run: |
77-
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-data='discordai/bot/cogs:discordai/bot/cogs' --hidden-import=openai --hidden-import=configparser --collect-data=discordai_modelizer --collect-data=aiohttp --collect-data=certifi
77+
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-data='discordai/bot/cogs:discordai/bot/cogs' --hidden-import=openai --hidden-import=tiktoken --hidden-import=configparser --collect-data=discordai_modelizer --collect-data=aiohttp --collect-data=certifi
7878
zip -j discordai-macos.zip dist/*discordai*
7979
chmod +x dist/*discordai*
8080
- name: Linux CLI Package
8181
if: ${{matrix.os == 'ubuntu-latest'}}
8282
run: |
83-
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-binary='discordai/bot/cogs:discordai/bot/cogs' --hidden-import=openai --collect-data=discordai_modelizer
83+
pyinstaller discordai/command_line.py --console --onefile --name=discordai --add-binary='discordai/bot/cogs:discordai/bot/cogs' --hidden-import=openai --hidden-import=tiktoken --collect-data=discordai_modelizer
8484
zip -j discordai-linux.zip dist/*discordai*
8585
chmod +x dist/*discordai*
8686
# Upload

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ discordai/bot/cogs/*.py
44
!discordai/bot/cogs/customai.py
55
!discordai/bot/cogs/imageai.py
66
!discordai/bot/cogs/openai.py
7+
!discordai/bot/cogs/chatgpt.py
78
!discordai/bot/cogs/sync.py
9+
init.sh
810

911
# Byte-compiled / optimized / DLL files
1012
__pycache__/

CHANGELOG.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,14 @@ Observes [Semantic Versioning](https://semver.org/spec/v2.0.0.html) standard and
55
## [2.0.0] - TBD
66

77
### Added
8+
- A changelog
89
- An openAI image generation command
910
- The gpt3.5 model to the openai command and made it the default
10-
- A changelog
11+
- A chatGPT command with chat history functionality
12+
- Custom emoji support for custom models.
13+
14+
### Fixed
15+
- Upgrading the packaged executable version now properly applies any new features to existing, non-custom cogs
1116

1217
### Changed
1318

discordai/bot/__init__.py

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,9 @@
2828

2929
def start_bot(config, sync=False):
3030
bot.config = config
31+
bot.chat_messages = {}
32+
bot.chat_init = {}
33+
bot.emoji_map = {}
3134

3235
@bot.event
3336
async def on_ready() -> None:
@@ -42,6 +45,10 @@ async def on_ready() -> None:
4245
if sync:
4346
print("Syncing commands globally...")
4447
await bot.tree.sync()
48+
for guild in bot.guilds:
49+
for emoji in guild.emojis:
50+
if emoji.name not in bot.emoji_map:
51+
bot.emoji_map[emoji.name] = [emoji.id, emoji.animated]
4552
print("-------------------")
4653

4754
@bot.event
@@ -120,12 +127,13 @@ async def load_cogs() -> None:
120127
"""
121128
if getattr(sys, 'frozen', False):
122129
# The code is being run as a frozen executable
123-
data_dir = pathlib.Path(appdirs.user_data_dir(appname="discordai"))
124-
cogs_path = data_dir / "discordai" / "bot" / "cogs"
125-
if not os.path.exists(cogs_path):
126-
data_dir = pathlib.Path(sys._MEIPASS)
127-
og_cogs_path = data_dir / "discordai" / "bot" / "cogs"
128-
shutil.copytree(og_cogs_path, cogs_path)
130+
cogs_path = pathlib.Path(appdirs.user_data_dir(appname="discordai")) / "discordai" / "bot" / "cogs"
131+
data_dir = pathlib.Path(sys._MEIPASS)
132+
og_cogs_path = data_dir / "discordai" / "bot" / "cogs"
133+
os.makedirs(cogs_path, exist_ok=True)
134+
for file in og_cogs_path.glob("*"):
135+
dest_file = cogs_path / file.name
136+
shutil.copy2(file, dest_file)
129137
for file in os.listdir(cogs_path):
130138
if file.endswith(".py"):
131139
extension = file[:-3]
@@ -136,7 +144,6 @@ async def load_cogs() -> None:
136144
module = importlib.util.module_from_spec(spec)
137145
spec.loader.exec_module(module)
138146
await module.setup(bot=bot)
139-
# await bot.load_extension(extension, package=cogs_path)
140147
print(f"Loaded extension '{extension}'")
141148
except Exception as e:
142149
exception = f"{type(e).__name__}: {e}"

discordai/bot/cogs/chatgpt.py

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
""""
2+
Copyright © Krypton 2019-2022 - https://github.com/kkrypt0nn (https://krypton.ninja)
3+
Description:
4+
🐍 A simple template to start to code your own and personalized discord bot in Python programming language.
5+
6+
Version: 5.4.1
7+
"""
8+
9+
from discord import app_commands
10+
from discord.ext import commands
11+
from discord.ext.commands import Context
12+
from enum import Enum
13+
14+
import openai
15+
import tiktoken
16+
17+
class Roles(Enum):
18+
system = "system"
19+
user = "user"
20+
assistant = "assistant"
21+
22+
class Warnings(Enum):
23+
low = ""
24+
medium = "\n:warning:You are nearing the size limit for chatGPT's chat history:warning:"
25+
high = ":exclamation:You have reached the size limit for chatGPT's chat history. Use the `/resetchat` command to continue using chatGPT:exclamation:"
26+
27+
def num_tokens_from_messages(messages, model):
28+
"""Returns the number of tokens used by a list of messages."""
29+
try:
30+
encoding = tiktoken.encoding_for_model(model)
31+
except KeyError:
32+
encoding = tiktoken.get_encoding("cl100k_base")
33+
num_tokens = 0
34+
for message in messages:
35+
num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
36+
for key, value in message.items():
37+
num_tokens += len(encoding.encode(value))
38+
if key == "name": # if there's a name, the role is omitted
39+
num_tokens += -1 # role is always required and always 1 token
40+
num_tokens += 2 # every reply is primed with <im_start>assistant
41+
return num_tokens
42+
43+
class ChatGPT(commands.Cog, name="chatgpt"):
44+
def __init__(self, bot):
45+
self.bot = bot
46+
47+
@commands.hybrid_command(
48+
name="chatgpt",
49+
description="Generate a chatGPT completion",
50+
)
51+
@app_commands.describe(
52+
prompt="The prompt to pass to chatGPT: Default=\"\"",
53+
role=" system | user | assistant: Default=user",
54+
temp="What sampling temperature to use. Higher values means more risks: Min=0 Max=1 Default=1",
55+
presence_penalty="Number between -2.0 and 2.0. Positive values will encourage new topics: Min=-2 Max=2 Default=0",
56+
frequency_penalty="Number between -2.0 and 2.0. Positive values will encourage new words: Min=-2 Max=2 Default=0")
57+
async def chatgpt(self, context: Context, prompt: str = "", role: Roles = Roles.user, temp: float = 1.0,
58+
presence_penalty: float = 0.0, frequency_penalty: float = 0.0):
59+
openai.api_key = self.bot.config["openai_key"]
60+
model = "gpt-3.5-turbo"
61+
temp = min(max(temp, 0), 1)
62+
presPen = min(max(presence_penalty, -2), 2)
63+
freqPen = min(max(frequency_penalty, -2), 2)
64+
65+
if context.guild.id not in self.bot.chat_messages:
66+
self.bot.chat_messages[context.guild.id] = [{"role": "system", "content": self.bot.chat_init[context.guild.id]}] if context.guild.id in self.bot.chat_init and self.bot.chat_init[context.guild.id] else []
67+
self.bot.chat_messages[context.guild.id].append({"role": role.value, "content": prompt})
68+
messages = self.bot.chat_messages[context.guild.id]
69+
70+
token_cost = num_tokens_from_messages(messages, model)
71+
if 325 <= 4096-token_cost:
72+
warning = Warnings.low
73+
elif 4096-token_cost >= 5:
74+
warning = Warnings.medium
75+
else:
76+
warning = Warnings.high
77+
78+
await context.defer()
79+
try:
80+
if warning == Warnings.high:
81+
await context.send(warning.value)
82+
else:
83+
response = openai.ChatCompletion.create(
84+
model=model,
85+
messages=messages,
86+
temperature=temp,
87+
frequency_penalty=presPen,
88+
presence_penalty=freqPen,
89+
max_tokens=325 if 325 <= 4096-token_cost else token_cost
90+
)
91+
await context.send(f"{prompt}\n{response['choices'][0]['message']['content']}{warning.value}"[:2000])
92+
self.bot.chat_messages[context.guild.id].append(response['choices'][0]['message'])
93+
except Exception as error:
94+
print(f"Failed to generate valid response for prompt: {prompt}\nError: {error}")
95+
await context.send(
96+
f"Failed to generate valid response for prompt: {prompt}\nError: {error}"
97+
)
98+
99+
@commands.hybrid_command(
100+
name="resetchat",
101+
description="Resets the chat history for chatGPT completions",
102+
)
103+
async def resetchat(self, context):
104+
self.bot.chat_messages[context.guild.id] = [{"role": "system", "content": self.bot.chat_init[context.guild.id]}] if context.guild.id in self.bot.chat_init and self.bot.chat_init[context.guild.id] else []
105+
await context.send("Chat history has been reset")
106+
107+
@commands.hybrid_command(
108+
name="setchatinit",
109+
description="Set the initialization message, a guide for the AI on how to respond to future chat messages",
110+
)
111+
@app_commands.describe(message="The init message for chatGPT completions. Omit to reset")
112+
async def setchatinit(self, context, message: str = ""):
113+
self.bot.chat_init[context.guild.id] = message
114+
if message:
115+
if context.guild.id in self.bot.chat_messages:
116+
if self.bot.chat_messages[context.guild.id] and self.bot.chat_messages[context.guild.id][0]["role"] == "system":
117+
self.bot.chat_messages[context.guild.id][0] = {"role": "system", "content": self.bot.chat_init[context.guild.id]}
118+
else:
119+
self.bot.chat_messages[context.guild.id] = [{"role": "system", "content": self.bot.chat_init[context.guild.id]}] + self.bot.chat_messages[context.guild.id]
120+
await context.send("Chat init message has been set")
121+
else:
122+
if context.guild.id in self.bot.chat_messages:
123+
if self.bot.chat_messages[context.guild.id] and self.bot.chat_messages[context.guild.id][0]["role"] == "system":
124+
self.bot.chat_messages[context.guild.id].pop(0)
125+
await context.send("Chat init message has been reset")
126+
127+
async def setup(bot):
128+
await bot.add_cog(ChatGPT(bot))

discordai/bot/cogs/openai.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -36,23 +36,23 @@ def __init__(self, bot):
3636
frequency_penalty="Number between -2.0 and 2.0. Positive values will encourage new words: Min=-2 Max=2 Default=0")
3737
async def openai(self, context: Context, prompt: str = "", model: Models = Models.chatgpt, temp: float = 1.0,
3838
presence_penalty: float = 0.0, frequency_penalty: float = 0.0):
39+
openai.api_key = self.bot.config["openai_key"]
3940
temp = min(max(temp, 0), 1)
4041
presPen = min(max(presence_penalty, -2), 2)
4142
freqPen = min(max(frequency_penalty, -2), 2)
4243

4344
await context.defer()
4445
try:
45-
openai.api_key = self.bot.config["openai_key"]
4646
if model.value == 'gpt-3.5-turbo':
4747
response = openai.ChatCompletion.create(
48-
model=model.value,
49-
messages=[{"role": "user", "content": prompt}],
50-
temperature=temp,
51-
frequency_penalty=presPen,
52-
presence_penalty=freqPen,
53-
max_tokens=325
48+
model=model.value,
49+
messages=[{"role": "user", "content": prompt}],
50+
temperature=temp,
51+
frequency_penalty=presPen,
52+
presence_penalty=freqPen,
53+
max_tokens=325
5454
)
55-
await context.send(f"{prompt}{response['choices'][0]['message']['content']}"[:2000])
55+
await context.send(f"{prompt}\n\n{response['choices'][0]['message']['content']}"[:2000])
5656
else:
5757
response = openai.Completion.create(
5858
engine=model.value,

discordai/template.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,26 @@
1010
from discord.ext.commands import Context
1111
1212
import openai
13+
import re
14+
15+
def replace_emoji(emoji_name: str, emoji_map):
16+
if emoji_name in emoji_map:
17+
if emoji_map[emoji_name][1]:
18+
return f"<a:{{emoji_name}}:{{emoji_map[emoji_name][0]}}>"
19+
else:
20+
return f"<:{{emoji_name}}:{{emoji_map[emoji_name][0]}}>"
21+
elif emoji_name.upper() in emoji_map:
22+
if emoji_map[emoji_name.upper()][1]:
23+
return f"<a:{{emoji_name.upper()}}:{{emoji_map[emoji_name.upper()][0]}}>"
24+
else:
25+
return f"<:{{emoji_name.upper()}}:{{emoji_map[emoji_name.upper()][0]}}>"
26+
elif emoji_name.lower() in emoji_map:
27+
if emoji_map[emoji_name.lower()][1]:
28+
return f"<a:{{emoji_name.lower()}}:{{emoji_map[emoji_name.lower()][0]}}>"
29+
else:
30+
return f"<:{{emoji_name.lower()}}:{{emoji_map[emoji_name.lower()][0]}}>"
31+
else:
32+
return f":{{emoji_name}}:"
1333
1434
1535
class {class_name}(commands.Cog, name="{command_name}"):
@@ -28,7 +48,7 @@ def __init__(self, bot):
2848
max_tokens="The max number of tokens to generate. Each token costs credits: Default={max_tokens_default}",
2949
stop="Whether to stop after the first sentence: Default={stop_default}",
3050
bold="Whether to bolden the original prompt: Default={bold_default}")
31-
async def customai(self, context: Context, prompt: str = "", temp: float = {temp_default},
51+
async def {command_name}(self, context: Context, prompt: str = "", temp: float = {temp_default},
3252
presence_penalty: float = {pres_default}, frequency_penalty: float = {freq_default}, max_tokens: int = {max_tokens_default},
3353
stop: bool = {stop_default}, bold: bool = {bold_default}):
3454
temp = min(max(temp, 0), 1)
@@ -48,7 +68,9 @@ async def customai(self, context: Context, prompt: str = "", temp: float = {temp
4868
echo=False,
4969
stop='.' if stop else None,
5070
)
51-
await context.send(f"{{'**' if bold and prompt else ''}}{{prompt}}{{'**' if bold and prompt else ''}}{{response[\'choices\'][0][\'text\'][:2000]}}")
71+
emojied_response = re.sub(r":(\w+):", lambda match: replace_emoji(
72+
match.group(1), context.bot.emoji_map), f"{{'**' if bold and prompt else ''}}{{prompt}}{{'**' if bold and prompt else ''}}{{response[\'choices\'][0][\'text\']}}")
73+
await context.send(emojied_response[:2000])
5274
except Exception as error:
5375
print({error})
5476
await context.send(

docker-compose.yaml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,14 @@ services:
88
environment:
99
- DISCORD_TOKEN
1010
- OPENAI_KEY
11+
- PYTHONUNBUFFERED=1
1112
env_file: ./.env
1213
volumes:
1314
- ./discordai:/usr/local/lib/python3.11/site-packages/discordai
15+
- ./init.sh:/main/init.sh
1416
command:
1517
- sh
1618
- -c
1719
- |
18-
tail -f /dev/null
20+
./init.sh
21+
discordai bot start

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
discord.py
22
openai
3+
tiktoken
34
appdirs
45
discordai_modelizer @ git+https://github.com/A-Baji/[email protected]

0 commit comments

Comments
 (0)