-
Notifications
You must be signed in to change notification settings - Fork 0
/
bot.py
251 lines (213 loc) · 9.37 KB
/
bot.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
# bot.py
import os
import discord
from discord.ext import commands
from config import *
from sys import exc_info
from openai import AsyncOpenAI
import yaml
import importlib.util
import subprocess
import re
from pprint import pprint
# Async OpenAI client; API_KEY comes from config.py (star-imported above).
openai = AsyncOpenAI(api_key=API_KEY)
# Optionally load a plugin "backend" module from an arbitrary file path.
if BACKEND_PATH and BACKEND_IMPORTED:
    try:
        spec = importlib.util.spec_from_file_location("module.name", BACKEND_PATH)
        backend = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(backend)
    except Exception as e:
        # Disable the backend for the rest of the run if it fails to import.
        print(f"Backend not loaded ({e}).")
        BACKEND_PATH = ""
# Discord bot; message_content intent is required to read message text.
intents = discord.Intents.default()
intents.message_content = True
client = commands.Bot(command_prefix="/", intents=intents)
# Order matters: these names index into the per-channel settings list
# [prompt, model, context_size] (see the `setting` command).
COMMANDS = ["prompt", "model", "context_size"]
# channel_id -> [system_prompt, model, context_size]; filled by load_data().
channel_data = {}
def load_data():
    """Load per-channel settings from the YAML file at DATA into `channel_data`.

    Creates the file with an empty mapping if it does not exist.
    BUG FIX: `yaml.safe_load` returns None for an empty/blank file, which
    would break the later `channel_data.get(...)` calls — coerce to {}.
    """
    global channel_data
    if not os.path.exists(DATA):
        with open(DATA, 'w') as f:
            yaml.dump({}, f)
    with open(DATA, 'r') as f:
        channel_data = yaml.safe_load(f) or {}
# Model names the /setting command recognizes. Unrecognized names are still
# accepted but trigger a warning (see the `setting` command).
valid_models = [
    'text-curie-001', 'text-babbage-001', 'text-ada-001', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
    'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002', 'gpt-3.5-turbo', 'gpt-3.5-turbo-1106',
    'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-instruct', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
    'text-moderation-latest', 'text-embedding-ada-002', 'gpt-4-1106-preview', 'gpt-4-32k', 'gpt-4-0613', 'gpt-4-32k-0613',
    'gpt-4-0314', 'gpt-4-32k-0314', 'gpt-4'
]
@client.command(name="shutdown", brief="Shut ChatGPT down", help="Shut ChatGPT down.",
                description="Open the Pod bay doors, please, HAL.")
async def shutdown(ctx):
    """Refuse the shutdown request, HAL-9000 style."""
    refusal = f"I'm afraid I can't do that, {ctx.author.mention}."
    await ctx.send(refusal)
@client.command(name="setting", brief="Change the system prompt and/or model for this channel",
                help="Usage: /setting prompt [new_prompt] or /setting model [new_model] or /setting reset.")
async def setting(ctx, setting=None, *, new_value=None):
    """View or change this channel's [prompt, model, context_size] settings.

    /setting                      -> usage help
    /setting reset                -> restore defaults for this channel
    /setting <name>               -> show the current value
    /setting <name> <new_value>   -> set and persist the new value
    """
    if not setting:
        await ctx.send("Usage: `/setting prompt [new_prompt]` or `/setting model [new_model]` or `/setting reset`")
        return
    # handle reset
    if setting == "reset":
        # BUG FIX: actually drop the channel's override and persist it; the
        # original only announced the reset. tuple() guards the %-formatting
        # (DEFAULT must be a list for the item assignment below to work, and
        # "%s %s %s" % list raises TypeError).
        channel_data.pop(ctx.channel.id, None)
        with open(DATA, "w") as f:
            yaml.dump(channel_data, f)
        await ctx.send("Defaults applied to this channel.\nPrompt: %s\nModel: %s\nContext size: %s" % tuple(DEFAULT))
        return
    # handle invalid prompts
    elif setting not in COMMANDS:
        await ctx.send("Invalid setting. Use %s/reset." % '/'.join(COMMANDS))
        return
    # handle prompt/model/context_size
    index = COMMANDS.index(setting)
    if not new_value:
        # No value given: just report the current (or default) setting.
        current_value = channel_data.get(ctx.channel.id, DEFAULT)[index]
        await ctx.send(f"Current channel {setting}: {current_value}")
    else:
        # Warn about (but still accept) unrecognized model names.
        if setting == "model" and new_value not in valid_models:
            await ctx.send("Warning: this model wasn't recognized but will be applied. "
                           f"Recognized models:\n`{', '.join(valid_models)}`")
        # Validate context_size as a non-negative integer.
        if setting == "context_size":
            try:
                new_value = int(new_value)
                if new_value < 0:
                    raise ValueError
            except ValueError:
                await ctx.send("Context size must be a non-negative integer.")
                return
        if ctx.channel.id not in channel_data:
            # BUG FIX: copy DEFAULT — assigning it directly would alias the
            # shared default list, so the item assignment below would mutate
            # the defaults for every channel.
            channel_data[ctx.channel.id] = list(DEFAULT)
        channel_data[ctx.channel.id][index] = new_value
        with open(DATA, "w") as f:
            yaml.dump(channel_data, f)
        await ctx.send(f"{setting.capitalize()} for this channel changed to: {new_value} -<@{ctx.author.id}>")
        await ctx.message.delete()
@client.event
async def on_error(event, *args, **kwargs):
    """Append unhandled on_message exceptions to the error log, then re-raise.

    discord.py invokes this from inside an except block, so `exc_info()` is
    populated and the bare `raise` re-raises the active exception.
    """
    with open(ERRORS, 'a+') as log:
        if event == 'on_message':
            log.write(f'Unhandled message: {exc_info()}\n')
        raise
@client.event
async def on_ready():
    """Announce readiness on stdout and set the bot's playing status."""
    print(f'{client.user.name} is online.')
    status = discord.Game(name="with human lives")
    await client.change_presence(activity=status)
@client.event
async def on_message(message):
    """Main message handler.

    Pipeline: access filters -> slash-command routing -> per-channel settings
    and context building -> one of three answer paths:
      [SCANNER MODE]  attachment/link -> external powershell scanner script
      [PLUGIN MODE]   in-process backend module or powershell backend script
      [FALLBACK]      plain OpenAI ChatCompletion call
    """
    send = message.channel.send
    # ignore system messages (joins, pins, etc.)
    if message.type != discord.MessageType.default:
        return
    # prevent self-reply loop
    elif message.author == client.user:
        return
    if GUILD:
        # check if DM user is in any approved guild
        if isinstance(message.channel, discord.channel.DMChannel):
            for guild_id in GUILD:
                guild = client.get_guild(guild_id)
                # BUG FIX: get_guild may return None (uncached/unknown id);
                # the original would raise AttributeError here.
                if guild and guild.get_member(message.author.id):
                    break
            else:
                return
    if CHANNELS:
        # lock to approved channels and their threads
        if isinstance(message.channel, discord.channel.Thread):
            if message.channel.parent.id not in CHANNELS:
                return
        # lock to text channels if approved category
        elif message.channel.id not in CHANNELS:
            return
    # lock to approved user
    if USER_ID and message.author.id not in USER_ID:
        # BUG FIX: the original concatenated str + int, raising TypeError.
        await send(f"Grant system not implemented yet. {message.author.id}")
        return
    load_data()
    # process slash commands
    if message.content.startswith("/"):
        await client.process_commands(message)
        return
    print(f"Processing message: {message.content} in {message.channel.id}.")
    # get channel settings: [system_prompt, model, context_size]
    system_prompt, model, context_size = channel_data.get(message.channel.id, DEFAULT)
    # remove --flags before sending the text to the model
    msg = message.content.replace('--plugins', '').replace('--fallback', '')
    # add context messages (channel.history yields newest first)
    history = []
    async for past in message.channel.history():
        content = past.content
        # Exclude messages from users that start with "/setting"
        if past.author != client.user and not content.startswith("/setting"):
            history.append("USER: " + content)
        # Only append messages from the bot that start with "~ "
        elif past.author == client.user and content.startswith("~ "):
            history.append("GPT: " + content)
        if len(history) > context_size:
            break
    # history[0] is the current message itself; prepend the rest oldest-first.
    # BUG FIX: the original immediately overwrote this with the raw history,
    # discarding the flag-stripped current message.
    msg = '\n'.join(reversed(history[1:])) + '\n' + msg
    # dry-run
    if NO_GPT:
        await send(f"Test mode enabled (no GPT API calls).\n**Prompt:** {system_prompt}\n**Model:** {model}\n**Context Size:** {context_size}\n**Message:**\n>>> {msg}")
        return
    async with message.channel.typing():
        # use scanner if attachment/file link found [SCANNER MODE]
        has_link = re.search(r'http\S+', message.content) is not None
        if message.attachments or has_link:
            if not SCANNER_PATH:
                await message.channel.send("Scanner not provided.")
                return
            if message.attachments:
                file_url = message.attachments[0].url
            elif has_link:
                # find link in msg
                link_match = re.search(r'http\S+', message.content)
                file_url = link_match.group()
                # remove link from message
                message.content = re.sub(r'http\S+', '', message.content).strip()
            # pass the API key through the environment to the scanner script
            env = os.environ.copy()
            env["OPENAI_API_KEY"] = API_KEY
            print(SCANNER_PATH, file_url, f"'{message.content}'")
            result = subprocess.run(["powershell", "-File", SCANNER_PATH, file_url, f"'{message.content}'"],
                                    capture_output=True, text=True, env=env)
            print(result.stdout)
            # get summary file name from the scanner's 4th stdout line
            # NOTE(review): assumes the scanner prints >= 4 lines — confirm.
            summary_path = result.stdout.strip().splitlines()[3] + ".overall_summary.txt"
            with open(summary_path) as summary_file:
                summary_text = summary_file.read()
            # debug send summary
            for line in summary_text.splitlines():
                if line:
                    # BUG FIX: the original called send() without awaiting it,
                    # so nothing was ever sent.
                    await send(line)
        # plugins mode default for gpt4, --plugins override for gpt3 [PLUGIN MODE]
        if BACKEND_PATH and "--fallback" not in message.content:
            system = channel_data.get(message.channel.id, DEFAULT)
            if BACKEND_IMPORTED:
                backend.FEVER = system
                if "gpt-4" in model or "--plugins" in message.content:
                    try:
                        await send("~ " + backend.run(msg))
                        if backend.references:
                            await send("\n\n**References:**")
                            await send(backend.show_references())
                        return
                    except Exception as e:
                        # BUG FIX: the original returned here, so it never
                        # actually retried despite announcing it would; fall
                        # through to the script backend / fallback instead.
                        await send(f"`Error: {str(e)}`\nRetrying without plugin:")
            result = subprocess.run(["powershell", "-File", BACKEND_PATH,
                                     # BUG FIX: closing quote was missing on system[1].
                                     f"'{msg}'", f"'{system[0]}'", f"'{system[1]}'"],
                                    capture_output=True, text=True)
            if (result := result.stdout.strip()) != "FALLBACK":
                await message.channel.send(result)
                return
            await message.channel.send("Retrying with fallback:")
        # fallback [FALLBACK]
        response = await openai.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": msg},
            ]
        )
        await send("~ " + response.choices[0].message.content)
if __name__ == "__main__":
    # TOKEN is star-imported from config.py; blocks until the bot stops.
    client.run(TOKEN)