somewhat horrifying tweak to bridge mechanisms, AI
parent de835dfe08
commit 24760c5baf

@@ -2,7 +2,7 @@
 pytio==0.3.1
 aiohttp==3.8.3
 aiosqlite==0.17.0
-nextcord==2.0.0b4
+nextcord==2.3.2
 numpy==1.23
 prometheus-async==19.2.0
 prometheus-client==0.15.0
@@ -10,4 +10,5 @@ pydot==1.4.2
 toml==0.10.2
 requests==2.28.1
 python-dateutil==2.8.2
 irc==20.1.0
+parsedatetime
@@ -177,5 +177,29 @@ AutoBotRobot is operated by gollark/osmarks.
         await ctx.send("\n".join(map(lambda x: f"{x[0]} x{x[1]}", results)))
 
+    @commands.command(help="Highly advanced AI Assistant.")
+    async def ai(self, ctx, *, query=None):
+        prompt = []
+        async for message in ctx.channel.history(limit=20):
+            display_name = message.author.display_name
+            if message.author == self.bot.user:
+                display_name = util.config["ai"]["own_name"]
+            content = message.content
+            if content.startswith(ctx.prefix + "ai"):
+                content = content.removeprefix(ctx.prefix + "ai").lstrip()
+            if not content and message.embeds:
+                content = message.embeds[0].title
+            elif not content and message.attachments:
+                content = "[attachments]"
+            if not content:
+                continue
+            prompt.append(f"{display_name}: {content}\n\n")
+            if sum(len(x) for x in prompt) > util.config["ai"]["max_len"]:
+                break
+        prompt.reverse()
+        prompt.append(util.config["ai"]["own_name"] + ": ")
+        generation = await util.generate(self.session, "".join(prompt))
+        if generation: await ctx.send(generation)
+
 def setup(bot):
     bot.add_cog(GeneralCommands(bot))
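
The new ai command reads several keys from util.config["ai"] that are set elsewhere in the bot's configuration and do not appear in this diff. A minimal sketch of the assumed shape, with made-up values, purely to make the key names concrete:

# Assumed shape of util.config as consumed by the ai command and util.generate.
# The key names come from the diff above; every value here is a placeholder.
config = {
    "ai": {
        "own_name": "AutoBotRobot",  # name substituted for the bot's own messages in the prompt
        "max_len": 3000,             # rough character budget for the accumulated prompt
        "llm_backend": "http://localhost:5000/v1/completions",  # hypothetical completion endpoint URL
    },
}
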
@@ -104,7 +104,10 @@ class Telephone(commands.Cog):
                 else:
                     text = f"<{msg.author.name}> {text}"
                 await channel.send(text[:2000], allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False))
-            await send_raw(render_formatting(channel, msg.message)[:2000])
+            content = render_formatting(channel, msg.message)[:2000]
+            if channel_id in util.config["bridge_show_src"] and msg.source[0] == "discord":
+                content = f"<#{msg.source[1]}> " + content
+            await send_raw(content)
             if attachments_text: await send_raw(attachments_text)
         else:
             logging.warning("Channel %d not found", channel_id)
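
The new branch consults util.config["bridge_show_src"], which is not defined in this diff; from the membership test it is presumably a collection of channel IDs whose Discord-sourced bridge traffic should be prefixed with a mention of the originating channel. A sketch of that assumption, with placeholder IDs:

# Assumed config entry: channels listed here have Discord-sourced bridge messages
# relayed as "<#source_channel_id> <author> message" rather than just "<author> message".
config = {
    "bridge_show_src": [111111111111111111, 222222222222222222],  # placeholder channel IDs
}
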
@@ -181,7 +184,6 @@ When you want to end a call, use hangup.
         await ctx.send(f"Successfully deleted.")
         pass
 
-
     async def find_recent(self, chs, query):
         one_week = timedelta(seconds=60*60*24*7)
         one_week_ago = datetime.now() - one_week
@@ -393,7 +395,7 @@ When you want to end a call, use hangup.
 
     @telephone.command(brief="Dump links out of current channel.")
     async def graph(self, ctx):
-        graph = pydot.Dot("linkgraph", ratio="fill")
+        graph = pydot.Dot("linkgraph", ratio="fill", overlap="false")
         seen = set()
         seen_edges = set()
         def node_name(x):
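
The only change in this hunk passes overlap="false" through pydot as a graph-level Graphviz attribute, which force-directed layout engines such as neato or fdp use to avoid drawing nodes on top of each other. A small standalone sketch of the same call (node and edge names invented):

import pydot

# Keyword arguments to pydot.Dot end up as graph attributes in the emitted DOT source,
# so overlap="false" sits alongside ratio="fill" in the graph definition.
graph = pydot.Dot("linkgraph", ratio="fill", overlap="false")
graph.add_node(pydot.Node("alice"))
graph.add_node(pydot.Node("bob"))
graph.add_edge(pydot.Edge("alice", "bob"))
print(graph.to_string())  # prints the generated DOT source, including the overlap attribute
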
@@ -15,6 +15,7 @@ import time
 import math
 import pytz
 import collections
+import aiohttp
 
 config = {}
@@ -335,3 +336,11 @@ def chunks(source, length):
     for i in range(0, len(source), length):
         yield source[i : i+length]
 
+async def generate(response: aiohttp.ClientSession, prompt):
+    async with response.post(config["ai"]["llm_backend"], json={
+        "prompt": prompt,
+        "max_tokens": 200,
+        "stop": ["\n\n"]
+    }) as res:
+        data = await res.json()
+        return data["choices"][0]["text"]
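
util.generate posts the prompt to the configured llm_backend URL and returns data["choices"][0]["text"], i.e. it assumes an OpenAI-style completions response; note that the first parameter is an aiohttp.ClientSession despite being named response (the cog calls it as util.generate(self.session, ...)). A hypothetical standalone call, assuming such an endpoint is reachable at a made-up local URL:

import asyncio
import aiohttp
import util

async def main():
    # Placeholder backend URL; util.config is the module-level dict normally filled from the bot's config.
    util.config["ai"] = {"llm_backend": "http://localhost:5000/v1/completions"}
    async with aiohttp.ClientSession() as session:
        print(await util.generate(session, "AutoBotRobot: "))

asyncio.run(main())
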