1
0
mirror of https://github.com/osmarks/autobotrobot synced 2025-02-14 07:40:02 +00:00

Improve Autogollark

This commit is contained in:
osmarks 2024-08-29 20:56:55 +01:00
parent 2872c34951
commit a48a2706d5
2 changed files with 10 additions and 8 deletions

View File

@@ -41,6 +41,8 @@ class Sentience(commands.Cog):
        for prefix in PREFIXES:
            if content.startswith(prefix):
                content = content.removeprefix(prefix).lstrip()
+       if content == "wipe_memory":
+           prompt = []
        if not content and message.embeds:
            content = message.embeds[0].title
        elif not content and message.attachments:
@@ -102,10 +104,10 @@ class Sentience(commands.Cog):
        print(gollark_data + conversation)
        # generate response
-       generation = await util.generate(self.session, "GOLLARK SAMPLES:\n" + gollark_data + "CONVERSATION:\n" + conversation)
-       print("output", generation)
-       if generation.strip():
-           await ctx.send(AUTOGOLLARK_MARKER + generation.strip())
+       generation = await util.generate(self.session, gollark_data + conversation, stop=["\n["])
+       generation = generation.strip().strip("[\n ")
+       if generation:
+           await ctx.send(AUTOGOLLARK_MARKER + generation)

    @commands.Cog.listener("on_message")
    async def autogollark_listener(self, message):
@@ -118,7 +120,7 @@ class Sentience(commands.Cog):
        raw_memes = await util.user_config_lookup(ctx, "enable_raw_memes") == "true"
        async with self.session.post(util.config["memetics"]["meme_search_backend"], json={
            "terms": [{"text": query, "weight": 1}],
-           "k": 20
+           "k": 200
        }) as res:
            results = await res.json()
        mat = results["matches"][:(4 if search_many else 1)]

View File

@@ -339,11 +339,11 @@ def chunks(source, length):
    for i in range(0, len(source), length):
        yield source[i : i+length]

-async def generate(response: aiohttp.ClientSession, prompt):
-    async with response.post(config["ai"]["llm_backend"], json={
+async def generate(sess: aiohttp.ClientSession, prompt, stop=["\n"]):
+    async with sess.post(config["ai"]["llm_backend"], json={
        "prompt": prompt,
        "max_tokens": 200,
-       "stop": ["\n"],
+       "stop": stop,
        "client": "abr",
        **config["ai"].get("params", {})
    }, headers=config["ai"].get("headers", {})) as res: