50 lines
1.8 KiB
Python
50 lines
1.8 KiB
Python
|
|
# utils/kindness.py
|
||
|
|
|
||
|
|
import random
|
||
|
|
import globals
|
||
|
|
from utils.llm import query_ollama # Adjust path as needed
|
||
|
|
|
||
|
|
|
||
|
|
async def detect_and_react_to_kindness(message, after_reply=False, server_context=None):
    """Detect kindness/affection toward the bot in a message and react with a heart.

    Two-stage detection:
      1. Cheap keyword scan against ``globals.KINDNESS_KEYWORDS`` — reacts
         immediately on a hit, no LLM round-trip.
      2. Otherwise, only when ``after_reply`` is True, ask the model (via
         ``query_ollama``) for a yes/no judgement and react on "yes". When
         ``after_reply`` is False the model check is deferred so this call
         stays cheap before the bot has replied.

    Args:
        message: Discord-style message object; must expose ``.id``,
            ``.content`` and the awaitable ``.add_reaction(emoji)``.
        after_reply: True when invoked after the bot's reply, enabling the
            slower model-based check.
        server_context: Currently unused; accepted for interface
            compatibility with callers that pass it.

    Side effects: may add one reaction, records ``message.id`` in
    ``globals.kindness_reacted_messages``, and sets
    ``message.kindness_reacted = True`` once a reaction has been added.
    """
    # Idempotence guard — never react to the same message twice.
    if message.id in globals.kindness_reacted_messages:
        return  # Already reacted — skip

    content = message.content.lower()

    # Pick the emoji up front so both detection paths use the same one.
    emoji = random.choice(globals.HEART_REACTIONS)

    # 1. Keyword-based detection
    if any(keyword in content for keyword in globals.KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            message.kindness_reacted = True  # Mark as done
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            # Best-effort: reaction failures (permissions, deleted message)
            # should not propagate to the caller.
            print(f"⚠️ Error adding reaction: {e}")
        return

    # 2. If not after_reply, defer model-based check
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return

    # 3. Model-based detection
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being explicitly kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check", guild_id=None, response_type="dm_response")

        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            # Fix: keep the per-message flag in sync with the keyword path,
            # which also sets it after reacting.
            message.kindness_reacted = True
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")