Initial commit: Miku Discord Bot
656
.bot.bak.80825/.bak.bot.py.260625-2
Normal file
@@ -0,0 +1,656 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string

import base64
import subprocess
import aiofiles

from commands import handle_command
from utils import load_mood_description
import globals

from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document

from collections import defaultdict, deque

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
from discord.ext import tasks

from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta

scheduler = AsyncIOScheduler()

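# NOTE: the bedtime reminder below reads guild.members and member.status, which
# only return real data if the client defined in globals was created with the
# members and presences gateway intents enabled. That setup is assumed to live
# in globals.py and is not shown in this file.
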
# Switch the active Ollama model
async def switch_model(model_name: str, timeout: int = 600):
    if globals.current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return

    # Unload all other models to clear VRAM
    async with aiohttp.ClientSession() as session:
        # /api/ps lists the models currently loaded into memory
        async with session.get(f"{globals.OLLAMA_URL}/api/ps") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        # keep_alive=0 asks Ollama to evict the model right away
                        await session.post(
                            f"{globals.OLLAMA_URL}/api/generate",
                            json={"model": model["name"], "keep_alive": 0}
                        )
            else:
                print("⚠️ Failed to check currently loaded models.")

    print(f"🔄 Switching to model '{model_name}'...")

    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}

    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    globals.current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again

    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")

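# Illustrative call order (not extra functionality): the image pipeline further
# down runs `await switch_model("moondream")` before describing an attachment,
# then `await switch_model(globals.OLLAMA_MODEL)` to hand the description back
# to the chat model. Each switch polls /api/generate once per second for up to
# `timeout` seconds before raising TimeoutError.
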
async def is_miku_addressed(message) -> bool:
    # If message is a reply, check the referenced message author
    if message.reference:
        try:
            referenced_msg = await message.channel.fetch_message(message.reference.message_id)
            if referenced_msg.author == message.guild.me:  # or globals.client.user if you use client
                return True
        except Exception as e:
            print(f"⚠️ Could not fetch referenced message: {e}")

    cleaned = message.content.strip()

    return bool(re.search(
        r'(?<![\w\(])(?:[^\w\s]{0,2}\s*)?miku(?:\s*[^\w\s]{0,2})?(?=,|\s*,|[!\.?\s]*$)',
        cleaned,
        re.IGNORECASE
    ))

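# Rough behaviour of the addressing regex above (read from the pattern itself,
# not from separate tests):
#   "Miku"                  -> addressed (bare name)
#   "good morning, miku!"   -> addressed (name at the end, punctuation allowed)
#   "miku, how are you?"    -> addressed (name followed by a comma)
#   "mikudayo is cute"      -> not addressed (name embedded in a longer word)
#   "miku is cool"          -> not addressed (name mid-sentence without a comma)
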
# Detect mood cues from Miku's response
def detect_mood_shift(response_text):
    mood_keywords = {
        "asleep": [
            "good night", "goodnight", "sweet dreams", "going to bed", "I will go to bed", "zzz~", "sleep tight"
        ],
        "neutral": [
            "okay", "sure", "alright", "i see", "understood", "hmm",
            "sounds good", "makes sense", "alrighty", "fine", "got it"
        ],
        "bubbly": [
            "so excited", "feeling bubbly", "super cheerful", "yay!", "✨", "nya~",
            "kyaa~", "heehee", "bouncy", "so much fun", "i’m glowing!", "nee~", "teehee", "I'm so happy"
        ],
        "sleepy": [
            "i'm sleepy", "getting tired", "yawn", "so cozy", "zzz", "nap time",
            "just five more minutes", "snooze", "cuddle up", "dozing off", "so warm"
        ],
        "curious": [
            "i'm curious", "want to know more", "why?", "hmm?", "tell me more", "interesting!",
            "what’s that?", "how does it work?", "i wonder", "fascinating", "??", "🧐"
        ],
        "shy": [
            "um...", "sorry if that was weird", "i’m kind of shy", "eep", "i hope that’s okay", "i’m nervous",
            "blushes", "oh no", "hiding face", "i don’t know what to say", "heh...", "/////"
        ],
        "serious": [
            "let’s be serious", "focus on the topic", "this is important", "i mean it", "be honest",
            "we need to talk", "listen carefully", "let’s not joke", "truthfully", "let’s be real"
        ],
        "excited": [
            "OMG!", "this is amazing", "i’m so hyped", "YAY!!", "let’s go!", "incredible!!!",
            "AHHH!", "best day ever", "this is it!", "totally pumped", "i can’t wait", "🔥🔥🔥", "i'm excited", "Wahaha"
        ],
        "melancholy": [
            "feeling nostalgic", "kind of sad", "just thinking a lot", "like rain on glass", "memories",
            "bittersweet", "sigh", "quiet day", "blue vibes", "longing", "melancholy", "softly"
        ],
        "flirty": [
            "hey cutie", "aren’t you sweet", "teasing you~", "wink wink", "is that a blush?", "giggle~",
            "come closer", "miss me?", "you like that, huh?", "🥰", "flirt mode activated", "you’re kinda cute"
        ],
        "romantic": [
            "you mean a lot to me", "my heart", "i adore you", "so beautiful", "so close", "love letter",
            "my dearest", "forever yours", "i’m falling for you", "sweetheart", "💖", "you're my everything"
        ],
        "irritated": [
            "ugh", "seriously?", "can we not", "whatever", "i'm annoyed", "you don’t get it",
            "rolling my eyes", "why do i even bother", "ugh, again?", "🙄", "don’t start", "this again?"
        ],
        "angry": [
            "stop it", "enough!", "that’s not okay", "i’m mad", "i said no", "don’t push me",
            "you crossed the line", "furious", "this is unacceptable", "😠", "i’m done", "don’t test me"
        ]
    }

    for mood, phrases in mood_keywords.items():
        if mood == "asleep" and globals.CURRENT_MOOD_NAME != "sleepy":
            print(f"❎ Mood 'asleep' skipped - mood isn't 'sleepy', it's '{globals.CURRENT_MOOD_NAME}'")
            continue  # Only allow transition to asleep from sleepy

        for phrase in phrases:
            if phrase.lower() in response_text.lower():
                print(f"*️⃣ Mood keyword triggered: {phrase}")
                return mood
    return None

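# Example (illustrative): a reply containing "sweet dreams" returns "asleep",
# but only while CURRENT_MOOD_NAME is already "sleepy"; otherwise the asleep
# bucket is skipped and matching continues with the remaining moods in
# dictionary order. Matching is a case-insensitive substring search, so short
# phrases like "fine" can also fire inside longer words.
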
async def set_sleep_state(sleeping: bool):
    for guild in globals.client.guilds:
        me = guild.get_member(globals.BOT_USER.id)
        if me is not None:
            try:
                nickname = "Hatsune Miku💤" if sleeping else "Hatsune Miku"
                await me.edit(nick=nickname)
                status = discord.Status.invisible if sleeping else discord.Status.online
                await globals.client.change_presence(status=status)
            except discord.Forbidden:
                print("⚠️ Missing permission to change nickname in guild:", guild.name)
            except discord.HTTPException as e:
                print("⚠️ Failed to change nickname:", e)

@tasks.loop(hours=1)
async def rotate_mood():
    # Pick a mood name different from the current one (up to 5 attempts)
    new_mood = globals.CURRENT_MOOD_NAME
    attempts = 0
    while new_mood == globals.CURRENT_MOOD_NAME and attempts < 5:
        new_mood = random.choice(globals.AVAILABLE_MOODS)
        attempts += 1

    globals.CURRENT_MOOD_NAME = new_mood
    globals.CURRENT_MOOD = load_mood_description(new_mood)
    print(f"⏰ Mood auto-rotated to: {new_mood}")

async def download_and_encode_image(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status != 200:
                return None
            img_bytes = await resp.read()
            return base64.b64encode(img_bytes).decode('utf-8')

async def analyze_image_with_qwen(base64_img):
    # Despite the function name, the vision model used here is moondream
    await switch_model("moondream")

    payload = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False
    }

    headers = {"Content-Type": "application/json"}

    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No description.")
            else:
                return f"Error: {response.status}"

async def rephrase_as_miku(qwen_output, user_prompt):
    await switch_model(globals.OLLAMA_MODEL)  # likely llama3

    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()

    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])

    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku is currently feeling: {globals.CURRENT_MOOD}\nPlease respond in a way that reflects this emotional tone.\n\n"
        f"Miku:"
    )

    payload = {
        "model": globals.OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }

    headers = {"Content-Type": "application/json"}

    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"

# Load and index once at startup
def load_miku_knowledge():
    with open("miku_lore.txt", "r", encoding="utf-8") as f:
        text = f.read()

    from langchain.text_splitter import RecursiveCharacterTextSplitter

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
    )

    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(text)]

    vectorstore = FAISS.from_documents(docs, globals.embeddings)
    return vectorstore


def load_miku_lyrics():
    with open("miku_lyrics.txt", "r", encoding="utf-8") as f:
        lyrics_text = f.read()

    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(lyrics_text)]

    vectorstore = FAISS.from_documents(docs, globals.embeddings)
    return vectorstore


miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()

async def query_ollama(user_prompt, user_id):
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)

    context_lore = "\n".join([doc.page_content for doc in relevant_docs_lore])
    context_lyrics = "\n".join([doc.page_content for doc in relevant_docs_lyrics])

    # Persona definition
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()

    # Build conversation history
    history = globals.conversation_history[user_id]
    history_text = "\n".join([f"User: {u}\nMiku: {m}" for u, m in history])

    # Combine prompt
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nMiku is currently feeling: {globals.CURRENT_MOOD}\nPlease respond in a way that reflects this emotional tone.\nUser: {user_prompt}\nMiku:"
    )

    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": globals.OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }

    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                globals.conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"

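# The prompt assembled above ends up looking roughly like this (sketch only):
#
#   <lore chunks>
#
#   <lyrics chunks>
#
#   User: ...        (one pair per turn of this user's history)
#   Miku: ...
#   Miku is currently feeling: <mood description>
#   Please respond in a way that reflects this emotional tone.
#   User: <new message>
#   Miku:
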
async def send_monday_video():
    await switch_model(globals.OLLAMA_MODEL)

    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")

    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"

    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]

    for channel_id in target_channel_ids:
        channel = globals.client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            continue  # Skip this channel but still try the remaining ones

        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")

            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")

async def send_bedtime_reminder():
    await switch_model(globals.OLLAMA_MODEL)

    for channel_id in globals.BEDTIME_CHANNEL_IDS:
        channel = globals.client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue

        guild = channel.guild

        # Filter online members (excluding bots)
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]

        specific_user_id = 214857593045254151  # target user ID
        specific_user = guild.get_member(specific_user_id)
        if specific_user and specific_user not in online_members:
            online_members.append(specific_user)

        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue

        chosen_one = random.choice(online_members)

        # Generate bedtime message
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late. "
            f"Miku is currently feeling: {globals.CURRENT_MOOD}\nPlease word it in a way that reflects this emotional tone."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")

        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")

def schedule_random_bedtime():
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)

    # Add random offset (0–29 mins)
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)

    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")

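# With the 20:30 base time plus a random 0-29 minute offset, the reminder lands
# somewhere between 20:30 and 20:59 local time. on_ready() registers a midnight
# cron job that calls this function again, so each day gets a fresh random slot.
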
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"

    # Overlay positions (x, y), keyed by position index
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }

    # Each entry: (start_time, end_time, position_index, text_type)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]

    # Build drawtext filters
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]

        # Determine actual text content
        text_content = f"@{username}" if text_type == "username" else text_type

        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"

        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)

    vf_string = ",".join(drawtext_filters)

    ffmpeg_command = [
        "ffmpeg",
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]

    try:
        # Note: subprocess.run is synchronous and blocks the event loop while ffmpeg renders
        subprocess.run(ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")

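# Example of one generated drawtext filter, assuming a hypothetical username
# "koko" and the first text_entries row (wrapped here for readability):
#   drawtext=text='@koko':fontfile='/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf':
#     fontcolor=black:fontsize=30:x=250 - text_w/2:y=370 - text_h/2:
#     enable='between(t,4.767,5.367)'
# All entries are joined with commas into a single -vf filtergraph.
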
async def detect_and_react_to_kindness(message, after_reply=False):
    if message.id in globals.kindness_reacted_messages:
        return  # Already reacted — skip

    content = message.content.lower()

    emoji = random.choice(globals.HEART_REACTIONS)

    # 1. Keyword-based detection
    if any(keyword in content for keyword in globals.KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return

    # 2. If not after_reply, defer model-based check
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return

    # 3. Model-based detection
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")

        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")

@globals.client.event
async def on_ready():
    print(f'🎤 MikuBot connected as {globals.client.user}')

    globals.BOT_USER = globals.client.user

    # Change mood every 1 hour
    rotate_mood.start()

    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)

    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)

    scheduler.start()

@globals.client.event
async def on_message(message):
    if message.author == globals.client.user:
        return

    handled, globals.CURRENT_MOOD_NAME, globals.CURRENT_MOOD, globals.PREVIOUS_MOOD_NAME, globals.IS_SLEEPING = await handle_command(
        message,
        set_sleep_state
    )

    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name

                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"

                await overlay_username_with_ffmpeg(base_video, output_video, target_username)

                caption = f"Here you go, @{target_username}! 🌟"
                #await message.channel.send(content=caption, file=discord.File(output_video))
                await replied_msg.reply(file=discord.File(output_video))

            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return

    text = message.content.strip()

    if await is_miku_addressed(message):

        if globals.IS_SLEEPING:
            if random.random() < 1/3:  # ⅓ chance
                sleep_talk_lines = [
                    "mnnn... five more minutes... zzz...",
                    "nya... d-don't tickle me there... mm~",
                    "zz... nyaa~ pancakes flying... eep...",
                    "so warm... stay close... zzz...",
                    "huh...? is it morning...? nooo... \\*rolls over*",
                    "\\*mumbles* pink clouds... and pudding... heehee...",
                    "\\*softly snores* zzz... nyuu... mmh..."
                ]
                response = random.choice(sleep_talk_lines)
                await message.channel.typing()
                await asyncio.sleep(random.uniform(1.5, 3.0))  # random delay before replying
                await message.channel.send(response)
            else:
                # No response at all
                print("😴 Miku is asleep and didn't respond.")
            return  # Skip any further message handling

        prompt = text  # No cleanup — keep it raw

        # 1st kindness check with just keywords
        if globals.CURRENT_MOOD_NAME not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message)

        async with message.channel.typing():
            # If message has an image attachment
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):

                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)

                        await message.channel.send(miku_reply)
                        return

            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))

            await message.channel.send(response)

        # 2nd kindness check (only if no keywords detected)
        if globals.CURRENT_MOOD_NAME not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message, after_reply=True)

    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return

    if globals.AUTO_MOOD and 'response' in locals():
        detected = detect_mood_shift(response)
        if detected and detected != globals.CURRENT_MOOD_NAME:

            # Block direct transitions to asleep unless from sleepy
            if detected == "asleep" and globals.CURRENT_MOOD_NAME != "sleepy":
                print("❌ Ignoring asleep mood; Miku wasn't sleepy before.")
            else:
                globals.PREVIOUS_MOOD_NAME = globals.CURRENT_MOOD_NAME
                globals.CURRENT_MOOD_NAME = detected
                globals.CURRENT_MOOD = load_mood_description(detected)
                print(f"🔄 Auto-updated mood to: {detected}")

                if detected == "asleep":
                    globals.IS_SLEEPING = True
                    await set_sleep_state(True)
                    await asyncio.sleep(3600)  # 1 hour
                    globals.IS_SLEEPING = False
                    await set_sleep_state(False)
                    globals.CURRENT_MOOD_NAME = "neutral"
                    globals.CURRENT_MOOD = load_mood_description("neutral")

globals.client.run(globals.DISCORD_BOT_TOKEN)