import asyncio
import base64
import os
import random
import re
import string
import subprocess
from collections import defaultdict, deque
from datetime import datetime, timedelta

import aiofiles
import aiohttp
import discord
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.date import DateTrigger
from discord import File, Status
from discord.ext import tasks
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings

import globals
from commands import handle_command
from utils import load_mood_description

scheduler = AsyncIOScheduler()


# Switch the active Ollama model, unloading any others first
async def switch_model(model_name: str, timeout: int = 600):
    if globals.current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return

    # Unload all other models to clear VRAM.
    # /api/ps lists the models currently loaded in memory; a generate request
    # with keep_alive=0 asks Ollama to unload that model.
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{globals.OLLAMA_URL}/api/ps") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(
                            f"{globals.OLLAMA_URL}/api/generate",
                            json={"model": model["name"], "keep_alive": 0},
                        )
            else:
                print("⚠️ Failed to check currently loaded models.")

    print(f"🔄 Switching to model '{model_name}'...")

    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}

    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    globals.current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again

    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")


async def is_miku_addressed(message) -> bool:
    # If message is a reply, check the referenced message author
    if message.reference:
        try:
            referenced_msg = await message.channel.fetch_message(message.reference.message_id)
            if referenced_msg.author == message.guild.me:  # or globals.client.user if you use client
                return True
        except Exception as e:
            print(f"⚠️ Could not fetch referenced message: {e}")

    # Otherwise, look for "miku" as a standalone word anywhere in the message
    cleaned = message.content.strip()
    return bool(re.search(
        r'(?<!\w)miku(?!\w)',
        cleaned,
        re.IGNORECASE
    ))
def schedule_random_bedtime():
    now = datetime.now()
    # Base bedtime target (23:00 here is an assumed value); if it has already
    # passed today, schedule for tomorrow instead
    target_time = now.replace(hour=23, minute=0, second=0, microsecond=0)
    if now > target_time:
        target_time += timedelta(days=1)

    # Add random offset (0–29 mins)
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)

    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")


async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    text = f"@{username}"

    # Overlay positions (x, y), keyed by position index
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }

    # Each entry: (start_time, end_time, position_index, text_type)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]

    # Build drawtext filters
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]

        # Determine actual text content
        text_content = f"@{username}" if text_type == "username" else text_type

        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)

    vf_string = ",".join(drawtext_filters)

    ffmpeg_command = [
        "ffmpeg",
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]

    try:
        subprocess.run(ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")


async def detect_and_react_to_kindness(message, after_reply=False):
    if message.id in globals.kindness_reacted_messages:
        return  # Already reacted; skip

    content = message.content.lower()
    emoji = random.choice(globals.HEART_REACTIONS)

    # 1. Keyword-based detection
    if any(keyword in content for keyword in globals.KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return

    # 2. If not after_reply, defer model-based check
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return

    # 3. Model-based detection
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")
        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
Reacted.") else: print("🧊 No kindness detected.") except Exception as e: print(f"⚠️ Error during kindness analysis: {e}") @globals.client.event async def on_ready(): print(f'🎤 MikuBot connected as {globals.client.user}') globals.BOT_USER = globals.client.user # Change mood every 1 hour rotate_mood.start() # Schedule the weekly task (Monday 07:30) scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30) # Schedule first bedtime reminder schedule_random_bedtime() # Reschedule every midnight scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0) #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22) scheduler.start() @globals.client.event async def on_message(message): if message.author == globals.client.user: return handled, globals.CURRENT_MOOD_NAME, globals.CURRENT_MOOD, globals.PREVIOUS_MOOD_NAME, globals.IS_SLEEPING = await handle_command( message, set_sleep_state ) if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference: async with message.channel.typing(): # Get replied-to user try: replied_msg = await message.channel.fetch_message(message.reference.message_id) target_username = replied_msg.author.display_name # Prepare video base_video = "MikuMikuBeam.mp4" output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4" await overlay_username_with_ffmpeg(base_video, output_video, target_username) caption = f"Here you go, @{target_username}! 🌟" #await message.channel.send(content=caption, file=discord.File(output_video)) await replied_msg.reply(file=discord.File(output_video)) except Exception as e: print(f"⚠️ Error processing video: {e}") await message.channel.send("Sorry, something went wrong while generating the video.") return text = message.content.strip() if await is_miku_addressed(message): if globals.IS_SLEEPING: if random.random() < 1/3: # ⅓ chance sleep_talk_lines = [ "mnnn... five more minutes... zzz...", "nya... d-don't tickle me there... mm~", "zz... nyaa~ pancakes flying... eep...", "so warm... stay close... zzz...", "huh...? is it morning...? nooo... \*rolls over*", "\*mumbles* pink clouds... and pudding... heehee...", "\*softly snores* zzz... nyuu... mmh..." 
                response = random.choice(sleep_talk_lines)
                await message.channel.typing()
                await asyncio.sleep(random.uniform(1.5, 3.0))  # random delay before replying
                await message.channel.send(response)
            else:
                # No response at all
                print("😴 Miku is asleep and didn't respond.")
            return  # Skip any further message handling

        prompt = text  # No cleanup; keep it raw

        # 1st kindness check with just keywords
        if globals.CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message)

        async with message.channel.typing():
            # If message has an image attachment
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return

                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return

            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)

        # 2nd kindness check (only if no keywords detected)
        if globals.CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message, after_reply=True)

    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return

    if globals.AUTO_MOOD and 'response' in locals():
        detected = detect_mood_shift(response)
        if detected and detected != globals.CURRENT_MOOD_NAME:
            # Block direct transitions to asleep unless from sleepy
            if detected == "asleep" and globals.CURRENT_MOOD_NAME != "sleepy":
                print("❌ Ignoring asleep mood; Miku wasn't sleepy before.")
            else:
                globals.PREVIOUS_MOOD_NAME = globals.CURRENT_MOOD_NAME
                globals.CURRENT_MOOD_NAME = detected
                globals.CURRENT_MOOD = load_mood_description(detected)
                print(f"🔄 Auto-updated mood to: {detected}")

                if detected == "asleep":
                    globals.IS_SLEEPING = True
                    await set_sleep_state(True)
                    await asyncio.sleep(3600)  # 1 hour
                    globals.IS_SLEEPING = False
                    await set_sleep_state(False)
                    globals.CURRENT_MOOD_NAME = "neutral"
                    globals.CURRENT_MOOD = load_mood_description("neutral")


globals.client.run(globals.DISCORD_BOT_TOKEN)