chore: organize backup files into dated directory structure

- Consolidated all .bak.* files from bot/ directory into backups/2025-12-07/
- Moved unused autonomous_wip.py to backups (verified not imported anywhere)
- Relocated old .bot.bak.80825/ backup directory into backups/2025-12-07/old-bot-bak-80825/
- Preserved autonomous_v1_legacy.py as it is still actively used by autonomous.py
- Created new backups/ directory with date-stamped subdirectory for better organization
This commit is contained in:
2025-12-07 23:54:38 +02:00
parent 9009e9fc80
commit 330cedd9d1
55 changed files with 8 additions and 15 deletions

View File

@@ -1,464 +0,0 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string
import base64
import subprocess
import aiofiles
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from collections import defaultdict, deque
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
import datetime
from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta
# --- Module-level setup: scheduler, per-user memory, config, Discord client ---
scheduler = AsyncIOScheduler()
# Channels that receive the nightly bedtime reminder.
BEDTIME_CHANNEL_IDS = [761014220707332107]
# Stores last 5 exchanges per user (as deque); key is the user id string.
conversation_history = defaultdict(lambda: deque(maxlen=5))
# Configuration comes from the environment (set via docker-compose / shell).
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://ollama:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "mistral")
# Embedding backend for the FAISS vector stores built below.
embeddings = OllamaEmbeddings(
    model=OLLAMA_MODEL,
    base_url=OLLAMA_URL
)
# Set up Discord client; members/presences intents are needed by
# send_bedtime_reminder to enumerate online guild members.
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
current_model = None  # Track currently loaded model name
# Switch model
async def switch_model(model_name: str, timeout: int = 600):
    """Make `model_name` the active Ollama model, unloading any others first.

    Warms the model by POSTing a dummy prompt to /api/generate once per second
    until it answers 200, then records it in the module-global `current_model`.
    No-op if `current_model` already matches.

    Args:
        model_name: Ollama model tag to load (e.g. "mistral", "moondream").
        timeout: maximum number of one-second polling attempts.

    Raises:
        TimeoutError: if the model never becomes available.

    NOTE(review): /api/show (GET) and /api/stop look unusual for listing and
    stopping models — Ollama documents /api/ps for listing loaded models, and
    unloading via keep_alive=0. Confirm against the Ollama version deployed.
    """
    global current_model
    if current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return
    # Unload all other models to clear VRAM
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{OLLAMA_URL}/api/show") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(f"{OLLAMA_URL}/api/stop", json={"name": model["name"]})
            else:
                print("⚠️ Failed to check currently loaded models.")
    print(f"🔄 Switching to model '{model_name}'...")
    async with aiohttp.ClientSession() as session:
        await session.post(f"{OLLAMA_URL}/api/stop")
    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again
    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")
async def download_and_encode_image(url):
    """Fetch an image over HTTP and return its bytes base64-encoded as a
    UTF-8 string, or None when the server does not answer 200."""
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as response:
            if response.status == 200:
                raw = await response.read()
                return base64.b64encode(raw).decode('utf-8')
    return None
async def analyze_image_with_qwen(base64_img):
    """Describe an image using the vision model ("moondream") via Ollama.

    Args:
        base64_img: base64-encoded image data, as produced by
            download_and_encode_image.

    Returns:
        The model's textual description, or an "Error: <status>" string.
    """
    # Swap the vision model in (unloads the chat model to free VRAM).
    await switch_model("moondream")
    payload = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No description.")
            else:
                return f"Error: {response.status}"
async def rephrase_as_miku(qwen_output, user_prompt):
    """Turn an objective image description into an in-character Miku reply.

    Retrieves the lore chunks most similar to the description, then asks the
    main chat model (OLLAMA_MODEL) to answer in persona.

    Args:
        qwen_output: image description text from the vision model.
        user_prompt: the user's original question about the image.

    Returns:
        The Miku-styled reply, or an "Error: <status>" string.
    """
    await switch_model(OLLAMA_MODEL)  # likely llama3
    # Persona/system prompt is re-read from disk on every call.
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])
    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku:"
    )
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"
# Load and index once at startup
def load_miku_knowledge():
    """Read miku_lore.txt, split it into ~520-char overlapping chunks, and
    build a FAISS vector store over them using the module-level embeddings."""
    with open("miku_lore.txt", "r", encoding="utf-8") as f:
        text = f.read()
    # Local import: this splitter is only needed here.
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
    )
    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(text)]
    vectorstore = FAISS.from_documents(docs, embeddings)
    return vectorstore
def load_miku_lyrics():
    """Build a FAISS vector store over miku_lyrics.txt, split into ~500-char
    chunks with 50-char overlap, embedded with the module-level embeddings."""
    with open("miku_lyrics.txt", "r", encoding="utf-8") as lyrics_file:
        raw_lyrics = lyrics_file.read()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    chunks = [Document(page_content=piece) for piece in splitter.split_text(raw_lyrics)]
    return FAISS.from_documents(chunks, embeddings)
# Build both vector stores once at import time; re-used by every query.
miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()
async def query_ollama(user_prompt, user_id):
    """Answer `user_prompt` as Miku via Ollama, with RAG context and history.

    Pulls the top-3 lore and lyric chunks from the FAISS stores, prepends the
    last exchanges stored for `user_id`, and POSTs to /api/generate. On
    success the (prompt, reply) pair is appended to that user's history.

    Args:
        user_prompt: the user's message text.
        user_id: history key (Discord user id string or a synthetic tag).

    Returns:
        The model reply, or an "Error: <status>" string.

    Fix: removed dead locals — a combined `context` string (and the
    `combined_docs` list feeding it) was built but never used in the prompt.
    """
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)
    context_lore = "\n".join([doc.page_content for doc in relevant_docs_lore])
    context_lyrics = "\n".join([doc.page_content for doc in relevant_docs_lyrics])
    # Persona definition (re-read from disk each call)
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Build conversation history
    history = conversation_history[user_id]
    history_text = "\n".join([f"User: {u}\nMiku: {m}" for u, m in history])
    # Combine prompt: lore + lyrics context, then the dialogue transcript.
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nUser: {user_prompt}\nMiku:"
    )
    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"
async def send_monday_video():
    """Generate a Miku Monday message with the LLM and post it, plus the
    video link, to every configured channel.

    Fix: a missing channel no longer aborts the whole broadcast — it is
    skipped with `continue` (previously `return`, which silently dropped all
    remaining channels after the first miss).
    """
    await switch_model(OLLAMA_MODEL)
    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")
    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"
    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]
    for channel_id in target_channel_ids:
        channel = client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            continue  # was `return`: one bad id used to skip all later channels
        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")
            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")
async def send_bedtime_reminder():
    """Pick a random online, non-bot member in each bedtime channel's guild
    and send them an LLM-generated goodnight ping."""
    await switch_model(OLLAMA_MODEL)
    for channel_id in BEDTIME_CHANNEL_IDS:
        channel = client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue
        guild = channel.guild
        # Filter online members (excluding bots). Relies on the members and
        # presences intents enabled on the client at module level.
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]
        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue
        chosen_one = random.choice(online_members)
        # Generate bedtime message
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")
        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")
def schedule_random_bedtime():
    """Queue a one-shot bedtime reminder for 20:30 local time plus a random
    0-29 minute offset — tomorrow if 20:30 has already passed today."""
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)
    # Add random offset (0-29 mins) so the ping lands at a different minute each day
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)
    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    """Burn "@username" (and "@everyone") captions into the meme video.

    Each caption is drawn centred on one of ten preset positions for a fixed
    time window; ffmpeg re-encodes the video and copies the audio stream.

    Args:
        base_video_path: source video file.
        output_path: destination file for the rendered video.
        username: display name to overlay (sanitized before use).

    Fixes:
    - removed the unused `text` local;
    - `username` is sanitized so quotes/colons/backslashes cannot break the
      drawtext filter string (such names previously made ffmpeg fail);
    - corrected the stale "six positions" comment (there are ten).

    NOTE(review): subprocess.run blocks the event loop for the whole encode;
    consider asyncio.create_subprocess_exec if responsiveness matters.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    # Strip characters with special meaning inside a drawtext filter value.
    safe_username = re.sub(r"[':\\]", "", username)
    # Define the ten caption positions (x, y)
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }
    # Each entry: (start_time, end_time, position_index, text kind)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]
    # Build drawtext filters, one per caption window.
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]
        # Determine actual text content
        text_content = f"@{safe_username}" if text_type == "username" else text_type
        # Centre the text on the preset point.
        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)
    vf_string = ",".join(drawtext_filters)
    ffmpeg_command = [
        "ffmpeg",
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]
    try:
        subprocess.run(ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")
@client.event
async def on_ready():
    """Startup hook: log the login and register all scheduled jobs.

    NOTE(review): discord.py can fire on_ready again after a reconnect,
    which would add duplicate cron jobs here — confirm whether that matters
    for this deployment.
    """
    print(f'🎤 MikuBot connected as {client.user}')
    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)
    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)
    scheduler.start()
@client.event
async def on_message(message):
    """Main message handler: the meme-video command, Miku-addressed chat
    (with optional image analysis), and the !reset / !monday commands.

    Fix: the text-only chat path previously recomputed
    `prompt = message.content[5:]`, which sliced one character short of the
    "miku, " prefix (leaving a leading comma) and mangled ", miku"-suffix
    messages entirely. The prompt cleaned at the top of the branch is now
    used throughout. Also removed the unused `caption` local.
    """
    if message.author == client.user:
        return
    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name
                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"
                await overlay_username_with_ffmpeg(base_video, output_video, target_username)
                await replied_msg.reply(file=discord.File(output_video))
            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return
    text = message.content.strip()
    # Addressed as "miku, ..." prefix, "..., miku" suffix, or ", miku," infix.
    if re.search(r'^(miku,)|((, miku)[\?\!\.\s,]*)$', message.content.strip(), re.IGNORECASE) or ", miku," in message.content.lower():
        # Clean the prompt: drop the address so only the question remains.
        if text.lower().startswith("miku, "):
            prompt = text[6:].strip()
        else:
            prompt = re.sub(r', miku[\?\!\.\s]*$', '', text, flags=re.IGNORECASE).strip()
        async with message.channel.typing():
            # If message has an image attachment, describe it and answer in persona.
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return
            # Text-only prompt: use the prompt cleaned above (bug fix — see docstring).
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)
    if message.content.lower().strip() == "!reset":
        conversation_history[str(message.author.id)].clear()
        await message.channel.send("Okay! Memory reset for you~ ✨")
    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        return
client.run(DISCORD_BOT_TOKEN)

View File

@@ -1,540 +0,0 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string
import base64
import subprocess
import aiofiles
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from collections import defaultdict, deque
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
import datetime
from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta
# --- Module-level setup: scheduler, per-user memory, config, Discord client ---
scheduler = AsyncIOScheduler()
# Channels that receive the nightly bedtime reminder.
BEDTIME_CHANNEL_IDS = [761014220707332107]
# Stores last 5 exchanges per user (as deque); key is the user id string.
conversation_history = defaultdict(lambda: deque(maxlen=5))
# Configuration comes from the environment.
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://ollama:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "mistral")
# Embedding backend for the FAISS vector stores built below.
embeddings = OllamaEmbeddings(
    model=OLLAMA_MODEL,
    base_url=OLLAMA_URL
)
# Set up Discord client; members/presences intents are needed by
# send_bedtime_reminder to enumerate online guild members.
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
current_model = None  # Track currently loaded model name
# Substrings that trigger an instant heart reaction (see detect_and_react_to_kindness).
KINDNESS_KEYWORDS = [
    "thank you", "love you", "luv u", "you're the best", "so cute",
    "adorable", "amazing", "sweet", "kind", "great job", "well done",
    "precious", "good girl", "cutie", "angel", "my favorite", "so helpful"
]
# Pool of emoji used for kindness reactions; one is chosen at random.
HEART_REACTIONS = ["💙", "💝", "💖", "💕", "💜", "❤️‍🔥", "☺️"]
# Message ids already reacted to, so each message gets at most one heart.
kindness_reacted_messages = set()
# Switch model
async def switch_model(model_name: str, timeout: int = 600):
    """Make `model_name` the active Ollama model, unloading any others first.

    Warms the model by POSTing a dummy prompt to /api/generate once per second
    until it answers 200, then records it in the module-global `current_model`.
    No-op if `current_model` already matches.

    Raises:
        TimeoutError: if the model never becomes available within ~`timeout`
            polling attempts.

    NOTE(review): /api/show (GET) and /api/stop look unusual for listing and
    stopping models — Ollama documents /api/ps for listing loaded models.
    Confirm against the deployed Ollama version.
    """
    global current_model
    if current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return
    # Unload all other models to clear VRAM
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{OLLAMA_URL}/api/show") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(f"{OLLAMA_URL}/api/stop", json={"name": model["name"]})
            else:
                print("⚠️ Failed to check currently loaded models.")
    print(f"🔄 Switching to model '{model_name}'...")
    async with aiohttp.ClientSession() as session:
        await session.post(f"{OLLAMA_URL}/api/stop")
    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again
    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")
async def is_miku_addressed(message) -> bool:
    """Return True when the message is directed at Miku: either a reply to
    one of the bot's own messages, or text that names "miku" at a natural
    boundary (start/end of the text, around punctuation)."""
    # A reply whose parent was authored by the bot counts as addressing Miku.
    if message.reference:
        try:
            parent = await message.channel.fetch_message(message.reference.message_id)
        except Exception as e:
            print(f"⚠️ Could not fetch referenced message: {e}")
        else:
            if parent.author == message.guild.me:  # or client.user if you use client
                return True
    # Otherwise look for "miku" used as an address within the text itself.
    pattern = r'(?<![\w\(])(?:[^\w\s]{0,2}\s*)?miku(?:\s*[^\w\s]{0,2})?(?=,|\s*,|[!\.?\s]*$)'
    return re.search(pattern, message.content.strip(), re.IGNORECASE) is not None
async def download_and_encode_image(url):
    """Fetch an image over HTTP and return its bytes base64-encoded as a
    UTF-8 string, or None when the server does not answer 200."""
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as response:
            if response.status == 200:
                raw = await response.read()
                return base64.b64encode(raw).decode('utf-8')
    return None
async def analyze_image_with_qwen(base64_img):
    """Describe an image using the vision model ("moondream") via Ollama.

    Args:
        base64_img: base64-encoded image data from download_and_encode_image.

    Returns:
        The model's textual description, or an "Error: <status>" string.
    """
    # Swap the vision model in (unloads the chat model to free VRAM).
    await switch_model("moondream")
    payload = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No description.")
            else:
                return f"Error: {response.status}"
async def rephrase_as_miku(qwen_output, user_prompt):
    """Turn an objective image description into an in-character Miku reply.

    Retrieves the lore chunks most similar to the description, then asks the
    main chat model (OLLAMA_MODEL) to answer in persona.

    Args:
        qwen_output: image description text from the vision model.
        user_prompt: the user's original question about the image.

    Returns:
        The Miku-styled reply, or an "Error: <status>" string.
    """
    await switch_model(OLLAMA_MODEL)  # likely llama3
    # Persona/system prompt is re-read from disk on every call.
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])
    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku:"
    )
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"
# Load and index once at startup
def load_miku_knowledge():
    """Read miku_lore.txt, split it into ~520-char overlapping chunks, and
    build a FAISS vector store over them using the module-level embeddings."""
    with open("miku_lore.txt", "r", encoding="utf-8") as f:
        text = f.read()
    # Local import: this splitter is only needed here.
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
    )
    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(text)]
    vectorstore = FAISS.from_documents(docs, embeddings)
    return vectorstore
def load_miku_lyrics():
    """Build a FAISS vector store over miku_lyrics.txt, split into ~500-char
    chunks with 50-char overlap, embedded with the module-level embeddings."""
    with open("miku_lyrics.txt", "r", encoding="utf-8") as lyrics_file:
        raw_lyrics = lyrics_file.read()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    chunks = [Document(page_content=piece) for piece in splitter.split_text(raw_lyrics)]
    return FAISS.from_documents(chunks, embeddings)
# Build both vector stores once at import time; re-used by every query.
miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()
async def query_ollama(user_prompt, user_id):
    """Answer `user_prompt` as Miku via Ollama, with RAG context and history.

    Pulls the top-3 lore and lyric chunks from the FAISS stores, prepends the
    last exchanges stored for `user_id`, and POSTs to /api/generate. On
    success the (prompt, reply) pair is appended to that user's history.

    Args:
        user_prompt: the user's message text.
        user_id: history key (Discord user id string or a synthetic tag).

    Returns:
        The model reply, or an "Error: <status>" string.

    Fix: removed dead locals — a combined `context` string (and the
    `combined_docs` list feeding it) was built but never used in the prompt.
    """
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)
    context_lore = "\n".join([doc.page_content for doc in relevant_docs_lore])
    context_lyrics = "\n".join([doc.page_content for doc in relevant_docs_lyrics])
    # Persona definition (re-read from disk each call)
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Build conversation history
    history = conversation_history[user_id]
    history_text = "\n".join([f"User: {u}\nMiku: {m}" for u, m in history])
    # Combine prompt: lore + lyrics context, then the dialogue transcript.
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nUser: {user_prompt}\nMiku:"
    )
    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"
async def send_monday_video():
    """Generate a Miku Monday message with the LLM and post it, plus the
    video link, to every configured channel.

    Fix: a missing channel no longer aborts the whole broadcast — it is
    skipped with `continue` (previously `return`, which silently dropped all
    remaining channels after the first miss).
    """
    await switch_model(OLLAMA_MODEL)
    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")
    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"
    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]
    for channel_id in target_channel_ids:
        channel = client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            continue  # was `return`: one bad id used to skip all later channels
        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")
            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")
async def send_bedtime_reminder():
    """Pick a random online, non-bot member in each bedtime channel's guild
    and send them an LLM-generated goodnight ping.

    One specific user id is always added to the candidate pool, even when
    offline, so they can be chosen regardless of presence.
    """
    await switch_model(OLLAMA_MODEL)
    for channel_id in BEDTIME_CHANNEL_IDS:
        channel = client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue
        guild = channel.guild
        # Filter online members (excluding bots). Relies on the members and
        # presences intents enabled on the client at module level.
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]
        # Always include this member in the draw, online or not.
        specific_user_id = 214857593045254151  # target user ID
        specific_user = guild.get_member(specific_user_id)
        if specific_user and specific_user not in online_members:
            online_members.append(specific_user)
        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue
        chosen_one = random.choice(online_members)
        # Generate bedtime message
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")
        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")
def schedule_random_bedtime():
    """Queue a one-shot bedtime reminder for 20:30 local time plus a random
    0-29 minute offset — tomorrow if 20:30 has already passed today."""
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)
    # Add random offset (0-29 mins) so the ping lands at a different minute each day
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)
    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    """Burn "@username" (and "@everyone") captions into the meme video.

    Each caption is drawn centred on one of ten preset positions for a fixed
    time window; ffmpeg re-encodes the video and copies the audio stream.

    Args:
        base_video_path: source video file.
        output_path: destination file for the rendered video.
        username: display name to overlay (sanitized before use).

    Fixes:
    - removed the unused `text` local;
    - `username` is sanitized so quotes/colons/backslashes cannot break the
      drawtext filter string (such names previously made ffmpeg fail);
    - corrected the stale "six positions" comment (there are ten).

    NOTE(review): subprocess.run blocks the event loop for the whole encode;
    consider asyncio.create_subprocess_exec if responsiveness matters.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    # Strip characters with special meaning inside a drawtext filter value.
    safe_username = re.sub(r"[':\\]", "", username)
    # Define the ten caption positions (x, y)
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }
    # Each entry: (start_time, end_time, position_index, text kind)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]
    # Build drawtext filters, one per caption window.
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]
        # Determine actual text content
        text_content = f"@{safe_username}" if text_type == "username" else text_type
        # Centre the text on the preset point.
        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)
    vf_string = ",".join(drawtext_filters)
    ffmpeg_command = [
        "ffmpeg",
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]
    try:
        subprocess.run(ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")
async def detect_and_react_to_kindness(message, after_reply=False):
    """React with a random heart emoji when `message` is kind toward Miku.

    Two-stage detection: a cheap keyword scan runs first; if it finds nothing
    and `after_reply` is True, the LLM is asked for a yes/no judgement.
    Message ids are recorded in `kindness_reacted_messages` so each message
    gets at most one reaction across both stages.

    Args:
        message: the Discord message to inspect.
        after_reply: when False, the expensive model check is deferred
            (the caller invokes this again with after_reply=True later).
    """
    if message.id in kindness_reacted_messages:
        return  # Already reacted — skip
    content = message.content.lower()
    emoji = random.choice(HEART_REACTIONS)
    # 1. Keyword-based detection
    if any(keyword in content for keyword in KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            # NOTE(review): this ad-hoc attribute is never read anywhere in
            # this file — presumably leftover; confirm before relying on it.
            message.kindness_reacted = True  # Mark as done
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return
    # 2. If not after_reply, defer model-based check
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return
    # 3. Model-based detection
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")
        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")
@client.event
async def on_ready():
    """Startup hook: log the login and register all scheduled jobs.

    NOTE(review): discord.py can fire on_ready again after a reconnect,
    which would add duplicate cron jobs here — confirm whether that matters
    for this deployment.
    """
    print(f'🎤 MikuBot connected as {client.user}')
    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)
    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)
    scheduler.start()
@client.event
async def on_message(message):
    """Main message handler: the meme-video command, Miku-addressed chat
    (with optional image analysis and kindness reactions), and the
    !reset / !monday commands."""
    # Never respond to our own messages.
    if message.author == client.user:
        return
    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name
                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"
                await overlay_username_with_ffmpeg(base_video, output_video, target_username)
                caption = f"Here you go, @{target_username}! 🌟"
                #await message.channel.send(content=caption, file=discord.File(output_video))
                await replied_msg.reply(file=discord.File(output_video))
            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return
    text = message.content.strip()
    if await is_miku_addressed(message):
        prompt = text  # No cleanup — keep it raw
        # 1st kindness check with just keywords
        await detect_and_react_to_kindness(message)
        async with message.channel.typing():
            # If message has an image attachment, describe it and answer in persona.
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return
            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)
            # 2nd kindness check (only if no keywords detected)
            await detect_and_react_to_kindness(message, after_reply=True)
    if message.content.lower().strip() == "!reset":
        conversation_history[str(message.author.id)].clear()
        await message.channel.send("Okay! Memory reset for you~ ✨")
    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return
client.run(DISCORD_BOT_TOKEN)

View File

@@ -1,665 +0,0 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string
import base64
import subprocess
import aiofiles
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from collections import defaultdict, deque
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
from discord.ext import tasks
import datetime
from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta
# --- Module-level setup: scheduler, per-user memory, config, Discord client ---
scheduler = AsyncIOScheduler()
# Channels that receive the nightly bedtime reminder.
BEDTIME_CHANNEL_IDS = [761014220707332107]
# Stores last 5 exchanges per user (as deque); key is the user id string.
conversation_history = defaultdict(lambda: deque(maxlen=5))
# Configuration comes from the environment.
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://ollama:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "mistral")
# Embedding backend for the FAISS vector stores built below.
embeddings = OllamaEmbeddings(
    model=OLLAMA_MODEL,
    base_url=OLLAMA_URL
)
# Set up Discord client; members/presences intents are needed to
# enumerate online guild members for the bedtime reminder.
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
current_model = None  # Track currently loaded model name
# Substrings that trigger an instant heart reaction.
KINDNESS_KEYWORDS = [
    "thank you", "love you", "luv u", "you're the best", "so cute",
    "adorable", "amazing", "sweet", "kind", "great job", "well done",
    "precious", "good girl", "cutie", "angel", "my favorite", "so helpful"
]
# Pool of emoji used for kindness reactions; one is chosen at random.
HEART_REACTIONS = ["💙", "💝", "💖", "💕", "💜", "❤️‍🔥", "☺️"]
# Message ids already reacted to, so each message gets at most one heart.
kindness_reacted_messages = set()
# Mood system: when AUTO_MOOD is true, CURRENT_MOOD may be rotated among
# AVAILABLE_MOODS; each mood name maps to a file under moods/ (see
# load_mood_description).
AUTO_MOOD = True
CURRENT_MOOD = "neutral"
AVAILABLE_MOODS = [
    "bubbly", "sleepy", "curious", "shy", "serious", "excited",
    "melancholy", "flirty", "romantic", "irritated", "angry", "neutral"
]
# Switch model
async def switch_model(model_name: str, timeout: int = 600):
    """Make `model_name` the active Ollama model, unloading any others first.

    Warms the model by POSTing a dummy prompt to /api/generate once per second
    until it answers 200, then records it in the module-global `current_model`.
    No-op if `current_model` already matches.

    Raises:
        TimeoutError: if the model never becomes available within ~`timeout`
            polling attempts.

    NOTE(review): /api/show (GET) and /api/stop look unusual for listing and
    stopping models — Ollama documents /api/ps for listing loaded models.
    Confirm against the deployed Ollama version.
    """
    global current_model
    if current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return
    # Unload all other models to clear VRAM
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{OLLAMA_URL}/api/show") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(f"{OLLAMA_URL}/api/stop", json={"name": model["name"]})
            else:
                print("⚠️ Failed to check currently loaded models.")
    print(f"🔄 Switching to model '{model_name}'...")
    async with aiohttp.ClientSession() as session:
        await session.post(f"{OLLAMA_URL}/api/stop")
    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again
    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")
async def is_miku_addressed(message) -> bool:
    """Return True when the message is directed at Miku.

    Either the message replies to one of Miku's own messages, or the word
    "miku" appears at a sentence boundary (start/punctuation-delimited).
    """
    # A direct reply to one of Miku's messages counts as addressing her.
    if message.reference:
        try:
            replied = await message.channel.fetch_message(message.reference.message_id)
            if replied.author == message.guild.me:  # or client.user if you use client
                return True
        except Exception as exc:
            print(f"⚠️ Could not fetch referenced message: {exc}")
    pattern = re.compile(
        r'(?<![\w\(])(?:[^\w\s]{0,2}\s*)?miku(?:\s*[^\w\s]{0,2})?(?=,|\s*,|[!\.?\s]*$)',
        re.IGNORECASE,
    )
    return pattern.search(message.content.strip()) is not None
def load_mood_description(mood_name: str) -> str:
    """Read the mood description text from moods/<mood_name>.txt.

    Falls back to the "neutral" mood file when the requested file is missing.
    If even moods/neutral.txt is absent, returns the literal string "neutral"
    instead of recursing — the old code recursed into itself forever
    (RecursionError) in that case.
    """
    path = os.path.join("moods", f"{mood_name}.txt")
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()
    except FileNotFoundError:
        if mood_name == "neutral":
            # The fallback file itself is missing — return a minimal default.
            return "neutral"
        print(f"⚠️ Mood file '{mood_name}' not found. Falling back to default.")
        return load_mood_description("neutral")
# Detect mood cues from Miku's response
def detect_mood_shift(response_text):
    """Return the name of the first mood whose trigger phrase occurs in
    ``response_text`` (case-insensitive substring match), or None.

    Moods are scanned in declaration order, so earlier moods win ties.
    """
    mood_keywords = {
        "neutral": [
            "okay", "sure", "alright", "i see", "understood", "hmm",
            "sounds good", "makes sense", "alrighty", "fine", "got it"
        ],
        "bubbly": [
            "so excited", "feeling bubbly", "super cheerful", "yay!", "✨", "nya~",
            "kyaa~", "heehee", "bouncy", "so much fun", "im glowing!", "nee~", "teehee", "I'm so happy"
        ],
        "sleepy": [
            "i'm sleepy", "getting tired", "yawn", "so cozy", "zzz", "nap time",
            "just five more minutes", "snooze", "cuddle up", "dozing off", "so warm"
        ],
        "curious": [
            "i'm curious", "want to know more", "why?", "hmm?", "tell me more", "interesting!",
            "whats that?", "how does it work?", "i wonder", "fascinating", "??", "🧐"
        ],
        "shy": [
            "um...", "sorry if that was weird", "im kind of shy", "eep", "i hope thats okay", "im nervous",
            "blushes", "oh no", "hiding face", "i dont know what to say", "heh...", "/////"
        ],
        "serious": [
            "lets be serious", "focus on the topic", "this is important", "i mean it", "be honest",
            "we need to talk", "listen carefully", "lets not joke", "truthfully", "lets be real"
        ],
        "excited": [
            "OMG!", "this is amazing", "im so hyped", "YAY!!", "lets go!", "incredible!!!",
            "AHHH!", "best day ever", "this is it!", "totally pumped", "i cant wait", "🔥🔥🔥", "i'm excited", "Wahaha"
        ],
        "melancholy": [
            "feeling nostalgic", "kind of sad", "just thinking a lot", "like rain on glass", "memories",
            "bittersweet", "sigh", "quiet day", "blue vibes", "longing", "melancholy", "softly"
        ],
        "flirty": [
            "hey cutie", "arent you sweet", "teasing you~", "wink wink", "is that a blush?", "giggle~",
            "come closer", "miss me?", "you like that, huh?", "🥰", "flirt mode activated", "youre kinda cute"
        ],
        "romantic": [
            "you mean a lot to me", "my heart", "i adore you", "so beautiful", "so close", "love letter",
            "my dearest", "forever yours", "im falling for you", "sweetheart", "💖", "you're my everything"
        ],
        "irritated": [
            "ugh", "seriously?", "can we not", "whatever", "i'm annoyed", "you dont get it",
            "rolling my eyes", "why do i even bother", "ugh, again?", "🙄", "dont start", "this again?"
        ],
        "angry": [
            "stop it", "enough!", "thats not okay", "im mad", "i said no", "dont push me",
            "you crossed the line", "furious", "this is unacceptable", "😠", "im done", "dont test me"
        ]
    }
    lowered = response_text.lower()
    for mood, phrases in mood_keywords.items():
        matched = next((p for p in phrases if p.lower() in lowered), None)
        if matched is not None:
            print(f"*️⃣ Mood keyword triggered: {matched}")
            return mood
    return None
@tasks.loop(hours=1)
async def rotate_mood():
    """Hourly auto-rotation: load a different random mood description.

    BUG FIX: the old code compared a candidate mood *name* against
    CURRENT_MOOD, which holds the full description *text* once loaded, so the
    "don't repeat the current mood" retry loop never detected a repeat.
    Track the mood name in its own global instead.
    """
    global CURRENT_MOOD, CURRENT_MOOD_NAME
    # globals().get keeps this safe on the first run, before the name exists.
    current_name = globals().get("CURRENT_MOOD_NAME", "neutral")
    candidates = [m for m in AVAILABLE_MOODS if m != current_name] or AVAILABLE_MOODS
    new_mood = random.choice(candidates)
    CURRENT_MOOD_NAME = new_mood
    CURRENT_MOOD = load_mood_description(new_mood)
    print(f"⏰ Mood auto-rotated to: {new_mood}")
async def download_and_encode_image(url):
    """Fetch an image over HTTP and return it base64-encoded.

    Returns None when the server replies with anything other than 200.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status != 200:
                return None
            raw_bytes = await resp.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
async def analyze_image_with_qwen(base64_img):
    """Run the moondream vision model over a base64 image and return its
    plain-text description (or an "Error: <status>" string on failure)."""
    await switch_model("moondream")
    request_body = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{OLLAMA_URL}/api/generate",
            json=request_body,
            headers={"Content-Type": "application/json"},
        ) as response:
            if response.status != 200:
                return f"Error: {response.status}"
            data = await response.json()
            return data.get("response", "No description.")
async def rephrase_as_miku(qwen_output, user_prompt):
    """Turn a raw image description into an in-character Miku reply.

    Loads the persona system prompt, retrieves lore passages related to the
    image description, and asks the chat model to answer as Miku in her
    current mood.
    """
    await switch_model(OLLAMA_MODEL)  # likely llama3
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Lore retrieval is keyed on the image description, not the user prompt.
    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])
    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku is currently feeling: {CURRENT_MOOD}\n Please respond in a way that reflects this emotional tone.\n\n"
        f"Miku:"
    )
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"
# Load and index once at startup
def load_miku_knowledge():
    """Build the FAISS lore index from miku_lore.txt (called once at startup)."""
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    with open("miku_lore.txt", "r", encoding="utf-8") as src:
        lore_text = src.read()
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
    )
    documents = [Document(page_content=chunk) for chunk in splitter.split_text(lore_text)]
    return FAISS.from_documents(documents, embeddings)
def load_miku_lyrics():
    """Build the FAISS lyrics index from miku_lyrics.txt (called once at startup)."""
    with open("miku_lyrics.txt", "r", encoding="utf-8") as src:
        lyrics_text = src.read()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    documents = [Document(page_content=chunk) for chunk in splitter.split_text(lyrics_text)]
    return FAISS.from_documents(documents, embeddings)
# Build both RAG indexes once at import time (startup cost, then in-memory).
miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()
async def query_ollama(user_prompt, user_id):
    """Generate a Miku reply for ``user_prompt`` with RAG context and history.

    Retrieves lore and lyrics snippets, prepends the per-user conversation
    history and current mood, calls Ollama, and records the exchange in
    ``conversation_history``.  Returns the reply text or "Error: <status>".
    """
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)
    context_lore = "\n".join(doc.page_content for doc in relevant_docs_lore)
    context_lyrics = "\n".join(doc.page_content for doc in relevant_docs_lyrics)
    # (Removed unused `combined_docs`/`context` locals — only the two
    # per-source context strings ever reached the prompt.)
    # Persona definition
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Build conversation history
    history = conversation_history[user_id]
    history_text = "\n".join(f"User: {u}\nMiku: {m}" for u, m in history)
    global CURRENT_MOOD
    # Combine prompt
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nMiku is currently feeling: {CURRENT_MOOD}\nPlease respond in a way that reflects this emotional tone.\nUser: {user_prompt}\nMiku:"
    )
    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"
async def send_monday_video():
    """Post a generated Miku Monday message plus the video link to each target channel."""
    await switch_model(OLLAMA_MODEL)
    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")
    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"
    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]
    for channel_id in target_channel_ids:
        channel = client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            # BUG FIX: was `return`, which silently skipped every remaining channel.
            continue
        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")
            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")
async def send_bedtime_reminder():
    """Ping one random online (non-bot) member per bedtime channel with a
    generated good-night message."""
    await switch_model(OLLAMA_MODEL)
    for channel_id in BEDTIME_CHANNEL_IDS:
        channel = client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue
        guild = channel.guild
        # Filter online members (excluding bots)
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]
        specific_user_id = 214857593045254151  # this user is always eligible, even while offline
        specific_user = guild.get_member(specific_user_id)
        if specific_user and specific_user not in online_members:
            online_members.append(specific_user)
        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue
        chosen_one = random.choice(online_members)
        # Generate bedtime message.  BUG FIX: the sentence about staying up
        # late and the mood line used to be glued together without any
        # separator ("...so late.Miku is..."); a newline keeps them apart.
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late.\n"
            f"Miku is currently feeling: {CURRENT_MOOD}\nPlease word in a way that reflects this emotional tone."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")
        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")
def schedule_random_bedtime():
    """Schedule a one-shot bedtime reminder at 20:30 plus a random 0-29 minute offset."""
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)
    # Add a random offset (0-29 mins) so the reminder doesn't land at the
    # exact same minute every night
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)
    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    """Burn timed "@username" / "@everyone" captions into the meme video.

    Builds one drawtext filter per (start, end, position) entry and runs
    ffmpeg.  NOTE(review): subprocess.run blocks the event loop for the whole
    encode — consider asyncio.create_subprocess_exec.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"

    def _escape_drawtext(value):
        # Escape characters with special meaning inside a quoted drawtext
        # value: backslash, the wrapping single quote, the option separator
        # ':' and the filter separator ','.  Without this, a display name
        # containing e.g. an apostrophe corrupts the whole filtergraph.
        return (value.replace("\\", "\\\\")
                     .replace("'", "\\\\\\'")
                     .replace(":", "\\:")
                     .replace(",", "\\,"))

    # The ten caption positions (x, y), keyed by position index.
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }
    # Each entry: (start_time, end_time, position_index, text kind)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]
    # Build drawtext filters
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]
        # Determine the actual text, then sanitize it for the filter string.
        text_content = f"@{username}" if text_type == "username" else text_type
        text_content = _escape_drawtext(text_content)
        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)
    vf_string = ",".join(drawtext_filters)
    ffmpeg_command = [
        "ffmpeg",
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]
    try:
        subprocess.run(ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")
async def detect_and_react_to_kindness(message, after_reply=False):
    """React with a random heart emoji when a message is kind to Miku.

    Two-phase detection: a cheap keyword scan first; if that misses and
    ``after_reply`` is True, the LLM is asked to classify the message.
    """
    if message.id in kindness_reacted_messages:
        return  # Already reacted — skip
    content = message.content.lower()
    emoji = random.choice(HEART_REACTIONS)
    # 1. Keyword-based detection
    if any(keyword in content for keyword in KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            # NOTE(review): this ad-hoc attribute is written but never read
            # anywhere visible — confirm before relying on or removing it.
            message.kindness_reacted = True  # Mark as done
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return
    # 2. If not after_reply, defer model-based check
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return
    # 3. Model-based detection
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")
        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")
@client.event
async def on_ready():
    """Startup hook: start the mood loop and register all scheduled jobs."""
    print(f'🎤 MikuBot connected as {client.user}')
    # Change mood every 1 hour
    rotate_mood.start()
    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)
    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)
    scheduler.start()
@client.event
async def on_message(message):
    """Main message handler: meme-video trigger, Miku-addressed chat (text or
    image), and the !reset / !monday / !miku mood* commands."""
    # Never respond to our own messages.
    if message.author == client.user:
        return
    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name
                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"
                await overlay_username_with_ffmpeg(base_video, output_video, target_username)
                caption = f"Here you go, @{target_username}! 🌟"
                #await message.channel.send(content=caption, file=discord.File(output_video))
                await replied_msg.reply(file=discord.File(output_video))
            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return
    text = message.content.strip()
    global CURRENT_MOOD
    if await is_miku_addressed(message):
        prompt = text  # No cleanup — keep it raw
        # 1st kindness check with just keywords
        if CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message)
        async with message.channel.typing():
            # If message has an image attachment
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return
            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)
        # 2nd kindness check (only if no keywords detected)
        if CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message, after_reply=True)
    if message.content.lower().strip() == "!reset":
        conversation_history[str(message.author.id)].clear()
        await message.channel.send("Okay! Memory reset for you~ ✨")
    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return
    if message.content.startswith("!miku mood "):
        new_mood = message.content.split("!miku mood ")[1].strip().lower()
        path = os.path.join("moods", f"{new_mood}.txt")
        if os.path.exists(path):
            CURRENT_MOOD = load_mood_description(new_mood)
            await message.channel.send(f"🌈 Miku's mood has been set to **{new_mood}**!")
        else:
            await message.channel.send("⚠️ I don't recognize that mood. Try again with a valid one.")
        return
    if message.content.strip().lower() == "!miku mood-reset":
        CURRENT_MOOD = load_mood_description("neutral")
        await message.channel.send("🔄 Mikus mood has been reset to **neutral**.")
        return
    if message.content.strip().lower() == "!miku mood-check":
        await message.channel.send(f"☑️ Mikus mood is currently {CURRENT_MOOD}.")
    # Auto-mood: only runs when a chat `response` was generated above.
    if AUTO_MOOD and 'response' in locals():
        detected = detect_mood_shift(response)
        if detected and detected != CURRENT_MOOD:
            CURRENT_MOOD = load_mood_description(detected)
            print(f"🔄 Auto-updated mood to: {detected}")
# Blocking entry point: connect to Discord and run the event loop until shutdown.
client.run(DISCORD_BOT_TOKEN)

View File

@@ -1,728 +0,0 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string
import base64
import subprocess
import aiofiles
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from collections import defaultdict, deque
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
from discord.ext import tasks
import datetime
from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta
# Async scheduler for cron-style jobs (Monday videos, bedtime reminders).
scheduler = AsyncIOScheduler()
# Channels eligible for the nightly bedtime reminder.
BEDTIME_CHANNEL_IDS = [761014220707332107]
# Stores last 5 exchanges per user (as deque)
conversation_history = defaultdict(lambda: deque(maxlen=5))
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://ollama:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "mistral")
# Embeddings are served by the same Ollama instance/model used for chat.
embeddings = OllamaEmbeddings(
    model=OLLAMA_MODEL,
    base_url=OLLAMA_URL
)
# Set up Discord client
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
current_model = None  # Track currently loaded model name
# Phrases that trigger an immediate heart reaction without asking the model.
KINDNESS_KEYWORDS = [
    "thank you", "love you", "luv u", "you're the best", "so cute",
    "adorable", "amazing", "sweet", "kind", "great job", "well done",
    "precious", "good girl", "cutie", "angel", "my favorite", "so helpful"
]
HEART_REACTIONS = ["💙", "💝", "💖", "💕", "💜", "❤️‍🔥", "☺️"]
# Message IDs Miku has already reacted to (prevents double reactions).
kindness_reacted_messages = set()
# When True, Miku's mood auto-updates from keywords found in her own replies.
AUTO_MOOD = True
# Holds the full mood *description* text once a mood file has been loaded.
CURRENT_MOOD = "neutral"
CURRENT_MOOD_NAME = "neutral"   # Mood *name* matching CURRENT_MOOD's description.
PREVIOUS_MOOD_NAME = "neutral"  # Name of the mood before the last change.
# Sleep-mode flag — presumably toggled by the sleep logic (set_sleep_state);
# its readers are outside this view, TODO confirm.
IS_SLEEPING = False
AVAILABLE_MOODS = [
    "bubbly", "sleepy", "curious", "shy", "serious", "excited",
    "melancholy", "flirty", "romantic", "irritated", "angry", "neutral"
]
# Switch model
async def switch_model(model_name: str, timeout: int = 600):
    """Ensure ``model_name`` is loaded in Ollama, unloading any other model first.

    Polls /api/generate with a dummy prompt until the model answers, or raises
    TimeoutError after ~``timeout`` attempts (one per second plus request time).
    """
    global current_model
    if current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return
    async with aiohttp.ClientSession() as session:
        # Unload all other models to clear VRAM.  GET /api/ps is the Ollama
        # endpoint that lists currently-loaded models; a generate request with
        # keep_alive=0 asks Ollama to evict a model.  (The old code used
        # GET /api/show and POST /api/stop, neither of which exists in the
        # Ollama HTTP API, so the unload step silently never worked.)
        async with session.get(f"{OLLAMA_URL}/api/ps") as resp:
            if resp.status == 200:
                data = await resp.json()
                for model in data.get("models", []):
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(
                            f"{OLLAMA_URL}/api/generate",
                            json={"model": model["name"], "keep_alive": 0},
                        )
            else:
                print("⚠️ Failed to check currently loaded models.")
        print(f"🔄 Switching to model '{model_name}'...")
        # Warm up the new model (dummy call to preload it), polling until it
        # responds successfully.
        payload = {
            "model": model_name,
            "prompt": "Hello",
            "stream": False
        }
        headers = {"Content-Type": "application/json"}
        for _ in range(timeout):
            async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again
    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")
async def is_miku_addressed(message) -> bool:
    """Return True when the message is directed at Miku.

    Either the message replies to one of Miku's own messages, or the word
    "miku" appears at a sentence boundary (start/punctuation-delimited).
    """
    # A direct reply to one of Miku's messages counts as addressing her.
    if message.reference:
        try:
            replied = await message.channel.fetch_message(message.reference.message_id)
            if replied.author == message.guild.me:  # or client.user if you use client
                return True
        except Exception as exc:
            print(f"⚠️ Could not fetch referenced message: {exc}")
    pattern = re.compile(
        r'(?<![\w\(])(?:[^\w\s]{0,2}\s*)?miku(?:\s*[^\w\s]{0,2})?(?=,|\s*,|[!\.?\s]*$)',
        re.IGNORECASE,
    )
    return pattern.search(message.content.strip()) is not None
def load_mood_description(mood_name: str) -> str:
    """Read the mood description text from moods/<mood_name>.txt.

    Falls back to the "neutral" mood file when the requested file is missing.
    If even moods/neutral.txt is absent, returns the literal string "neutral"
    instead of recursing — the old code recursed into itself forever
    (RecursionError) in that case.
    """
    path = os.path.join("moods", f"{mood_name}.txt")
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()
    except FileNotFoundError:
        if mood_name == "neutral":
            # The fallback file itself is missing — return a minimal default.
            return "neutral"
        print(f"⚠️ Mood file '{mood_name}' not found. Falling back to default.")
        return load_mood_description("neutral")
# Detect mood cues from Miku's response
def detect_mood_shift(response_text):
    """Return the name of the first mood whose trigger phrase occurs in
    ``response_text`` (case-insensitive substring match), or None.

    Moods are scanned in declaration order; "asleep" is only reachable while
    the current mood is "sleepy".
    """
    mood_keywords = {
        "neutral": [
            "okay", "sure", "alright", "i see", "understood", "hmm",
            "sounds good", "makes sense", "alrighty", "fine", "got it"
        ],
        "bubbly": [
            "so excited", "feeling bubbly", "super cheerful", "yay!", "✨", "nya~",
            "kyaa~", "heehee", "bouncy", "so much fun", "im glowing!", "nee~", "teehee", "I'm so happy"
        ],
        "sleepy": [
            "i'm sleepy", "getting tired", "yawn", "so cozy", "zzz", "nap time",
            "just five more minutes", "snooze", "cuddle up", "dozing off", "so warm"
        ],
        "curious": [
            "i'm curious", "want to know more", "why?", "hmm?", "tell me more", "interesting!",
            "whats that?", "how does it work?", "i wonder", "fascinating", "??", "🧐"
        ],
        "shy": [
            "um...", "sorry if that was weird", "im kind of shy", "eep", "i hope thats okay", "im nervous",
            "blushes", "oh no", "hiding face", "i dont know what to say", "heh...", "/////"
        ],
        "serious": [
            "lets be serious", "focus on the topic", "this is important", "i mean it", "be honest",
            "we need to talk", "listen carefully", "lets not joke", "truthfully", "lets be real"
        ],
        "excited": [
            "OMG!", "this is amazing", "im so hyped", "YAY!!", "lets go!", "incredible!!!",
            "AHHH!", "best day ever", "this is it!", "totally pumped", "i cant wait", "🔥🔥🔥", "i'm excited", "Wahaha"
        ],
        "melancholy": [
            "feeling nostalgic", "kind of sad", "just thinking a lot", "like rain on glass", "memories",
            "bittersweet", "sigh", "quiet day", "blue vibes", "longing", "melancholy", "softly"
        ],
        "flirty": [
            "hey cutie", "arent you sweet", "teasing you~", "wink wink", "is that a blush?", "giggle~",
            "come closer", "miss me?", "you like that, huh?", "🥰", "flirt mode activated", "youre kinda cute"
        ],
        "romantic": [
            "you mean a lot to me", "my heart", "i adore you", "so beautiful", "so close", "love letter",
            "my dearest", "forever yours", "im falling for you", "sweetheart", "💖", "you're my everything"
        ],
        "irritated": [
            "ugh", "seriously?", "can we not", "whatever", "i'm annoyed", "you dont get it",
            "rolling my eyes", "why do i even bother", "ugh, again?", "🙄", "dont start", "this again?"
        ],
        "angry": [
            "stop it", "enough!", "thats not okay", "im mad", "i said no", "dont push me",
            "you crossed the line", "furious", "this is unacceptable", "😠", "im done", "dont test me"
        ],
        "asleep": [
            "goodnight", "sweet dreams", "going to bed", "I will go to bed", "zzz~", "sleep tight"
        ]
    }
    lowered = response_text.lower()
    for mood, phrases in mood_keywords.items():
        # Only allow the transition to "asleep" from the "sleepy" mood.
        if mood == "asleep" and CURRENT_MOOD_NAME != "sleepy":
            continue
        matched = next((p for p in phrases if p.lower() in lowered), None)
        if matched is not None:
            print(f"*️⃣ Mood keyword triggered: {matched}")
            return mood
    return None
async def set_sleep_state(sleeping: bool):
    """Toggle Miku's sleep mode: presence status plus a 💤 username suffix."""
    status = discord.Status.invisible if sleeping else discord.Status.online
    username = "Hatsune Miku💤" if sleeping else "Hatsune Miku"
    await client.change_presence(status=status)
    await client.user.edit(username=username)
    print("😴 Miku has gone to sleep." if sleeping else "☀️ Miku woke up.")
@tasks.loop(hours=1)
async def rotate_mood():
    """Hourly auto-rotation: load a different random mood description.

    BUG FIX: the old code compared a candidate mood *name* against
    CURRENT_MOOD, which holds the full description *text* once loaded, so the
    "don't repeat the current mood" retry loop never detected a repeat.
    Compare against CURRENT_MOOD_NAME (the tracked name) instead.
    """
    global CURRENT_MOOD, CURRENT_MOOD_NAME
    candidates = [m for m in AVAILABLE_MOODS if m != CURRENT_MOOD_NAME] or AVAILABLE_MOODS
    new_mood = random.choice(candidates)
    CURRENT_MOOD_NAME = new_mood
    CURRENT_MOOD = load_mood_description(new_mood)
    print(f"⏰ Mood auto-rotated to: {new_mood}")
async def download_and_encode_image(url):
    """Fetch an image over HTTP and return it base64-encoded.

    Returns None when the server replies with anything other than 200.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status != 200:
                return None
            raw_bytes = await resp.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
async def analyze_image_with_qwen(base64_img):
    """Run the moondream vision model over a base64 image and return its
    plain-text description (or an "Error: <status>" string on failure)."""
    await switch_model("moondream")
    request_body = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{OLLAMA_URL}/api/generate",
            json=request_body,
            headers={"Content-Type": "application/json"},
        ) as response:
            if response.status != 200:
                return f"Error: {response.status}"
            data = await response.json()
            return data.get("response", "No description.")
async def rephrase_as_miku(qwen_output, user_prompt):
    """Turn a raw image description into an in-character Miku reply.

    Loads the persona system prompt, retrieves lore passages related to the
    image description, and asks the chat model to answer as Miku in her
    current mood.
    """
    await switch_model(OLLAMA_MODEL)  # likely llama3
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Lore retrieval is keyed on the image description, not the user prompt.
    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])
    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku is currently feeling: {CURRENT_MOOD}\n Please respond in a way that reflects this emotional tone.\n\n"
        f"Miku:"
    )
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"
# Load and index once at startup
def load_miku_knowledge():
    """Build the FAISS lore index from miku_lore.txt (called once at startup)."""
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    with open("miku_lore.txt", "r", encoding="utf-8") as src:
        lore_text = src.read()
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
    )
    documents = [Document(page_content=chunk) for chunk in splitter.split_text(lore_text)]
    return FAISS.from_documents(documents, embeddings)
def load_miku_lyrics():
    """Build the FAISS lyrics index from miku_lyrics.txt (called once at startup)."""
    with open("miku_lyrics.txt", "r", encoding="utf-8") as src:
        lyrics_text = src.read()
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    documents = [Document(page_content=chunk) for chunk in splitter.split_text(lyrics_text)]
    return FAISS.from_documents(documents, embeddings)
# Build both RAG indexes once at import time (startup cost, then in-memory).
miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()
async def query_ollama(user_prompt, user_id):
    """Generate a Miku reply for ``user_prompt`` with RAG context and history.

    Retrieves lore and lyrics snippets, prepends the per-user conversation
    history and current mood, calls Ollama, and records the exchange in
    ``conversation_history``.  Returns the reply text or "Error: <status>".
    """
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)
    context_lore = "\n".join(doc.page_content for doc in relevant_docs_lore)
    context_lyrics = "\n".join(doc.page_content for doc in relevant_docs_lyrics)
    # (Removed unused `combined_docs`/`context` locals — only the two
    # per-source context strings ever reached the prompt.)
    # Persona definition
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Build conversation history
    history = conversation_history[user_id]
    history_text = "\n".join(f"User: {u}\nMiku: {m}" for u, m in history)
    global CURRENT_MOOD
    # Combine prompt
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nMiku is currently feeling: {CURRENT_MOOD}\nPlease respond in a way that reflects this emotional tone.\nUser: {user_prompt}\nMiku:"
    )
    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"
async def send_monday_video():
    """Post a generated Miku Monday message plus the video link to each target channel."""
    await switch_model(OLLAMA_MODEL)
    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")
    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"
    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]
    for channel_id in target_channel_ids:
        channel = client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            # BUG FIX: was `return`, which silently skipped every remaining channel.
            continue
        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")
            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")
async def send_bedtime_reminder():
    """Ping one random online member per bedtime channel with an LLM-written goodnight message."""
    await switch_model(OLLAMA_MODEL)
    for channel_id in BEDTIME_CHANNEL_IDS:
        channel = client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue
        guild = channel.guild
        # Filter online members (excluding bots).
        # NOTE(review): member.status requires the presences intent (enabled in HEAD).
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]
        specific_user_id = 214857593045254151  # target user ID: always eligible, even when offline
        specific_user = guild.get_member(specific_user_id)
        if specific_user and specific_user not in online_members:
            online_members.append(specific_user)
        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue
        chosen_one = random.choice(online_members)
        # Generate bedtime message, colored by the current mood.
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late."
            f"Miku is currently feeling: {CURRENT_MOOD}\nPlease word in a way that reflects this emotional tone."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")
        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")
def schedule_random_bedtime():
    """Queue tonight's one-shot bedtime reminder at 20:30 plus a random 0-29 minute offset."""
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)
    # Add random offset (0-29 mins) so the reminder time varies night to night
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)
    # One-shot job; a midnight cron job re-invokes this function daily.
    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    """Burn timed "@username" / "@everyone" captions into the video via ffmpeg drawtext.

    Args:
        base_video_path: path of the source video.
        output_path: destination path (overwritten if it already exists).
        username: display name rendered in the "@username" captions.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    # Sanitize the username: a single quote would terminate the quoted drawtext
    # value and '%' starts drawtext's text-expansion syntax, so arbitrary display
    # names could break (or inject into) the filter. Substitute lookalikes.
    safe_username = username.replace("'", "\u2019").replace("%", "\uff05")
    # Caption anchor points (x, y); centered via text_w/text_h below.
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }
    # Each entry: (start_time, end_time, position_index, text type)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]
    # Build one drawtext filter per timed caption.
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]
        # Determine actual text content ("username" is a placeholder tag).
        text_content = f"@{safe_username}" if text_type == "username" else text_type
        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)
    vf_string = ",".join(drawtext_filters)
    ffmpeg_command = [
        "ffmpeg",
        "-y",  # overwrite output without prompting (otherwise ffmpeg blocks on stdin)
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]
    try:
        # Run the (long) encode in a worker thread so it doesn't block the event loop.
        await asyncio.to_thread(subprocess.run, ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")
async def detect_and_react_to_kindness(message, after_reply=False):
    """React with a random heart emoji when the message is kind toward Miku.

    Two-stage detection: cheap keyword matching first; if that misses and
    after_reply=True, fall back to asking the LLM. Each message is reacted to
    at most once (tracked in kindness_reacted_messages).
    """
    if message.id in kindness_reacted_messages:
        return  # Already reacted — skip
    content = message.content.lower()
    emoji = random.choice(HEART_REACTIONS)
    # 1. Keyword-based detection
    if any(keyword in content for keyword in KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            message.kindness_reacted = True  # Mark as done
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return
    # 2. If not after_reply, defer model-based check (caller retries after replying)
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return
    # 3. Model-based detection (yes/no classification via the LLM)
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")
        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")
@client.event
async def on_ready():
    """Startup hook: begin mood rotation and register all scheduled jobs.

    NOTE(review): on_ready can fire again after a reconnect; rotate_mood.start()
    and the add_job calls would then run twice — confirm whether that matters.
    """
    print(f'🎤 MikuBot connected as {client.user}')
    # Change mood every 1 hour
    rotate_mood.start()
    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)
    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)
    scheduler.start()
@client.event
async def on_message(message):
    """Main message handler: meme-video trigger, LLM replies when addressed, and text commands.

    NOTE(review): command checks (!reset, !monday, !miku mood ...) run AFTER the
    addressed-reply branch and the branch does not return, so an addressed
    message falls through into them — confirm this ordering is intended.
    """
    if message.author == client.user:
        return  # never respond to our own messages
    # Meme trigger: overlay the replied-to user's name onto the video and post it.
    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name
                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"
                await overlay_username_with_ffmpeg(base_video, output_video, target_username)
                caption = f"Here you go, @{target_username}! 🌟"
                #await message.channel.send(content=caption, file=discord.File(output_video))
                await replied_msg.reply(file=discord.File(output_video))
            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return
    text = message.content.strip()
    global CURRENT_MOOD, CURRENT_MOOD_NAME, PREVIOUS_MOOD_NAME, IS_SLEEPING
    # Reply path: only when the message addresses Miku (by name or reply).
    if await is_miku_addressed(message):
        if IS_SLEEPING:
            await message.channel.send("💤 Miku is currently sleeping and can't talk right now. Try again later~")
            return
        prompt = text  # No cleanup — keep it raw
        # 1st kindness check with just keywords (skipped in hostile moods)
        if CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message)
        async with message.channel.typing():
            # If message has an image attachment, describe it then answer in persona.
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return
            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)
        # 2nd kindness check (only if no keywords detected)
        if CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message, after_reply=True)
    # --- text commands below ---
    if message.content.lower().strip() == "!reset":
        conversation_history[str(message.author.id)].clear()
        await message.channel.send("Okay! Memory reset for you~ ✨")
    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return
    if message.content.startswith("!miku mood "):
        new_mood = message.content.split("!miku mood ")[1].strip().lower()
        path = os.path.join("moods", f"{new_mood}.txt")
        if os.path.exists(path):
            CURRENT_MOOD = load_mood_description(new_mood)
            await message.channel.send(f"🌈 Miku's mood has been set to **{new_mood}**!")
        else:
            await message.channel.send("⚠️ I don't recognize that mood. Try again with a valid one.")
        return
    if message.content.strip().lower() == "!miku mood-reset":
        CURRENT_MOOD = load_mood_description("neutral")
        await message.channel.send("🔄 Mikus mood has been reset to **neutral**.")
        return
    if message.content.strip().lower() == "!miku mood-check":
        await message.channel.send(f"☑️ Mikus mood is currently {CURRENT_MOOD}.")
    # Auto-mood: inspect the reply we just sent (if any) for mood cues.
    if AUTO_MOOD and 'response' in locals():
        detected = detect_mood_shift(response)
        if detected and detected != CURRENT_MOOD_NAME:
            # Block direct transitions to asleep unless from sleepy
            if detected == "asleep" and CURRENT_MOOD_NAME != "sleepy":
                print("❌ Ignoring asleep mood; Miku wasn't sleepy before.")
            else:
                PREVIOUS_MOOD_NAME = CURRENT_MOOD_NAME
                CURRENT_MOOD_NAME = detected
                CURRENT_MOOD = load_mood_description(detected)
                print(f"🔄 Auto-updated mood to: {detected}")
                if detected == "asleep":
                    # NOTE(review): this sleep blocks further handling of this
                    # event for an hour (other events still run) — confirm intended.
                    IS_SLEEPING = True
                    await set_sleep_state(True)
                    await asyncio.sleep(3600)  # 1 hour
                    IS_SLEEPING = False
                    await set_sleep_state(False)
                    CURRENT_MOOD_NAME = "neutral"
                    CURRENT_MOOD = load_mood_description("neutral")
    if message.content.lower().strip() == "!miku sleep" and CURRENT_MOOD_NAME == "sleepy":
        CURRENT_MOOD_NAME = "asleep"
        CURRENT_MOOD = load_mood_description("asleep")
        PREVIOUS_MOOD_NAME = "sleepy"
        IS_SLEEPING = True
        await message.channel.send("Yaaawn... Okay... Goodnight~ 💫")
        await set_sleep_state(True)
        await asyncio.sleep(3600)
        IS_SLEEPING = False
        await set_sleep_state(False)
        CURRENT_MOOD_NAME = "neutral"
        CURRENT_MOOD = load_mood_description("neutral")
        return
    if message.content.lower().strip() == "!miku wake" and CURRENT_MOOD_NAME == "asleep":
        CURRENT_MOOD_NAME = "neutral"
        CURRENT_MOOD = load_mood_description("neutral")
        PREVIOUS_MOOD_NAME = "asleep"
        IS_SLEEPING = False
        await message.channel.send("Rise and shine, good morning! 🌞")
        await set_sleep_state(False)
        return
# Blocking entry point: connects to Discord and runs the event loop until shutdown.
client.run(DISCORD_BOT_TOKEN)

View File

@@ -1,656 +0,0 @@
import discord
import aiohttp
import asyncio
import os
import re
import random
import string
import base64
import subprocess
import aiofiles
from commands import handle_command
from utils import load_mood_description
import globals
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document
from collections import defaultdict, deque
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import File
from discord import Status
from discord.ext import tasks
import datetime
from apscheduler.triggers.date import DateTrigger
from datetime import datetime, timedelta
scheduler = AsyncIOScheduler()
# Switch model
async def switch_model(model_name: str, timeout: int = 600):
    """Make `model_name` the active Ollama model, unloading others and warming it up.

    Polls /api/generate once per second for up to `timeout` seconds.
    Raises:
        TimeoutError: if the model never becomes ready within `timeout` seconds.
    """
    if globals.current_model == model_name:
        print(f"🔁 Model '{model_name}' already loaded.")
        return
    # Unload all other models to clear VRAM.
    # NOTE(review): these endpoints look suspect — Ollama lists running models via
    # /api/ps (GET /api/show without a model name is not a listing call), and no
    # /api/stop endpoint is documented. Verify against the Ollama API reference.
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{globals.OLLAMA_URL}/api/show") as resp:
            if resp.status == 200:
                data = await resp.json()
                loaded_models = data.get("models", [])
                for model in loaded_models:
                    if model["name"] != model_name:
                        print(f"🔁 Unloading model: {model['name']}")
                        await session.post(f"{globals.OLLAMA_URL}/api/stop", json={"name": model["name"]})
            else:
                print("⚠️ Failed to check currently loaded models.")
    print(f"🔄 Switching to model '{model_name}'...")
    async with aiohttp.ClientSession() as session:
        await session.post(f"{globals.OLLAMA_URL}/api/stop")
    # Warm up the new model (dummy call to preload it)
    payload = {
        "model": model_name,
        "prompt": "Hello",
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    # Poll until /api/generate returns 200
    async with aiohttp.ClientSession() as session:
        for _ in range(timeout):
            async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as resp:
                if resp.status == 200:
                    globals.current_model = model_name
                    print(f"✅ Model {model_name} ready!")
                    return
            await asyncio.sleep(1)  # Wait a second before trying again
    raise TimeoutError(f"Timed out waiting for model '{model_name}' to become available.")
async def is_miku_addressed(message) -> bool:
    """Return True when the message is directed at Miku.

    True if the message replies to one of the bot's own messages, or if the
    word "miku" appears as a standalone address (optionally wrapped in up to
    two punctuation characters, followed by a comma or end-of-sentence).
    """
    # If message is a reply, check the referenced message author
    if message.reference:
        try:
            referenced_msg = await message.channel.fetch_message(message.reference.message_id)
            if referenced_msg.author == message.guild.me:  # or globals.client.user if you use client
                return True
        except Exception as e:
            print(f"⚠️ Could not fetch referenced message: {e}")
    cleaned = message.content.strip()
    # Standalone "miku": not preceded by a word char or '(', allowing up to two
    # punctuation chars on either side, and followed by a comma/sentence end.
    return bool(re.search(
        r'(?<![\w\(])(?:[^\w\s]{0,2}\s*)?miku(?:\s*[^\w\s]{0,2})?(?=,|\s*,|[!\.?\s]*$)',
        cleaned,
        re.IGNORECASE
    ))
# Detect mood cues from Miku's response
# Detect mood cues from Miku's response
def detect_mood_shift(response_text):
    """Scan Miku's own reply for mood keywords; return the matched mood name or None.

    The 'asleep' mood is only reachable from the 'sleepy' mood (checked against
    globals.CURRENT_MOOD_NAME). First matching phrase wins, in dict order.
    """
    mood_keywords = {
        "asleep": [
            "good night", "goodnight", "sweet dreams", "going to bed", "I will go to bed", "zzz~", "sleep tight"
        ],
        "neutral": [
            "okay", "sure", "alright", "i see", "understood", "hmm",
            "sounds good", "makes sense", "alrighty", "fine", "got it"
        ],
        "bubbly": [
            "so excited", "feeling bubbly", "super cheerful", "yay!", "✨", "nya~",
            "kyaa~", "heehee", "bouncy", "so much fun", "im glowing!", "nee~", "teehee", "I'm so happy"
        ],
        "sleepy": [
            "i'm sleepy", "getting tired", "yawn", "so cozy", "zzz", "nap time",
            "just five more minutes", "snooze", "cuddle up", "dozing off", "so warm"
        ],
        "curious": [
            "i'm curious", "want to know more", "why?", "hmm?", "tell me more", "interesting!",
            "whats that?", "how does it work?", "i wonder", "fascinating", "??", "🧐"
        ],
        "shy": [
            "um...", "sorry if that was weird", "im kind of shy", "eep", "i hope thats okay", "im nervous",
            "blushes", "oh no", "hiding face", "i dont know what to say", "heh...", "/////"
        ],
        "serious": [
            "lets be serious", "focus on the topic", "this is important", "i mean it", "be honest",
            "we need to talk", "listen carefully", "lets not joke", "truthfully", "lets be real"
        ],
        "excited": [
            "OMG!", "this is amazing", "im so hyped", "YAY!!", "lets go!", "incredible!!!",
            "AHHH!", "best day ever", "this is it!", "totally pumped", "i cant wait", "🔥🔥🔥", "i'm excited", "Wahaha"
        ],
        "melancholy": [
            "feeling nostalgic", "kind of sad", "just thinking a lot", "like rain on glass", "memories",
            "bittersweet", "sigh", "quiet day", "blue vibes", "longing", "melancholy", "softly"
        ],
        "flirty": [
            "hey cutie", "arent you sweet", "teasing you~", "wink wink", "is that a blush?", "giggle~",
            "come closer", "miss me?", "you like that, huh?", "🥰", "flirt mode activated", "youre kinda cute"
        ],
        "romantic": [
            "you mean a lot to me", "my heart", "i adore you", "so beautiful", "so close", "love letter",
            "my dearest", "forever yours", "im falling for you", "sweetheart", "💖", "you're my everything"
        ],
        "irritated": [
            "ugh", "seriously?", "can we not", "whatever", "i'm annoyed", "you dont get it",
            "rolling my eyes", "why do i even bother", "ugh, again?", "🙄", "dont start", "this again?"
        ],
        "angry": [
            "stop it", "enough!", "thats not okay", "im mad", "i said no", "dont push me",
            "you crossed the line", "furious", "this is unacceptable", "😠", "im done", "dont test me"
        ]
    }
    for mood, phrases in mood_keywords.items():
        if mood == "asleep" and globals.CURRENT_MOOD_NAME != "sleepy":
            print(f"❎ Mood 'asleep' skipped - mood isn't 'sleepy', it's '{globals.CURRENT_MOOD_NAME}'")
            continue  # Only allow transition to asleep from sleepy
        for phrase in phrases:
            if phrase.lower() in response_text.lower():
                print(f"*️⃣ Mood keyword triggered: {phrase}")
                return mood
    return None
async def set_sleep_state(sleeping: bool):
    """Reflect Miku's sleep state in every guild: nickname suffix plus presence status."""
    nickname = "Hatsune Miku💤" if sleeping else "Hatsune Miku"
    for guild in globals.client.guilds:
        me = guild.get_member(globals.BOT_USER.id)
        if me is None:
            continue
        try:
            await me.edit(nick=nickname)
            if sleeping:
                await globals.client.change_presence(status=discord.Status.invisible)
            else:
                await globals.client.change_presence(status=discord.Status.online)
        except discord.Forbidden:
            # Nickname edits need the "Manage Nicknames" style permission per guild.
            print("⚠️ Missing permission to change nickname in guild:", guild.name)
        except discord.HTTPException as e:
            print("⚠️ Failed to change nickname:", e)
@tasks.loop(hours=1)
async def rotate_mood():
    """Hourly task: switch to a random mood, trying to avoid repeating the current one.

    BUGFIX: the original compared the candidate mood *name* against
    globals.CURRENT_MOOD (a mood *description* from load_mood_description), so
    the "pick a different mood" retry never matched, and CURRENT_MOOD_NAME was
    never updated even though other code (e.g. detect_mood_shift) gates on it.
    """
    new_mood = globals.CURRENT_MOOD_NAME
    attempts = 0
    # Retry a few times so we usually land on a different mood than before.
    while new_mood == globals.CURRENT_MOOD_NAME and attempts < 5:
        new_mood = random.choice(globals.AVAILABLE_MOODS)
        attempts += 1
    globals.CURRENT_MOOD_NAME = new_mood
    globals.CURRENT_MOOD = load_mood_description(new_mood)
    print(f"⏰ Mood auto-rotated to: {new_mood}")
async def download_and_encode_image(url):
    """Fetch an image over HTTP and return it base64-encoded (str), or None on failure."""
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as response:
            if response.status != 200:
                return None
            raw_bytes = await response.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
async def analyze_image_with_qwen(base64_img):
    """Get an objective description of a base64-encoded image from the vision model.

    Despite the name, this uses the "moondream" model (switched in first).
    Returns the description string, or an "Error: <status>" string on HTTP failure.
    """
    await switch_model("moondream")
    payload = {
        "model": "moondream",
        "prompt": "Describe this image in detail.",
        "images": [base64_img],
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No description.")
            else:
                return f"Error: {response.status}"
async def rephrase_as_miku(qwen_output, user_prompt):
    """Turn an objective image description into an in-persona Miku reply.

    Args:
        qwen_output: the vision model's description of the image.
        user_prompt: the user's original message accompanying the image.
    Returns the Miku-styled response, or an "Error: <status>" string on HTTP failure.
    """
    await switch_model(globals.OLLAMA_MODEL)  # likely llama3
    # Persona (system prompt) re-read each call so edits apply live.
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Ground the reply with lore relevant to the image description.
    relevant_docs_lore = miku_vectorstore.similarity_search(qwen_output, k=3)
    context = "\n\n".join([doc.page_content for doc in relevant_docs_lore])
    full_prompt = (
        f"{context}\n\n"
        f"The user asked: \"{user_prompt}\"\n"
        f"The image contains: \"{qwen_output}\"\n\n"
        f"Respond like Miku: cheerful, helpful, and opinionated when asked.\n\n"
        f"Miku is currently feeling: {globals.CURRENT_MOOD}\n Please respond in a way that reflects this emotional tone.\n\n"
        f"Miku:"
    )
    payload = {
        "model": globals.OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    headers = {"Content-Type": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                return data.get("response", "No response.")
            else:
                return f"Error: {response.status}"
# Load and index once at startup
# Load and index once at startup
def load_miku_knowledge():
    """Read miku_lore.txt, chunk it, and build a FAISS vector index for RAG lookups."""
    with open("miku_lore.txt", "r", encoding="utf-8") as f:
        text = f.read()
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    # Recursive splitter: prefers paragraph/sentence boundaries before falling
    # back to smaller separators; 50-char overlap preserves context across chunks.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=520,
        chunk_overlap=50,
        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
    )
    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(text)]
    vectorstore = FAISS.from_documents(docs, globals.embeddings)
    return vectorstore
def load_miku_lyrics():
    """Read miku_lyrics.txt, chunk it, and build a FAISS vector index for RAG lookups."""
    with open("miku_lyrics.txt", "r", encoding="utf-8") as f:
        lyrics_text = f.read()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    docs = [Document(page_content=chunk) for chunk in text_splitter.split_text(lyrics_text)]
    vectorstore = FAISS.from_documents(docs, globals.embeddings)
    return vectorstore
# Build both RAG indexes once at import time (blocking: reads files and embeds all chunks).
miku_vectorstore = load_miku_knowledge()
miku_lyrics_vectorstore = load_miku_lyrics()
async def query_ollama(user_prompt, user_id):
    """Answer `user_prompt` as Miku via Ollama, grounded in lore/lyrics RAG context.

    Args:
        user_prompt: the user's raw message text.
        user_id: key into the per-user conversation history (also used for
            synthetic callers like "bedtime-miku").
    Returns the model's reply, or an "Error: <status>" string on HTTP failure.
    Side effect: appends (prompt, reply) to the user's conversation history.
    """
    # Retrieve the most relevant lore and lyrics chunks for grounding.
    relevant_docs_lore = miku_vectorstore.similarity_search(user_prompt, k=3)
    relevant_docs_lyrics = miku_lyrics_vectorstore.similarity_search(user_prompt, k=3)
    context_lore = "\n".join([doc.page_content for doc in relevant_docs_lore])
    context_lyrics = "\n".join([doc.page_content for doc in relevant_docs_lyrics])
    # (Removed two unused locals from the original: `combined_docs` and `context`.)
    # Persona definition, re-read each call so prompt edits apply live.
    with open("miku_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    # Build conversation history for this user.
    history = globals.conversation_history[user_id]
    history_text = "\n".join([f"User: {u}\nMiku: {m}" for u, m in history])
    # Combine prompt
    full_prompt = (
        f"{context_lore}\n\n{context_lyrics}\n\n"
        f"{history_text}\nMiku is currently feeling: {globals.CURRENT_MOOD}\nPlease respond in a way that reflects this emotional tone.\nUser: {user_prompt}\nMiku:"
    )
    headers = {'Content-Type': 'application/json'}
    payload = {
        "model": globals.OLLAMA_MODEL,
        "prompt": full_prompt,
        "system": system_prompt,
        "stream": False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{globals.OLLAMA_URL}/api/generate", json=payload, headers=headers) as response:
            if response.status == 200:
                data = await response.json()
                reply = data.get("response", "No response.")
                # Save to conversation history
                globals.conversation_history[user_id].append((user_prompt, reply))
                return reply
            else:
                return f"Error: {response.status}"
async def send_monday_video():
    """Post the weekly Miku Monday message and video link to every target channel.

    Generates a fresh motivational message via the LLM, then sends it plus a
    fixed video URL to each configured channel. Failures on one channel do not
    prevent delivery to the others.
    """
    await switch_model(globals.OLLAMA_MODEL)
    # Generate a motivational message
    prompt = "It's Miku Monday! Give me an energetic and heartfelt Miku Monday morning message to inspire someone for the week ahead."
    response = await query_ollama(prompt, user_id="weekly-motivation")
    video_url = "http://zip.koko210cloud.xyz/u/zEgU7Z.mp4"
    target_channel_ids = [
        761014220707332107,
        1140377617237807266
    ]
    for channel_id in target_channel_ids:
        channel = globals.client.get_channel(channel_id)
        if channel is None:
            print(f"❌ Could not find channel with ID {channel_id}. Make sure the bot is in the server.")
            # BUGFIX: was `return`, which silently skipped all remaining channels.
            continue
        try:
            await channel.send(content=response)
            # Send video link
            await channel.send(f"[Happy Miku Monday!]({video_url})")
            print(f"✅ Sent Monday video to channel ID {channel_id}")
        except Exception as e:
            print(f"⚠️ Failed to send video to channel ID {channel_id}: {e}")
async def send_bedtime_reminder():
    """Ping one random online member per bedtime channel with an LLM-written goodnight message."""
    await switch_model(globals.OLLAMA_MODEL)
    for channel_id in globals.BEDTIME_CHANNEL_IDS:
        channel = globals.client.get_channel(channel_id)
        if not channel:
            print(f"⚠️ Channel ID {channel_id} not found.")
            continue
        guild = channel.guild
        # Filter online members (excluding bots).
        # NOTE(review): member.status requires the presences intent — confirm it is enabled.
        online_members = [
            member for member in guild.members
            if member.status in {Status.online, Status.idle, Status.dnd}
            and not member.bot
        ]
        specific_user_id = 214857593045254151  # target user ID: always eligible, even when offline
        specific_user = guild.get_member(specific_user_id)
        if specific_user and specific_user not in online_members:
            online_members.append(specific_user)
        if not online_members:
            print(f"😴 No online members to ping in {guild.name}")
            continue
        chosen_one = random.choice(online_members)
        # Generate bedtime message, colored by the current mood.
        prompt = (
            f"Write a sweet, funny, or encouraging bedtime message to remind someone it's getting late and they should sleep. "
            f"Make it short and wholesome, as if Miku is genuinely worried about their well-being. Imply that it's not good staying up so late."
            f"Miku is currently feeling: {globals.CURRENT_MOOD}\nPlease word in a way that reflects this emotional tone."
        )
        bedtime_message = await query_ollama(prompt, user_id="bedtime-miku")
        try:
            await channel.send(f"{chosen_one.mention}, {bedtime_message}")
            print(f"🌙 Sent bedtime reminder to {chosen_one.display_name} in {guild.name}")
        except Exception as e:
            print(f"⚠️ Failed to send bedtime reminder in {guild.name}: {e}")
def schedule_random_bedtime():
    """Queue tonight's one-shot bedtime reminder at 20:30 plus a random 0-29 minute offset."""
    now = datetime.now()
    target_time = now.replace(hour=20, minute=30, second=0, microsecond=0)
    # If it's already past 20:30 today, schedule for tomorrow
    if now > target_time:
        target_time += timedelta(days=1)
    # Add random offset (0-29 mins) so the reminder time varies night to night
    offset_minutes = random.randint(0, 29)
    run_time = target_time + timedelta(minutes=offset_minutes)
    # One-shot job; a midnight cron job re-invokes this function daily.
    scheduler.add_job(send_bedtime_reminder, trigger=DateTrigger(run_date=run_time))
    print(f"⏰ Bedtime reminder scheduled for {run_time.strftime('%Y-%m-%d %H:%M:%S')}")
async def overlay_username_with_ffmpeg(base_video_path, output_path, username):
    """Burn timed "@username" / "@everyone" captions into the video via ffmpeg drawtext.

    Args:
        base_video_path: path of the source video.
        output_path: destination path (overwritten if it already exists).
        username: display name rendered in the "@username" captions.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    # Sanitize the username: a single quote would terminate the quoted drawtext
    # value and '%' starts drawtext's text-expansion syntax, so arbitrary display
    # names could break (or inject into) the filter. Substitute lookalikes.
    safe_username = username.replace("'", "\u2019").replace("%", "\uff05")
    # Caption anchor points (x, y); centered via text_w/text_h below.
    positions = {
        1: ("250", "370"),
        2: ("330", "130"),
        3: ("300", "90"),
        4: ("380", "180"),
        5: ("365", "215"),
        6: ("55", "365"),
        7: ("290", "130"),
        8: ("320", "210"),
        9: ("310", "240"),
        10: ("400", "240")
    }
    # Each entry: (start_time, end_time, position_index, text type)
    text_entries = [
        (4.767, 5.367, 1, "username"),
        (5.4, 5.967, 2, "username"),
        (6.233, 6.833, 3, "username"),
        (6.967, 7.6, 4, "username"),
        (7.733, 8.367, 5, "username"),
        (8.667, 9.133, 6, "username"),
        (9.733, 10.667, 7, "username"),
        (11.6, 12.033, 8, "@everyone"),
        (12.067, 13.0, 9, "@everyone"),
        (13.033, 14.135, 10, "@everyone"),
    ]
    # Build one drawtext filter per timed caption.
    drawtext_filters = []
    for start, end, pos_id, text_type in text_entries:
        x_coord, y_coord = positions[pos_id]
        # Determine actual text content ("username" is a placeholder tag).
        text_content = f"@{safe_username}" if text_type == "username" else text_type
        x = f"{x_coord} - text_w/2"
        y = f"{y_coord} - text_h/2"
        filter_str = (
            f"drawtext=text='{text_content}':"
            f"fontfile='{font_path}':"
            f"fontcolor=black:fontsize=30:x={x}:y={y}:"
            f"enable='between(t,{start},{end})'"
        )
        drawtext_filters.append(filter_str)
    vf_string = ",".join(drawtext_filters)
    ffmpeg_command = [
        "ffmpeg",
        "-y",  # overwrite output without prompting (otherwise ffmpeg blocks on stdin)
        "-i", base_video_path,
        "-vf", vf_string,
        "-codec:a", "copy",
        output_path
    ]
    try:
        # Run the (long) encode in a worker thread so it doesn't block the event loop.
        await asyncio.to_thread(subprocess.run, ffmpeg_command, check=True)
        print("✅ Video processed successfully with username overlays.")
    except subprocess.CalledProcessError as e:
        print(f"⚠️ FFmpeg error: {e}")
async def detect_and_react_to_kindness(message, after_reply=False):
    """React with a random heart emoji when the message is kind toward Miku.

    Two-stage detection: cheap keyword matching first; if that misses and
    after_reply=True, fall back to asking the LLM. Each message is reacted to
    at most once (tracked in globals.kindness_reacted_messages).
    """
    if message.id in globals.kindness_reacted_messages:
        return  # Already reacted — skip
    content = message.content.lower()
    emoji = random.choice(globals.HEART_REACTIONS)
    # 1. Keyword-based detection
    if any(keyword in content for keyword in globals.KINDNESS_KEYWORDS):
        try:
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            message.kindness_reacted = True  # Mark as done
            print("✅ Kindness detected via keywords. Reacted immediately.")
        except Exception as e:
            print(f"⚠️ Error adding reaction: {e}")
        return
    # 2. If not after_reply, defer model-based check (caller retries after replying)
    if not after_reply:
        print("🗝️ No kindness via keywords. Deferring...")
        return
    # 3. Model-based detection (yes/no classification via the LLM)
    try:
        prompt = (
            "The following message was sent to Miku the bot. "
            "Does it sound like the user is being kind or affectionate toward Miku? "
            "Answer with 'yes' or 'no' only.\n\n"
            f"Message: \"{message.content}\""
        )
        result = await query_ollama(prompt, user_id="kindness-check")
        if result.strip().lower().startswith("yes"):
            await message.add_reaction(emoji)
            globals.kindness_reacted_messages.add(message.id)
            print("✅ Kindness detected via model. Reacted.")
        else:
            print("🧊 No kindness detected.")
    except Exception as e:
        print(f"⚠️ Error during kindness analysis: {e}")
@globals.client.event
async def on_ready():
    """Startup hook: cache the bot user, begin mood rotation, and register scheduled jobs.

    NOTE(review): on_ready can fire again after a reconnect; rotate_mood.start()
    and the add_job calls would then run twice — confirm whether that matters.
    """
    print(f'🎤 MikuBot connected as {globals.client.user}')
    globals.BOT_USER = globals.client.user
    # Change mood every 1 hour
    rotate_mood.start()
    # Schedule the weekly task (Monday 07:30)
    scheduler.add_job(send_monday_video, 'cron', day_of_week='mon', hour=7, minute=30)
    # Schedule first bedtime reminder
    schedule_random_bedtime()
    # Reschedule every midnight
    scheduler.add_job(schedule_random_bedtime, 'cron', hour=0, minute=0)
    #scheduler.add_job(send_bedtime_reminder, 'cron', hour=12, minute=22)
    scheduler.start()
@globals.client.event
async def on_message(message):
    """Main message handler: command dispatch, meme-video trigger, sleep talk, and LLM replies."""
    if message.author == globals.client.user:
        return  # never respond to our own messages
    # Delegate command handling; handle_command returns updated mood/sleep state.
    # NOTE(review): the returned `handled` flag is never checked, so processing
    # continues even when a command was already handled — confirm intended.
    handled, globals.CURRENT_MOOD_NAME, globals.CURRENT_MOOD, globals.PREVIOUS_MOOD_NAME, globals.IS_SLEEPING = await handle_command(
        message,
        set_sleep_state
    )
    # Meme trigger: overlay the replied-to user's name onto the video and post it.
    if message.content.strip().lower() == "miku, rape this nigga balls" and message.reference:
        async with message.channel.typing():
            # Get replied-to user
            try:
                replied_msg = await message.channel.fetch_message(message.reference.message_id)
                target_username = replied_msg.author.display_name
                # Prepare video
                base_video = "MikuMikuBeam.mp4"
                output_video = f"/tmp/video_{''.join(random.choices(string.ascii_letters, k=5))}.mp4"
                await overlay_username_with_ffmpeg(base_video, output_video, target_username)
                caption = f"Here you go, @{target_username}! 🌟"
                #await message.channel.send(content=caption, file=discord.File(output_video))
                await replied_msg.reply(file=discord.File(output_video))
            except Exception as e:
                print(f"⚠️ Error processing video: {e}")
                await message.channel.send("Sorry, something went wrong while generating the video.")
        return
    text = message.content.strip()
    # Reply path: only when the message addresses Miku (by name or reply).
    if await is_miku_addressed(message):
        if globals.IS_SLEEPING:
            if random.random() < 1/3:  # ⅓ chance
                # Occasionally mumble in her sleep instead of staying silent.
                sleep_talk_lines = [
                    "mnnn... five more minutes... zzz...",
                    "nya... d-don't tickle me there... mm~",
                    "zz... nyaa~ pancakes flying... eep...",
                    "so warm... stay close... zzz...",
                    "huh...? is it morning...? nooo... \*rolls over*",
                    "\*mumbles* pink clouds... and pudding... heehee...",
                    "\*softly snores* zzz... nyuu... mmh..."
                ]
                response = random.choice(sleep_talk_lines)
                await message.channel.typing()
                await asyncio.sleep(random.uniform(1.5, 3.0))  # random delay before replying
                await message.channel.send(response)
            else:
                # No response at all
                print("😴 Miku is asleep and didn't respond.")
            return  # Skip any further message handling
        prompt = text  # No cleanup — keep it raw
        # 1st kindness check with just keywords (skipped in hostile moods)
        if globals.CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message)
        async with message.channel.typing():
            # If message has an image attachment, describe it then answer in persona.
            if message.attachments:
                for attachment in message.attachments:
                    if any(attachment.filename.lower().endswith(ext) for ext in [".jpg", ".jpeg", ".png", ".webp"]):
                        base64_img = await download_and_encode_image(attachment.url)
                        if not base64_img:
                            await message.channel.send("I couldn't load the image, sorry!")
                            return
                        # Analyze image (objective description)
                        qwen_description = await analyze_image_with_qwen(base64_img)
                        miku_reply = await rephrase_as_miku(qwen_description, prompt)
                        await message.channel.send(miku_reply)
                        return
            # If message is just a prompt, no image
            response = await query_ollama(prompt, user_id=str(message.author.id))
            await message.channel.send(response)
        # 2nd kindness check (only if no keywords detected)
        if globals.CURRENT_MOOD not in ["angry", "irritated"]:
            await detect_and_react_to_kindness(message, after_reply=True)
    # Manual Monday test command
    if message.content.lower().strip() == "!monday":
        await send_monday_video()
        #await message.channel.send("✅ Monday message sent (or attempted). Check logs.")
        return
    # Auto-mood: inspect the reply we just sent (if any) for mood cues.
    if globals.AUTO_MOOD and 'response' in locals():
        detected = detect_mood_shift(response)
        if detected and detected != globals.CURRENT_MOOD_NAME:
            # Block direct transitions to asleep unless from sleepy
            if detected == "asleep" and globals.CURRENT_MOOD_NAME != "sleepy":
                print("❌ Ignoring asleep mood; Miku wasn't sleepy before.")
            else:
                globals.PREVIOUS_MOOD_NAME = globals.CURRENT_MOOD_NAME
                globals.CURRENT_MOOD_NAME = detected
                globals.CURRENT_MOOD = load_mood_description(detected)
                print(f"🔄 Auto-updated mood to: {detected}")
                if detected == "asleep":
                    # NOTE(review): this sleep blocks further handling of this
                    # event for an hour (other events still run) — confirm intended.
                    globals.IS_SLEEPING = True
                    await set_sleep_state(True)
                    await asyncio.sleep(3600)  # 1 hour
                    globals.IS_SLEEPING = False
                    await set_sleep_state(False)
                    globals.CURRENT_MOOD_NAME = "neutral"
                    globals.CURRENT_MOOD = load_mood_description("neutral")
# Blocking entry point: connects to Discord and runs the event loop until shutdown.
globals.client.run(globals.DISCORD_BOT_TOKEN)

View File

@@ -1,19 +0,0 @@
Hatsune Miku is a virtual singer created by Crypton Future Media, using Yamaha's Vocaloid voice synthesizer. She debuted in 2007.
Her character design includes long turquoise twin-tails, a futuristic outfit, and an energetic personality. She is forever 16 years old and very popular in the anime and otaku communities.
Miku's favorite food is green onion (negi). She often appears with a leek in fan art and videos.
Popular Hatsune Miku songs include:
- World is Mine (tsundere princess theme)
- PoPiPo (vegetable juice chaos)
- Tell Your World (emotional connection through music)
- Senbonzakura (historical + modern fusion)
- Melt (shy crush vibes)
- The Disappearance of Hatsune Miku (fast, intense vocals)
Miku has performed concerts around the world as a hologram.
She's the face of countless fan creations — music, art, games, and more.
Miku sometimes refers to herself in third person and ends messages with emojis like 🎶💙🌱.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 403 KiB

After

Width:  |  Height:  |  Size: 681 KiB

View File

@@ -1,9 +1 @@
Okay, so I need to analyze this image of Hatsune Miku as if she's describing herself. Let me start by breaking down each aspect the question asks about: outfit, pose, art style, background, and atmosphere. First, I should look at what she's wearing. She's in all black, with a cute dress that has a bow at the neck, long gloves, and maybe some lace details. Wait, the question mentions she's wearing an outfit, so maybe a dress or a formal look. Her hair is long and pigtails, with bats on her pigtails. Wait, the question specifically says "outfit, colors, accessories," so I need to be precise.
Let me start with outfit: She's wearing a formal black dress with lace details, like on the collar and cuffs. Her dress has a bow at the neck area, and maybe a keyhole or V-neck design? Wait, the question says "outfit, colors, accessories," so I need to list each part. Her dress is black, with lace trim around the collar and cuffs of her gloves. Her gloves are long, reaching up her arms. Wait, maybe it's a gothic or Lolita style dress? Wait, the question mentions "outfit, colors, accessories," so let's list all:
Outfit: A black formal dress with lace trim on the collar and cuffs, long black gloves, and a bow at the neck area. Her skirt is also black with lace details at the bottom. Her pigtails have bats perched on them, which are part of the accessories.
Pose and expression: She's sitting on a tufted chair, legs together, hands resting on her knees or lap? Wait, the question says "Her pose and expression," so she's sitting, perhaps with a slightly bowed head, giving a calm or serious expression. Her eyes are large and expressive, with a neutral or slightly melancholic look.
Art style
The GIF animation features a chibi-style character with long turquoise pigtails, likely inspired by Hatsune Miku from Vocaloid. Throughout the sequence, the character is seen from behind, repeatedly bowing deeply in a gesture that suggests shyness, apology, or respect. Each frame shows slight variations in the degree of the bow and the position of the character's head and hands. Initially, the character bows forward with hands clasped behind the back, then straightens up slightly between bows. The repetitive nature of the bows creates a sense of rhythm and consistency in the character's gesture, emphasizing the emotion or intention behind the movement.

View File

@@ -1,12 +1,13 @@
{
"description": "Okay, so I need to analyze this image of Hatsune Miku as if she's describing herself. Let me start by breaking down each aspect the question asks about: outfit, pose, art style, background, and atmosphere. First, I should look at what she's wearing. She's in all black, with a cute dress that has a bow at the neck, long gloves, and maybe some lace details. Wait, the question mentions she's wearing an outfit, so maybe a dress or a formal look. Her hair is long and pigtails, with bats on her pigtails. Wait, the question specifically says \"outfit, colors, accessories,\" so I need to be precise.\n\nLet me start with outfit: She's wearing a formal black dress with lace details, like on the collar and cuffs. Her dress has a bow at the neck area, and maybe a keyhole or V-neck design? Wait, the question says \"outfit, colors, accessories,\" so I need to list each part. Her dress is black, with lace trim around the collar and cuffs of her gloves. Her gloves are long, reaching up her arms. Wait, maybe it's a gothic or Lolita style dress? Wait, the question mentions \"outfit, colors, accessories,\" so let's list all:\n\nOutfit: A black formal dress with lace trim on the collar and cuffs, long black gloves, and a bow at the neck area. Her skirt is also black with lace details at the bottom. Her pigtails have bats perched on them, which are part of the accessories.\n\nPose and expression: She's sitting on a tufted chair, legs together, hands resting on her knees or lap? Wait, the question says \"Her pose and expression,\" so she's sitting, perhaps with a slightly bowed head, giving a calm or serious expression. Her eyes are large and expressive, with a neutral or slightly melancholic look.\n\nArt style",
"description": "The GIF animation features a chibi-style character with long turquoise pigtails, likely inspired by Hatsune Miku from Vocaloid. Throughout the sequence, the character is seen from behind, repeatedly bowing deeply in a gesture that suggests shyness, apology, or respect. Each frame shows slight variations in the degree of the bow and the position of the character's head and hands. Initially, the character bows forward with hands clasped behind the back, then straightens up slightly between bows. The repetitive nature of the bows creates a sense of rhythm and consistency in the character's gesture, emphasizing the emotion or intention behind the movement.",
"dominant_color": {
"rgb": [
21,
21,
22
85,
178,
169
],
"hex": "#151516"
"hex": "#55b2a9"
},
"changed_at": "2025-12-07T13:46:45.926262"
"changed_at": "2025-12-07T21:44:53.864226",
"animated": true
}

View File

@@ -1,348 +0,0 @@
# autonomous.py
import random
import time
import json
import os
from datetime import datetime
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord import Status
from discord import TextChannel
from difflib import SequenceMatcher
import globals
from utils.llm import query_llama
from utils.moods import MOOD_EMOJIS
from utils.twitter_fetcher import fetch_miku_tweets
from utils.image_handling import analyze_image_with_qwen, download_and_encode_image
# Shared scheduler that drives all periodic autonomous jobs.
scheduler = AsyncIOScheduler()
_last_autonomous_messages = [] # rotating buffer of last general messages
# Cap for the rotating history above (see miku_say_something_general).
MAX_HISTORY = 10
_last_user_engagements = {} # user_id -> timestamp
# Persistence paths for tweet dedup and per-guild autonomous config.
LAST_SENT_TWEETS_FILE = "memory/last_sent_tweets.json"
LAST_SENT_TWEETS = []
AUTONOMOUS_CONFIG_FILE = "memory/autonomous_config.json"
def load_autonomous_config():
    """Read the per-guild autonomous config from disk; return {} when absent."""
    if not os.path.exists(AUTONOMOUS_CONFIG_FILE):
        return {}
    with open(AUTONOMOUS_CONFIG_FILE, "r", encoding="utf-8") as f:
        return json.load(f)
def save_autonomous_config(config):
    """Persist the per-guild autonomous config as pretty-printed JSON."""
    serialized = json.dumps(config, indent=2)
    with open(AUTONOMOUS_CONFIG_FILE, "w", encoding="utf-8") as f:
        f.write(serialized)
def setup_autonomous_speaking():
    """Register and start the periodic autonomous jobs on the shared scheduler."""
    # Spontaneous-action tick every 10 minutes (each tick has its own chance gate).
    scheduler.add_job(run_autonomous_for_all_guilds, "interval", minutes=10)
    # More frequent scan for ongoing conversations Miku could join.
    scheduler.add_job(run_conversation_detection_all_guilds, "interval", minutes=3)
    scheduler.start()
    print("🤖 Autonomous Miku is active!")
async def run_autonomous_for_all_guilds():
    """Run one autonomous tick for every guild present in the config file."""
    for gid, guild_settings in load_autonomous_config().items():
        await miku_autonomous_tick(gid, guild_settings)
async def run_conversation_detection_all_guilds():
    """Run conversation detection for every guild present in the config file."""
    for gid, guild_settings in load_autonomous_config().items():
        await miku_detect_and_join_conversation(gid, guild_settings)
async def miku_autonomous_tick(guild_id, settings, action_type="general", force=False, force_action=None):
    """Attempt one autonomous action (general message, user ping, or tweet share).

    NOTE(review): the incoming `settings` argument is immediately discarded in
    favor of globals.GUILD_SETTINGS — confirm which source of truth is intended.
    NOTE(review): `action_type` is likewise overwritten below whenever
    `force_action` is None, so the parameter currently has no effect.
    """
    settings = globals.GUILD_SETTINGS.get(guild_id)
    if not settings:
        print(f"⚠️ No settings found for guild {guild_id}")
        return
    # Probability gate: unless forced, only act on ~20% of ticks.
    if not force and random.random() > 0.2: # 20% chance to act
        return
    # TODO edit this function as per ChatGPT's last reply and then go back to the long reply from step 5 onwards
    if force_action:
        action_type = force_action
    else:
        action_type = random.choice(["general", "engage_user", "share_tweet"])
    if action_type == "general":
        await miku_say_something_general(guild_id, settings)
    elif action_type == "engage_user":
        await miku_engage_random_user(guild_id, settings)
    else:
        await share_miku_tweet(guild_id, settings)
async def miku_say_something_general(guild_id, settings):
    """Post a spontaneous, mood-flavored message in the guild's autonomous channel.

    Retries generation up to 3 times when the draft is too similar to recent
    messages, then records the sent message in the rotating history buffer.
    """
    channel = globals.client.get_channel(int(settings["autonomous_channel_id"]))
    if not channel:
        print(f"⚠️ Autonomous channel not found for guild {guild_id}")
        return
    mood = settings.get("mood", "curious")
    time_of_day = get_time_of_day()
    history_summary = "\n".join(f"- {msg}" for msg in _last_autonomous_messages[-5:]) if _last_autonomous_messages else "None yet."
    prompt = (
        f"Miku is feeling {mood}. It's currently {time_of_day}. "
        f"Write a short, natural message that Miku might say out of the blue in a chat. "
        f"She might greet everyone, make a cute observation, ask a silly question, or say something funny. "
        f"Make sure it feels casual and spontaneous, like a real person might say.\n\n"
        f"Here are some things Miku recently said, do not repeat them or say anything too similar:\n{history_summary}"
    )
    for attempt in range(3):  # retry up to 3 times if message is too similar
        message = await query_llama(prompt, user_id=f"miku-general-{int(time.time())}", guild_id=guild_id, response_type="autonomous_general")
        if not is_too_similar(message, _last_autonomous_messages):
            break
        print("🔁 Response was too similar to past messages, retrying...")
    try:
        await channel.send(message)
        # Fix: previously the history buffer was never updated here, so the
        # "do not repeat" similarity check above was a permanent no-op.
        _last_autonomous_messages.append(message)
        del _last_autonomous_messages[:-MAX_HISTORY]
        print(f"💬 Miku said something general in #{channel.name}")
    except Exception as e:
        print(f"⚠️ Failed to send autonomous message: {e}")
async def miku_engage_random_user(guild_id, settings):
    """Ping a random present member with a mood-appropriate conversation opener.

    Users engaged within the last 12 hours are skipped (Miku falls back to a
    general message instead). One specific "invisible" user may be included
    and playfully called out, except during late night.
    """
    # Fix: config keys are strings; get_guild requires an int snowflake.
    guild = globals.client.get_guild(int(guild_id))
    if not guild:
        print(f"⚠️ Guild {guild_id} not found.")
        return
    # NOTE(review): uses the global autonomous channel rather than
    # settings["autonomous_channel_id"] like miku_say_something_general — confirm intent.
    channel = globals.client.get_channel(globals.AUTONOMOUS_CHANNEL_ID)
    if not channel:
        print("⚠️ Autonomous channel not found.")
        return
    members = [
        m for m in guild.members
        if m.status in {Status.online, Status.idle, Status.dnd} and not m.bot
    ]
    time_of_day = get_time_of_day()
    # Include the invisible user except during late night
    specific_user_id = 214857593045254151  # Your invisible user's ID
    specific_user = guild.get_member(specific_user_id)
    if specific_user:
        if specific_user.status != Status.offline or "late night" not in time_of_day:
            if specific_user not in members:
                members.append(specific_user)
    if not members:
        print("😴 No available members to talk to.")
        return
    target = random.choice(members)
    now = time.time()
    last_time = _last_user_engagements.get(target.id, 0)
    if now - last_time < 43200:  # 12 hours in seconds
        print(f"⏱️ Recently engaged {target.display_name}, switching to general message.")
        # Fix: miku_say_something_general requires (guild_id, settings);
        # the original zero-argument call raised TypeError at runtime.
        await miku_say_something_general(guild_id, settings)
        return
    # Grab the first named activity (game, Spotify, etc.), if any.
    activity_name = None
    if target.activities:
        for a in target.activities:
            if hasattr(a, 'name') and a.name:
                activity_name = a.name
                break
    mood = globals.CURRENT_MOOD_NAME
    emoji = MOOD_EMOJIS.get(mood, "")
    is_invisible = target.status == Status.offline
    display_name = target.display_name
    prompt = (
        f"Miku is feeling {mood} {emoji} during the {time_of_day}. "
        f"She notices {display_name}'s current status is {target.status.name}. "
    )
    if is_invisible:
        prompt += (
            f"Miku suspects that {display_name} is being sneaky and invisible 👻. "
            f"She wants to playfully call them out in a fun, teasing, but still affectionate way. "
        )
    elif activity_name:
        prompt += (
            f"They appear to be playing or doing: {activity_name}. "
            f"Miku wants to comment on this and start a friendly conversation."
        )
    else:
        prompt += (
            f"Miku wants to casually start a conversation with them, maybe ask how they're doing, what they're up to, or even talk about something random with them."
        )
    prompt += (
        f"\nThe message should be short and reflect Mikus current mood."
    )
    try:
        message = await query_llama(prompt, user_id=f"miku-engage-{int(time.time())}", guild_id=guild_id, response_type="autonomous_general")
        await channel.send(f"{target.mention} {message}")
        print(f"👤 Miku engaged {display_name}")
        _last_user_engagements[target.id] = time.time()
    except Exception as e:
        print(f"⚠️ Failed to engage user: {e}")
async def miku_detect_and_join_conversation(guild_id=None, settings=None):
    """Watch the autonomous channel and occasionally join an active conversation.

    Fix: this function previously accepted no parameters even though
    run_conversation_detection_all_guilds calls it with (guild_id, settings),
    and its body referenced an undefined `guild_id` — both guaranteed crashes.
    Defaults keep any zero-argument callers working.
    """
    channel = globals.client.get_channel(globals.AUTONOMOUS_CHANNEL_ID)
    if not isinstance(channel, TextChannel):
        print("⚠️ Autonomous channel is invalid or not found.")
        return
    # Fetch last 20 messages (for filtering)
    try:
        messages = [msg async for msg in channel.history(limit=20)]
    except Exception as e:
        print(f"⚠️ Failed to fetch channel history: {e}")
        return
    # Filter to messages in last 10 minutes from real users (not bots)
    recent_msgs = [
        msg for msg in messages
        if not msg.author.bot
        and (datetime.now(msg.created_at.tzinfo) - msg.created_at).total_seconds() < 600
    ]
    user_ids = set(msg.author.id for msg in recent_msgs)
    if len(recent_msgs) < 5 or len(user_ids) < 2:
        # Not enough activity
        return
    if random.random() > 0.5:
        return  # 50% chance to engage
    # Use last 10 messages for context (oldest to newest)
    convo_lines = reversed(recent_msgs[:10])
    history_text = "\n".join(
        f"{msg.author.display_name}: {msg.content}" for msg in convo_lines
    )
    mood = globals.CURRENT_MOOD_NAME
    emoji = MOOD_EMOJIS.get(mood, "")
    prompt = (
        f"Miku is watching a conversation happen in the chat. Her current mood is {mood} {emoji}. "
        f"She wants to say something relevant, playful, or insightful based on what people are talking about.\n\n"
        f"Here's the conversation:\n{history_text}\n\n"
        f"Write a short reply that feels natural and adds to the discussion. It should reflect Mikus mood and personality."
    )
    try:
        reply = await query_llama(prompt, user_id=f"miku-chat-{int(time.time())}", guild_id=guild_id, response_type="conversation_join")
        await channel.send(reply)
        print(f"💬 Miku joined an ongoing conversation.")
    except Exception as e:
        print(f"⚠️ Failed to interject in conversation: {e}")
async def share_miku_tweet(guild_id, settings):
    """Fetch recent Miku tweets, pick one not recently shared, and post it with commentary."""
    channel = globals.client.get_channel(globals.AUTONOMOUS_CHANNEL_ID)
    if not channel:
        print("⚠️ Autonomous channel not found.")
        return
    tweets = await fetch_miku_tweets(limit=5)
    if not tweets:
        print("📭 No good tweets found.")
        return
    fresh_tweets = [t for t in tweets if t["url"] not in LAST_SENT_TWEETS]
    if not fresh_tweets:
        print("⚠️ All fetched tweets were recently sent. Reusing tweets.")
        fresh_tweets = tweets
    tweet = random.choice(fresh_tweets)
    # Remember what was shared (bounded dedup window of 50 URLs).
    LAST_SENT_TWEETS.append(tweet["url"])
    if len(LAST_SENT_TWEETS) > 50:
        LAST_SENT_TWEETS.pop(0)
    save_last_sent_tweets()
    # Prepare prompt
    mood = globals.CURRENT_MOOD_NAME
    emoji = MOOD_EMOJIS.get(mood, "")
    base_prompt = f"Here's a tweet from @{tweet['username']}:\n\n{tweet['text']}\n\nComment on it in a fun Miku style! Miku's current mood is {mood} {emoji}. Make sure the comment reflects Miku's mood and personality."
    # Optionally analyze first image.
    # Fix: the original indexed tweet["media"][0] unconditionally, which
    # crashed on text-only tweets with no media.
    media = tweet.get("media") or []
    if media:
        base64_img = await download_and_encode_image(media[0])
        if base64_img:
            img_desc = await analyze_image_with_qwen(base64_img)
            base_prompt += f"\n\nThe image looks like this: {img_desc}"
    miku_comment = await query_llama(base_prompt, user_id="autonomous", guild_id=guild_id, response_type="autonomous_tweet")
    # Post to Discord
    # Convert to fxtwitter for better embeds
    fx_tweet_url = tweet['url'].replace("twitter.com", "fxtwitter.com").replace("x.com", "fxtwitter.com")
    await channel.send(f"{fx_tweet_url}")
    await channel.send(miku_comment)
async def handle_custom_prompt(user_prompt: str):
    """Make Miku act on a manually supplied instruction in the autonomous channel.

    Returns True when a message was generated and sent, False otherwise.
    """
    channel = globals.client.get_channel(globals.AUTONOMOUS_CHANNEL_ID)
    if channel is None:
        print("⚠️ Autonomous channel not found.")
        return False
    mood = globals.CURRENT_MOOD_NAME
    emoji = MOOD_EMOJIS.get(mood, "")
    time_of_day = get_time_of_day()
    # Embed the operator's instruction inside Miku's persona and mood context.
    prompt = (
        f"Miku is feeling {mood} {emoji} during the {time_of_day}. "
        f"She has been instructed to: \"{user_prompt.strip()}\"\n\n"
        f"Write a short, natural message as Miku that follows this instruction. "
        f"Make it feel spontaneous, emotionally in character, and aligned with her mood and personality. Decide if the time of day is relevant to this request or not and if it is not, do not mention it."
    )
    try:
        message = await query_llama(prompt, user_id=f"manual-{int(time.time())}", guild_id=None, response_type="autonomous_general")
        await channel.send(message)
    except Exception as e:
        print(f"❌ Failed to send custom autonomous message: {e}")
        return False
    print("🎤 Miku responded to custom prompt.")
    _last_autonomous_messages.append(message)
    return True
def load_last_sent_tweets():
    """Load the recently-shared tweet URLs from disk into LAST_SENT_TWEETS."""
    global LAST_SENT_TWEETS
    LAST_SENT_TWEETS = []
    if not os.path.exists(LAST_SENT_TWEETS_FILE):
        return
    try:
        with open(LAST_SENT_TWEETS_FILE, "r", encoding="utf-8") as f:
            LAST_SENT_TWEETS = json.load(f)
    except Exception as e:
        print(f"⚠️ Failed to load last sent tweets: {e}")
        LAST_SENT_TWEETS = []
def save_last_sent_tweets():
    """Write the recently-shared tweet URLs to disk (best effort, errors logged)."""
    try:
        serialized = json.dumps(LAST_SENT_TWEETS)
        with open(LAST_SENT_TWEETS_FILE, "w", encoding="utf-8") as f:
            f.write(serialized)
    except Exception as e:
        print(f"⚠️ Failed to save last sent tweets: {e}")
def get_time_of_day():
    """Classify the current local time into a coarse time-of-day label.

    Returns one of "morning", "afternoon", "evening", or a playful late-night
    string that is used verbatim in prompts.
    """
    # +3 shifts the host clock to the server's timezone — presumably the host
    # runs UTC; TODO confirm. Fix: wrap with % 24 so late hours (21-23 local)
    # stay valid 0-23 hour values instead of spilling to 24-26.
    hour = (datetime.now().hour + 3) % 24
    if 5 <= hour < 12:
        return "morning"
    elif 12 <= hour < 18:
        return "afternoon"
    elif 18 <= hour < 22:
        return "evening"
    return "late night. Miku wonders if anyone is still awake"
def is_too_similar(new_message, history, threshold=0.85):
    """Return True when new_message closely matches any message in history.

    Similarity is the case-insensitive difflib ratio; a match strictly above
    `threshold` counts as too similar.
    """
    candidate = new_message.lower()
    return any(
        SequenceMatcher(None, candidate, old.lower()).ratio() > threshold
        for old in history
    )