feat: Implement comprehensive non-hierarchical logging system

- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination can be enabled; see the sketch below)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window
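
Levels are toggled individually rather than through a minimum-threshold hierarchy. Below is a minimal sketch of how such per-component, non-hierarchical filtering could sit on top of the standard logging module; the module layout, the ENABLED_LEVELS table, the log directory, and the level number chosen for API are illustrative assumptions, not the actual implementation in utils/logger.py.

    # Illustrative sketch only -- names, paths, and defaults are assumptions.
    import logging
    import os
    from logging.handlers import RotatingFileHandler

    LOG_DIR = "/app/memory/logs"                      # assumed location on the Docker volume
    logging.addLevelName(25, "API")                   # custom API level between INFO and WARNING

    # Non-hierarchical: any combination of levels can be enabled per component.
    ENABLED_LEVELS = {
        "bot": {"INFO", "API", "WARNING", "ERROR", "CRITICAL"},
        "api.requests": {"API", "ERROR"},
    }

    class LevelSetFilter(logging.Filter):
        """Pass a record only if its level name is explicitly enabled for this component."""
        def __init__(self, component):
            super().__init__()
            self.component = component

        def filter(self, record):
            return record.levelname in ENABLED_LEVELS.get(self.component, set())

    def get_logger(component):
        logger = logging.getLogger(component)
        logger.setLevel(logging.DEBUG)                # the filter, not the level, decides
        if not logger.handlers:
            os.makedirs(LOG_DIR, exist_ok=True)
            handler = RotatingFileHandler(
                os.path.join(LOG_DIR, f"{component}.log"),
                maxBytes=10 * 1024 * 1024,            # 10MB per file
                backupCount=5,                        # 5 backups
            )
            handler.setFormatter(logging.Formatter(
                "%(asctime)s %(levelname)s [%(name)s] %(message)s"))
            handler.addFilter(LevelSetFilter(component))
            logger.addHandler(handler)
        return logger

With a layout like this, logger.log(25, "...") emits at the custom API level, and flipping a name in ENABLED_LEVELS at runtime immediately changes what gets through for that component.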

Components: bot, api, api.requests, autonomous, persona, vision, llm,
conversation, mood, dm, scheduled, gpu, media, server, commands,
sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
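
As a rough illustration of what the persisted file might hold (the path /app/memory/logging_config.json and the exact keys are assumptions, not the real schema):

    # Hypothetical config shape; the real file name and keys may differ.
    import json

    CONFIG_PATH = "/app/memory/logging_config.json"   # assumed location on the Docker volume

    DEFAULT_CONFIG = {
        "timestamp_format": "datetime",               # off / time / date / datetime
        "global_levels": {"DEBUG": False, "INFO": True, "API": True,
                          "WARNING": True, "ERROR": True, "CRITICAL": True},
        "components": {
            "bot": {"enabled_levels": ["INFO", "WARNING", "ERROR", "CRITICAL"]},
            "api.requests": {"enabled_levels": ["API", "ERROR"]},
        },
    }

    def save_config(config):
        with open(CONFIG_PATH, "w") as f:
            json.dump(config, f, indent=2)

    def load_config():
        try:
            with open(CONFIG_PATH) as f:
                return json.load(f)
        except FileNotFoundError:
            return dict(DEFAULT_CONFIG)
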
2026-01-10 20:46:19 +02:00
parent ce00f9bd95
commit 32c2a7b930
34 changed files with 2766 additions and 936 deletions

@@ -46,9 +46,13 @@ from utils.autonomous import (
)
from utils.dm_logger import dm_logger
from utils.dm_interaction_analyzer import init_dm_analyzer
from utils.logger import get_logger
import globals
# Initialize bot logger
logger = get_logger('bot')
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s",
@@ -61,10 +65,14 @@ logging.basicConfig(
@globals.client.event
async def on_ready():
print(f'🎤 MikuBot connected as {globals.client.user}')
print(f'💬 DM support enabled - users can message Miku directly!')
logger.info(f'🎤 MikuBot connected as {globals.client.user}')
logger.info(f'💬 DM support enabled - users can message Miku directly!')
globals.BOT_USER = globals.client.user
# Intercept external library loggers (APScheduler, etc.)
from utils.logger import intercept_external_loggers
intercept_external_loggers()
# Restore evil mode state from previous session (if any)
from utils.evil_mode import restore_evil_mode_on_startup
@@ -77,7 +85,7 @@ async def on_ready():
# Initialize DM interaction analyzer
if globals.OWNER_USER_ID and globals.OWNER_USER_ID != 0:
init_dm_analyzer(globals.OWNER_USER_ID)
print(f"📊 DM Interaction Analyzer initialized for owner ID: {globals.OWNER_USER_ID}")
logger.info(f"📊 DM Interaction Analyzer initialized for owner ID: {globals.OWNER_USER_ID}")
# Schedule daily DM analysis (runs at 2 AM every day)
from utils.scheduled import run_daily_dm_analysis
@@ -88,9 +96,9 @@ async def on_ready():
minute=0,
id='daily_dm_analysis'
)
print("⏰ Scheduled daily DM analysis at 2:00 AM")
logger.info("⏰ Scheduled daily DM analysis at 2:00 AM")
else:
print("⚠️ OWNER_USER_ID not set, DM analysis feature disabled")
logger.warning("OWNER_USER_ID not set, DM analysis feature disabled")
# Setup autonomous speaking (now handled by server manager)
setup_autonomous_speaking()
@@ -146,7 +154,7 @@ async def on_message(message):
await replied_msg.reply(file=discord.File(output_video))
except Exception as e:
print(f"⚠️ Error processing video: {e}")
logger.error(f"Error processing video: {e}")
await message.channel.send("Sorry, something went wrong while generating the video.")
return
@@ -159,11 +167,11 @@ async def on_message(message):
miku_addressed = await is_miku_addressed(message)
if is_dm:
print(f"💌 DM from {message.author.display_name}: {message.content[:50]}{'...' if len(message.content) > 50 else ''}")
logger.info(f"💌 DM from {message.author.display_name}: {message.content[:50]}{'...' if len(message.content) > 50 else ''}")
# Check if user is blocked
if dm_logger.is_user_blocked(message.author.id):
print(f"🚫 Blocked user {message.author.display_name} ({message.author.id}) tried to send DM - ignoring")
logger.info(f"🚫 Blocked user {message.author.display_name} ({message.author.id}) tried to send DM - ignoring")
return
# Log the user's DM message
@@ -185,7 +193,7 @@ async def on_message(message):
# Add reply context marker to the prompt
prompt = f'[Replying to your message: "{replied_content}"] {prompt}'
except Exception as e:
print(f"⚠️ Failed to fetch replied message for context: {e}")
logger.error(f"Failed to fetch replied message for context: {e}")
async with message.channel.typing():
# If message has an image, video, or GIF attachment
@@ -212,9 +220,9 @@ async def on_message(message):
)
if is_dm:
print(f"💌 DM image response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
logger.info(f"💌 DM image response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
else:
print(f"💬 Server image response to {message.author.display_name} in {message.guild.name} (using server mood)")
logger.info(f"💬 Server image response to {message.author.display_name} in {message.guild.name} (using server mood)")
response_message = await message.channel.send(miku_reply)
@@ -229,7 +237,7 @@ async def on_message(message):
current_persona = "evil" if globals.EVIL_MODE else "miku"
asyncio.create_task(check_for_interjection(response_message, current_persona))
except Exception as e:
print(f"⚠️ Error checking for persona interjection: {e}")
logger.error(f"Error checking for persona interjection: {e}")
return
@@ -239,7 +247,7 @@ async def on_message(message):
is_gif = attachment.filename.lower().endswith('.gif')
media_type = "gif" if is_gif else "video"
print(f"🎬 Processing {media_type}: {attachment.filename}")
logger.debug(f"🎬 Processing {media_type}: {attachment.filename}")
# Download the media
media_bytes_b64 = await download_and_encode_media(attachment.url)
@@ -253,13 +261,13 @@ async def on_message(message):
# If it's a GIF, convert to MP4 for better processing
if is_gif:
print(f"🔄 Converting GIF to MP4 for processing...")
logger.debug(f"🔄 Converting GIF to MP4 for processing...")
mp4_bytes = await convert_gif_to_mp4(media_bytes)
if mp4_bytes:
media_bytes = mp4_bytes
print(f"✅ GIF converted to MP4")
logger.info(f"✅ GIF converted to MP4")
else:
print(f"⚠️ GIF conversion failed, trying direct processing")
logger.warning(f"GIF conversion failed, trying direct processing")
# Extract frames
frames = await extract_video_frames(media_bytes, num_frames=6)
@@ -268,7 +276,7 @@ async def on_message(message):
await message.channel.send(f"I couldn't extract frames from that {media_type}, sorry!")
return
print(f"📹 Extracted {len(frames)} frames from {attachment.filename}")
logger.debug(f"📹 Extracted {len(frames)} frames from {attachment.filename}")
# Analyze the video/GIF with appropriate media type
video_description = await analyze_video_with_vision(frames, media_type=media_type)
@@ -284,9 +292,9 @@ async def on_message(message):
)
if is_dm:
print(f"💌 DM {media_type} response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
logger.info(f"💌 DM {media_type} response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
else:
print(f"💬 Server video response to {message.author.display_name} in {message.guild.name} (using server mood)")
logger.info(f"💬 Server video response to {message.author.display_name} in {message.guild.name} (using server mood)")
response_message = await message.channel.send(miku_reply)
@@ -301,7 +309,7 @@ async def on_message(message):
current_persona = "evil" if globals.EVIL_MODE else "miku"
asyncio.create_task(check_for_interjection(response_message, current_persona))
except Exception as e:
print(f"⚠️ Error checking for persona interjection: {e}")
logger.error(f"Error checking for persona interjection: {e}")
return
@@ -310,7 +318,7 @@ async def on_message(message):
for embed in message.embeds:
# Handle Tenor GIF embeds specially (Discord uses these for /gif command)
if embed.type == 'gifv' and embed.url and 'tenor.com' in embed.url:
print(f"🎭 Processing Tenor GIF from embed: {embed.url}")
logger.info(f"🎭 Processing Tenor GIF from embed: {embed.url}")
# Extract the actual GIF URL from Tenor
gif_url = await extract_tenor_gif_url(embed.url)
@@ -322,7 +330,7 @@ async def on_message(message):
gif_url = embed.thumbnail.url
if not gif_url:
print(f"⚠️ Could not extract GIF URL from Tenor embed")
logger.warning(f"Could not extract GIF URL from Tenor embed")
continue
# Download the GIF
@@ -336,13 +344,13 @@ async def on_message(message):
media_bytes = base64.b64decode(media_bytes_b64)
# Convert GIF to MP4
print(f"🔄 Converting Tenor GIF to MP4 for processing...")
logger.debug(f"Converting Tenor GIF to MP4 for processing...")
mp4_bytes = await convert_gif_to_mp4(media_bytes)
if not mp4_bytes:
print(f"⚠️ GIF conversion failed, trying direct frame extraction")
logger.warning(f"GIF conversion failed, trying direct frame extraction")
mp4_bytes = media_bytes
else:
print(f"Tenor GIF converted to MP4")
logger.debug(f"Tenor GIF converted to MP4")
# Extract frames
frames = await extract_video_frames(mp4_bytes, num_frames=6)
@@ -351,7 +359,7 @@ async def on_message(message):
await message.channel.send("I couldn't extract frames from that GIF, sorry!")
return
print(f"📹 Extracted {len(frames)} frames from Tenor GIF")
logger.info(f"📹 Extracted {len(frames)} frames from Tenor GIF")
# Analyze the GIF with tenor_gif media type
video_description = await analyze_video_with_vision(frames, media_type="tenor_gif")
@@ -366,9 +374,9 @@ async def on_message(message):
)
if is_dm:
print(f"💌 DM Tenor GIF response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
logger.info(f"💌 DM Tenor GIF response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
else:
print(f"💬 Server Tenor GIF response to {message.author.display_name} in {message.guild.name} (using server mood)")
logger.info(f"💬 Server Tenor GIF response to {message.author.display_name} in {message.guild.name} (using server mood)")
response_message = await message.channel.send(miku_reply)
@@ -383,19 +391,19 @@ async def on_message(message):
current_persona = "evil" if globals.EVIL_MODE else "miku"
asyncio.create_task(check_for_interjection(response_message, current_persona))
except Exception as e:
print(f"⚠️ Error checking for persona interjection: {e}")
logger.error(f"Error checking for persona interjection: {e}")
return
# Handle other types of embeds (rich, article, image, video, link)
elif embed.type in ['rich', 'article', 'image', 'video', 'link']:
print(f"📰 Processing {embed.type} embed")
logger.error(f"Processing {embed.type} embed")
# Extract content from embed
embed_content = await extract_embed_content(embed)
if not embed_content['has_content']:
print(f"⚠️ Embed has no extractable content, skipping")
logger.warning(f"Embed has no extractable content, skipping")
continue
# Build context string with embed text
@@ -406,28 +414,28 @@ async def on_message(message):
# Process images from embed
if embed_content['images']:
for img_url in embed_content['images']:
print(f"🖼️ Processing image from embed: {img_url}")
logger.error(f"Processing image from embed: {img_url}")
try:
base64_img = await download_and_encode_image(img_url)
if base64_img:
print(f"Image downloaded, analyzing with vision model...")
logger.info(f"Image downloaded, analyzing with vision model...")
# Analyze image
qwen_description = await analyze_image_with_qwen(base64_img)
truncated = (qwen_description[:50] + "...") if len(qwen_description) > 50 else qwen_description
print(f"📝 Vision analysis result: {truncated}")
logger.error(f"Vision analysis result: {truncated}")
if qwen_description and qwen_description.strip():
embed_context_parts.append(f"[Embedded image shows: {qwen_description}]")
else:
print(f"Failed to download image from embed")
logger.error(f"Failed to download image from embed")
except Exception as e:
print(f"⚠️ Error processing embedded image: {e}")
logger.error(f"Error processing embedded image: {e}")
import traceback
traceback.print_exc()
# Process videos from embed
if embed_content['videos']:
for video_url in embed_content['videos']:
print(f"🎬 Processing video from embed: {video_url}")
logger.info(f"🎬 Processing video from embed: {video_url}")
try:
media_bytes_b64 = await download_and_encode_media(video_url)
if media_bytes_b64:
@@ -435,17 +443,17 @@ async def on_message(message):
media_bytes = base64.b64decode(media_bytes_b64)
frames = await extract_video_frames(media_bytes, num_frames=6)
if frames:
print(f"📹 Extracted {len(frames)} frames, analyzing with vision model...")
logger.info(f"📹 Extracted {len(frames)} frames, analyzing with vision model...")
video_description = await analyze_video_with_vision(frames, media_type="video")
print(f"📝 Video analysis result: {video_description[:100]}...")
logger.info(f"Video analysis result: {video_description[:100]}...")
if video_description and video_description.strip():
embed_context_parts.append(f"[Embedded video shows: {video_description}]")
else:
print(f"Failed to extract frames from video")
logger.error(f"Failed to extract frames from video")
else:
print(f"Failed to download video from embed")
logger.error(f"Failed to download video from embed")
except Exception as e:
print(f"⚠️ Error processing embedded video: {e}")
logger.error(f"Error processing embedded video: {e}")
import traceback
traceback.print_exc()
@@ -468,9 +476,9 @@ async def on_message(message):
)
if is_dm:
print(f"💌 DM embed response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
logger.info(f"💌 DM embed response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
else:
print(f"💬 Server embed response to {message.author.display_name} in {message.guild.name}")
logger.info(f"💬 Server embed response to {message.author.display_name} in {message.guild.name}")
response_message = await message.channel.send(response)
@@ -485,7 +493,7 @@ async def on_message(message):
current_persona = "evil" if globals.EVIL_MODE else "miku"
asyncio.create_task(check_for_interjection(response_message, current_persona))
except Exception as e:
print(f"⚠️ Error checking for persona interjection: {e}")
logger.error(f"Error checking for persona interjection: {e}")
return
@@ -494,7 +502,7 @@ async def on_message(message):
is_image_request, image_prompt = await detect_image_request(prompt)
if is_image_request and image_prompt:
print(f"🎨 Image generation request detected: '{image_prompt}' from {message.author.display_name}")
logger.info(f"🎨 Image generation request detected: '{image_prompt}' from {message.author.display_name}")
# Handle the image generation workflow
success = await handle_image_generation_request(message, image_prompt)
@@ -502,7 +510,7 @@ async def on_message(message):
return # Image generation completed successfully
# If image generation failed, fall back to normal response
print(f"⚠️ Image generation failed, falling back to normal response")
logger.warning(f"Image generation failed, falling back to normal response")
# If message is just a prompt, no image
# For DMs, pass None as guild_id to use DM mood
@@ -518,9 +526,9 @@ async def on_message(message):
)
if is_dm:
print(f"💌 DM response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
logger.info(f"💌 DM response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
else:
print(f"💬 Server response to {message.author.display_name} in {message.guild.name} (using server mood)")
logger.info(f"💬 Server response to {message.author.display_name} in {message.guild.name} (using server mood)")
response_message = await message.channel.send(response)
@@ -530,15 +538,15 @@ async def on_message(message):
# For server messages, check if opposite persona should interject (persona dialogue system)
if not is_dm and globals.BIPOLAR_MODE:
print(f"🔧 [DEBUG] Attempting to check for interjection (is_dm={is_dm}, BIPOLAR_MODE={globals.BIPOLAR_MODE})")
logger.debug(f"Attempting to check for interjection (is_dm={is_dm}, BIPOLAR_MODE={globals.BIPOLAR_MODE})")
try:
from utils.persona_dialogue import check_for_interjection
current_persona = "evil" if globals.EVIL_MODE else "miku"
print(f"🔧 [DEBUG] Creating interjection check task for persona: {current_persona}")
logger.debug(f"Creating interjection check task for persona: {current_persona}")
# Pass the bot's response message for analysis
asyncio.create_task(check_for_interjection(response_message, current_persona))
except Exception as e:
print(f"⚠️ Error checking for persona interjection: {e}")
logger.error(f"Error checking for persona interjection: {e}")
import traceback
traceback.print_exc()
@@ -557,11 +565,11 @@ async def on_message(message):
detected = detect_mood_shift(response, server_context)
if detected and detected != server_config.current_mood_name:
print(f"🔄 Auto mood detection for server {message.guild.name}: {server_config.current_mood_name} -> {detected}")
logger.info(f"🔄 Auto mood detection for server {message.guild.name}: {server_config.current_mood_name} -> {detected}")
# Block direct transitions to asleep unless from sleepy
if detected == "asleep" and server_config.current_mood_name != "sleepy":
print("Ignoring asleep mood; server wasn't sleepy before.")
logger.warning("Ignoring asleep mood; server wasn't sleepy before.")
else:
# Update server mood
server_manager.set_server_mood(message.guild.id, detected)
@@ -570,7 +578,7 @@ async def on_message(message):
from utils.moods import update_server_nickname
globals.client.loop.create_task(update_server_nickname(message.guild.id))
print(f"🔄 Server mood auto-updated to: {detected}")
logger.info(f"🔄 Server mood auto-updated to: {detected}")
if detected == "asleep":
server_manager.set_server_sleep_state(message.guild.id, True)
@@ -580,15 +588,15 @@ async def on_message(message):
server_manager.set_server_sleep_state(message.guild.id, False)
server_manager.set_server_mood(message.guild.id, "neutral")
await update_server_nickname(message.guild.id)
print(f"🌅 Server {message.guild.name} woke up from auto-sleep")
logger.info(f"🌅 Server {message.guild.name} woke up from auto-sleep")
globals.client.loop.create_task(delayed_wakeup())
else:
print(f"⚠️ No server config found for guild {message.guild.id}, skipping mood detection")
logger.error(f"No server config found for guild {message.guild.id}, skipping mood detection")
except Exception as e:
print(f"⚠️ Error in server mood detection: {e}")
logger.error(f"Error in server mood detection: {e}")
elif is_dm:
print("💌 DM message - no mood detection (DM mood only changes via auto-rotation)")
logger.debug("DM message - no mood detection (DM mood only changes via auto-rotation)")
# V2: Track message for autonomous engine (non-blocking, no LLM calls)
# IMPORTANT: Only call this if the message was NOT addressed to Miku
@@ -645,7 +653,7 @@ async def on_raw_reaction_add(payload):
)
reactor_type = "🤖 Miku" if is_bot_reactor else f"👤 {user.display_name}"
print(f" DM reaction added: {emoji_str} by {reactor_type} on message {payload.message_id}")
logger.debug(f"DM reaction added: {emoji_str} by {reactor_type} on message {payload.message_id}")
@globals.client.event
async def on_raw_reaction_remove(payload):
@@ -683,7 +691,7 @@ async def on_raw_reaction_remove(payload):
)
reactor_type = "🤖 Miku" if user.id == globals.client.user.id else f"👤 {user.display_name}"
print(f" DM reaction removed: {emoji_str} by {reactor_type} from message {payload.message_id}")
logger.debug(f"DM reaction removed: {emoji_str} by {reactor_type} from message {payload.message_id}")
@globals.client.event
async def on_presence_update(before, after):
@@ -698,16 +706,18 @@ async def on_member_join(member):
autonomous_member_join(member)
def start_api():
uvicorn.run(app, host="0.0.0.0", port=3939, log_level="info")
# Set log_level to "critical" to silence uvicorn's access logs
# Our custom api.requests middleware handles HTTP logging with better formatting and filtering
uvicorn.run(app, host="0.0.0.0", port=3939, log_level="critical")
def save_autonomous_state():
"""Save autonomous context on shutdown"""
try:
from utils.autonomous import autonomous_engine
autonomous_engine.save_context()
print("💾 Saved autonomous context on shutdown")
logger.info("💾 Saved autonomous context on shutdown")
except Exception as e:
print(f"⚠️ Failed to save autonomous context on shutdown: {e}")
logger.error(f"Failed to save autonomous context on shutdown: {e}")
# Register shutdown handlers
atexit.register(save_autonomous_state)