feat: Implement comprehensive non-hierarchical logging system

- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination of levels can be enabled; see the sketch after this list)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window
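
A minimal sketch of the non-hierarchical idea (the names ComponentLogFilter and the numeric value of the custom API level are illustrative assumptions, not necessarily what utils/logger.py does):

    import logging

    API = 25  # custom level between INFO (20) and WARNING (30); the exact number is an assumption
    logging.addLevelName(API, "API")

    class ComponentLogFilter(logging.Filter):
        """Pass a record only if its exact level name is enabled for this component."""
        def __init__(self, enabled_levels):
            super().__init__()
            self.enabled_levels = set(enabled_levels)

        def filter(self, record):
            # Set membership instead of a threshold: DEBUG can be on while INFO is off
            return record.levelname in self.enabled_levels

    def get_logger(component, enabled_levels=("INFO", "API", "WARNING", "ERROR", "CRITICAL")):
        logger = logging.getLogger(component)
        logger.setLevel(logging.DEBUG)  # keep the threshold wide open; the filter decides
        logger.addFilter(ComponentLogFilter(enabled_levels))
        return logger

With this shape, get_logger('llm', enabled_levels={"DEBUG", "ERROR"}) would emit debug and error records while dropping info, matching the "any combination" behaviour described above.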

Components: bot, api, api.requests, autonomous, persona, vision, llm,
conversation, mood, dm, scheduled, gpu, media, server, commands,
sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
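
The rotation and persistence bullets map directly onto the standard library; a sketch, assuming a log directory under the /app/memory volume mentioned above (file names and the JSON shape are illustrative):

    import json
    import logging.handlers

    handler = logging.handlers.RotatingFileHandler(
        "/app/memory/logs/bot.log",  # hypothetical path on the persisted Docker volume
        maxBytes=10 * 1024 * 1024,   # 10MB per file
        backupCount=5,               # 5 backups
    )

    # Per-component settings survive container restarts as plain JSON
    config = {"llm": {"enabled_levels": ["INFO", "WARNING", "ERROR"]}}
    with open("/app/memory/log_config.json", "w") as f:
        json.dump(config, f, indent=2)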
2026-01-10 20:46:19 +02:00
parent ce00f9bd95
commit 32c2a7b930
34 changed files with 2766 additions and 936 deletions


@@ -10,6 +10,10 @@ import os
 from utils.context_manager import get_context_for_response_type, get_complete_context
 from utils.moods import load_mood_description
 from utils.conversation_history import conversation_history
+from utils.logger import get_logger
+
+logger = get_logger('llm')
+
 
 def get_current_gpu_url():
     """Get the URL for the currently selected GPU for text models"""
@@ -23,7 +27,7 @@ def get_current_gpu_url():
         else:
             return globals.LLAMA_URL
     except Exception as e:
-        print(f"⚠️ GPU state read error: {e}, defaulting to NVIDIA")
+        logger.warning(f"GPU state read error: {e}, defaulting to NVIDIA")
         # Default to NVIDIA if state file doesn't exist
         return globals.LLAMA_URL
@@ -102,7 +106,7 @@ async def query_llama(user_prompt, user_id, guild_id=None, response_type="dm_res
     if model is None:
         if evil_mode:
             model = globals.EVIL_TEXT_MODEL  # Use DarkIdol uncensored model
-            print(f"😈 Using evil model: {model}")
+            logger.info(f"Using evil model: {model}")
         else:
             model = globals.TEXT_MODEL
@@ -155,7 +159,7 @@ You ARE Miku. Act like it."""
         is_sleeping = False
         forced_angry_until = None
         just_woken_up = False
-        print(f"😈 Using Evil mode with mood: {current_mood_name}")
+        logger.info(f"Using Evil mode with mood: {current_mood_name}")
     else:
         current_mood = globals.DM_MOOD_DESCRIPTION  # Default to DM mood
         current_mood_name = globals.DM_MOOD  # Default to DM mood name
@@ -175,14 +179,14 @@ You ARE Miku. Act like it."""
                 is_sleeping = server_config.is_sleeping
                 forced_angry_until = server_config.forced_angry_until
                 just_woken_up = server_config.just_woken_up
-                print(f"🎭 Using server mood: {current_mood_name} for guild {guild_id}")
+                logger.debug(f"Using server mood: {current_mood_name} for guild {guild_id}")
             else:
-                print(f"⚠️ No server config found for guild {guild_id}, using DM mood")
+                logger.warning(f"No server config found for guild {guild_id}, using DM mood")
         except Exception as e:
-            print(f"⚠️ Failed to get server mood for guild {guild_id}, falling back to DM mood: {e}")
+            logger.error(f"Failed to get server mood for guild {guild_id}, falling back to DM mood: {e}")
             # Fall back to DM mood if server mood fails
     elif not evil_mode:
-        print(f"🌍 Using DM mood: {globals.DM_MOOD}")
+        logger.debug(f"Using DM mood: {globals.DM_MOOD}")
 
     # Append angry wake-up note if JUST_WOKEN_UP flag is set (only in non-evil mode)
     if just_woken_up and not evil_mode:
@@ -262,7 +266,7 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
     try:
         # Get current GPU URL based on user selection
         llama_url = get_current_gpu_url()
-        print(f"🎮 Using GPU endpoint: {llama_url}")
+        logger.debug(f"Using GPU endpoint: {llama_url}")
 
         # Add timeout to prevent hanging indefinitely
         timeout = aiohttp.ClientTimeout(total=300)  # 300 second timeout
@@ -301,13 +305,13 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
                     return reply
                 else:
                     error_text = await response.text()
-                    print(f"Error from llama-swap: {response.status} - {error_text}")
+                    logger.error(f"Error from llama-swap: {response.status} - {error_text}")
                     # Don't save error responses to conversation history
                     return f"Error: {response.status}"
     except asyncio.TimeoutError:
         return "Sorry, the response took too long. Please try again."
     except Exception as e:
-        print(f"⚠️ Error in query_llama: {e}")
+        logger.error(f"Error in query_llama: {e}")
         return f"Sorry, there was an error: {str(e)}"
 
 # Backward compatibility alias for existing code