feat: Implement comprehensive non-hierarchical logging system

- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination can be enabled)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window

Components: bot, api, api.requests, autonomous, persona, vision, llm,
conversation, mood, dm, scheduled, gpu, media, server, commands,
sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
This commit is contained in:
2026-01-10 20:46:19 +02:00
parent ce00f9bd95
commit 32c2a7b930
34 changed files with 2766 additions and 936 deletions

View File

@@ -1,4 +1,4 @@
# face_detector_manager.py
# face_detector_manager.py
"""
Manages on-demand starting/stopping of anime-face-detector container
to free up VRAM when not needed.
@@ -9,6 +9,9 @@ import aiohttp
import subprocess
import time
from typing import Optional, Dict
from utils.logger import get_logger
logger = get_logger('gpu')
class FaceDetectorManager:
@@ -31,7 +34,7 @@ class FaceDetectorManager:
"""
try:
if debug:
print("🚀 Starting anime-face-detector container...")
logger.debug("Starting anime-face-detector container...")
# Start container using docker compose
result = subprocess.run(
@@ -44,7 +47,7 @@ class FaceDetectorManager:
if result.returncode != 0:
if debug:
print(f"⚠️ Failed to start container: {result.stderr}")
logger.error(f"Failed to start container: {result.stderr}")
return False
# Wait for API to be ready
@@ -53,17 +56,17 @@ class FaceDetectorManager:
if await self._check_health():
self.is_running = True
if debug:
print(f"Face detector container started and ready")
logger.info(f"Face detector container started and ready")
return True
await asyncio.sleep(1)
if debug:
print(f"⚠️ Container started but API not ready after {self.STARTUP_TIMEOUT}s")
logger.warning(f"Container started but API not ready after {self.STARTUP_TIMEOUT}s")
return False
except Exception as e:
if debug:
print(f"⚠️ Error starting face detector container: {e}")
logger.error(f"Error starting face detector container: {e}")
return False
async def stop_container(self, debug: bool = False) -> bool:
@@ -75,7 +78,7 @@ class FaceDetectorManager:
"""
try:
if debug:
print("🛑 Stopping anime-face-detector container...")
logger.debug("Stopping anime-face-detector container...")
result = subprocess.run(
["docker", "compose", "stop", self.CONTAINER_NAME],
@@ -88,16 +91,16 @@ class FaceDetectorManager:
if result.returncode == 0:
self.is_running = False
if debug:
print("Face detector container stopped")
logger.info("Face detector container stopped")
return True
else:
if debug:
print(f"⚠️ Failed to stop container: {result.stderr}")
logger.error(f"Failed to stop container: {result.stderr}")
return False
except Exception as e:
if debug:
print(f"⚠️ Error stopping face detector container: {e}")
logger.error(f"Error stopping face detector container: {e}")
return False
async def _check_health(self) -> bool:
@@ -137,7 +140,7 @@ class FaceDetectorManager:
# Step 1: Unload vision model if callback provided
if unload_vision_model:
if debug:
print("📤 Unloading vision model to free VRAM...")
logger.debug("Unloading vision model to free VRAM...")
await unload_vision_model()
await asyncio.sleep(2) # Give time for VRAM to clear
@@ -145,7 +148,7 @@ class FaceDetectorManager:
if not self.is_running:
if not await self.start_container(debug=debug):
if debug:
print("⚠️ Could not start face detector container")
logger.error("Could not start face detector container")
return None
container_was_started = True
@@ -161,7 +164,7 @@ class FaceDetectorManager:
if reload_vision_model:
if debug:
print("📥 Reloading vision model...")
logger.debug("Reloading vision model...")
await reload_vision_model()
async def _detect_face_api(self, image_bytes: bytes, debug: bool = False) -> Optional[Dict]:
@@ -178,14 +181,14 @@ class FaceDetectorManager:
) as response:
if response.status != 200:
if debug:
print(f"⚠️ Face detection API returned status {response.status}")
logger.warning(f"Face detection API returned status {response.status}")
return None
result = await response.json()
if result.get('count', 0) == 0:
if debug:
print("👤 No faces detected by API")
logger.debug("No faces detected by API")
return None
detections = result.get('detections', [])
@@ -205,9 +208,9 @@ class FaceDetectorManager:
if debug:
width = int(x2 - x1)
height = int(y2 - y1)
print(f"👤 Detected {len(detections)} face(s) via API, using best at ({center_x}, {center_y}) [confidence: {confidence:.2%}]")
print(f" Bounding box: x={int(x1)}, y={int(y1)}, w={width}, h={height}")
print(f" Keypoints: {len(keypoints)} facial landmarks detected")
logger.debug(f"Detected {len(detections)} face(s) via API, using best at ({center_x}, {center_y}) [confidence: {confidence:.2%}]")
logger.debug(f" Bounding box: x={int(x1)}, y={int(y1)}, w={width}, h={height}")
logger.debug(f" Keypoints: {len(keypoints)} facial landmarks detected")
return {
'center': (center_x, center_y),
@@ -219,7 +222,7 @@ class FaceDetectorManager:
except Exception as e:
if debug:
print(f"⚠️ Error calling face detection API: {e}")
logger.error(f"Error calling face detection API: {e}")
return None