feat: Implement comprehensive non-hierarchical logging system
- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination can be enabled)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window

Components: bot, api, api.requests, autonomous, persona, vision, llm, conversation, mood, dm, scheduled, gpu, media, server, commands, sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
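For context, here is a minimal sketch of the "non-hierarchical level" idea using only the standard library `logging` module. The project's own `.logger` module (the `get_logger` imported in the diff below) is not shown in this commit beyond its import, so the `API` level value, the `LevelSetFilter` helper, and the log file path are illustrative assumptions, not the project's actual implementation.

```python
# Illustrative sketch only -- not the project's .logger module.
import logging
from logging.handlers import RotatingFileHandler

API = 25                                # assumed custom level between INFO (20) and WARNING (30)
logging.addLevelName(API, "API")

class LevelSetFilter(logging.Filter):
    """Pass records whose exact level is in an enabled set (non-hierarchical)."""
    def __init__(self, enabled_levels):
        super().__init__()
        self.enabled = set(enabled_levels)

    def filter(self, record):
        return record.levelno in self.enabled

logger = logging.getLogger("vision")    # per-component logger, as in get_logger('vision')
logger.setLevel(logging.DEBUG)          # let the filter decide, not the severity threshold

handler = RotatingFileHandler("vision.log",                       # hypothetical path
                              maxBytes=10 * 1024 * 1024,          # 10MB per file
                              backupCount=5)                      # 5 backups
handler.addFilter(LevelSetFilter({logging.DEBUG, API, logging.ERROR}))  # any combination
handler.setFormatter(logging.Formatter("%(asctime)s [%(name)s] %(levelname)s %(message)s"))
logger.addHandler(handler)

logger.debug("shown")                   # enabled
logger.info("hidden")                   # INFO not in the enabled set
logger.log(API, "shown as API")         # custom level enabled
```

Because the filter checks exact level membership instead of a minimum severity threshold, any combination of levels can be enabled per component, which is what distinguishes this scheme from the default hierarchical `setLevel` behaviour.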
@@ -25,8 +25,11 @@ import discord
import globals

from .danbooru_client import danbooru_client
from .logger import get_logger
import globals

logger = get_logger('vision')


class ProfilePictureManager:
"""Manages Miku's profile picture with intelligent cropping and face detection"""
@@ -55,10 +58,10 @@ class ProfilePictureManager:
async with aiohttp.ClientSession() as session:
async with session.get("http://anime-face-detector:6078/health", timeout=aiohttp.ClientTimeout(total=5)) as response:
if response.status == 200:
print("✅ Anime face detector API connected (pre-loaded)")
logger.info("Anime face detector API connected (pre-loaded)")
return True
except Exception as e:
print(f"ℹ️ Face detector not pre-loaded (container not running)")
logger.info(f"Face detector not pre-loaded (container not running)")
return False

async def _ensure_vram_available(self, debug: bool = False):
@@ -68,7 +71,7 @@ class ProfilePictureManager:
"""
try:
if debug:
print("💾 Swapping to text model to free VRAM for face detection...")
logger.info("Swapping to text model to free VRAM for face detection...")

# Make a simple request to text model to trigger swap
async with aiohttp.ClientSession() as session:
@@ -86,13 +89,13 @@ class ProfilePictureManager:
) as response:
if response.status == 200:
if debug:
print("✅ Vision model unloaded, VRAM available")
logger.debug("Vision model unloaded, VRAM available")
# Give system time to fully release VRAM
await asyncio.sleep(3)
return True
except Exception as e:
if debug:
print(f"⚠️ Could not swap models: {e}")
logger.error(f"Could not swap models: {e}")

return False

@@ -100,7 +103,7 @@ class ProfilePictureManager:
"""Start the face detector container using Docker socket API"""
try:
if debug:
print("🚀 Starting face detector container...")
logger.info("Starting face detector container...")

# Use Docker socket API to start container
import aiofiles
@@ -112,7 +115,7 @@ class ProfilePictureManager:
# Check if socket exists
if not os.path.exists(socket_path):
if debug:
print("⚠️ Docker socket not available")
logger.error("Docker socket not available")
return False

# Use aiohttp UnixConnector to communicate with Docker socket
@@ -127,7 +130,7 @@ class ProfilePictureManager:
if response.status not in [204, 304]: # 204=started, 304=already running
if debug:
error_text = await response.text()
print(f"⚠️ Failed to start container: {response.status} - {error_text}")
logger.error(f"Failed to start container: {response.status} - {error_text}")
return False

# Wait for API to be ready
@@ -140,32 +143,32 @@ class ProfilePictureManager:
) as response:
if response.status == 200:
if debug:
print(f"✅ Face detector ready (took {i+1}s)")
logger.info(f"Face detector ready (took {i+1}s)")
return True
except:
pass
await asyncio.sleep(1)

if debug:
print("⚠️ Face detector didn't become ready in time")
logger.warning("Face detector didn't become ready in time")
return False

except Exception as e:
if debug:
print(f"⚠️ Error starting face detector: {e}")
logger.error(f"Error starting face detector: {e}")
return False

async def _stop_face_detector(self, debug: bool = False):
"""Stop the face detector container using Docker socket API"""
try:
if debug:
print("🛑 Stopping face detector to free VRAM...")
logger.info("Stopping face detector to free VRAM...")

socket_path = "/var/run/docker.sock"

if not os.path.exists(socket_path):
if debug:
print("⚠️ Docker socket not available")
logger.error("Docker socket not available")
return

from aiohttp import UnixConnector
@@ -178,26 +181,26 @@ class ProfilePictureManager:
async with session.post(url, params={"t": 10}) as response: # 10 second timeout
if response.status in [204, 304]: # 204=stopped, 304=already stopped
if debug:
print("✅ Face detector stopped")
logger.info("Face detector stopped")
else:
if debug:
error_text = await response.text()
print(f"⚠️ Failed to stop container: {response.status} - {error_text}")
logger.warning(f"Failed to stop container: {response.status} - {error_text}")

except Exception as e:
if debug:
print(f"⚠️ Error stopping face detector: {e}")
logger.error(f"Error stopping face detector: {e}")

async def save_current_avatar_as_fallback(self):
"""Save the bot's current avatar as fallback (only if fallback doesn't exist)"""
try:
# Only save if fallback doesn't already exist
if os.path.exists(self.FALLBACK_PATH):
print("✅ Fallback avatar already exists, skipping save")
logger.info("Fallback avatar already exists, skipping save")
return True

if not globals.client or not globals.client.user:
print("⚠️ Bot client not ready")
logger.warning("Bot client not ready")
return False

avatar_asset = globals.client.user.avatar or globals.client.user.default_avatar
@@ -209,11 +212,11 @@ class ProfilePictureManager:
with open(self.FALLBACK_PATH, 'wb') as f:
f.write(avatar_bytes)

print(f"✅ Saved current avatar as fallback ({len(avatar_bytes)} bytes)")
logger.info(f"Saved current avatar as fallback ({len(avatar_bytes)} bytes)")
return True

except Exception as e:
print(f"⚠️ Error saving fallback avatar: {e}")
logger.error(f"Error saving fallback avatar: {e}")
return False

async def change_profile_picture(
@@ -251,7 +254,7 @@ class ProfilePictureManager:
if custom_image_bytes:
# Custom upload - no retry needed
if debug:
print("🖼️ Using provided custom image")
logger.info("Using provided custom image")
image_bytes = custom_image_bytes
result["source"] = "custom_upload"

@@ -259,7 +262,7 @@ class ProfilePictureManager:
try:
image = Image.open(io.BytesIO(image_bytes))
if debug:
print(f"📐 Original image size: {image.size}")
logger.debug(f"Original image size: {image.size}")

# Check if it's an animated GIF
if image.format == 'GIF':
@@ -269,11 +272,11 @@ class ProfilePictureManager:
is_animated_gif = True
image.seek(0) # Reset to first frame
if debug:
print("🎬 Detected animated GIF - will preserve animation")
logger.debug("Detected animated GIF - will preserve animation")
except EOFError:
# Only one frame, treat as static image
if debug:
print("🖼️ Single-frame GIF - will process as static image")
logger.debug("Single-frame GIF - will process as static image")

except Exception as e:
result["error"] = f"Failed to open image: {e}"
@@ -282,11 +285,11 @@ class ProfilePictureManager:
else:
# Danbooru - retry until we find a valid Miku image
if debug:
print(f"🎨 Searching Danbooru for Miku image (mood: {mood})")
logger.info(f"Searching Danbooru for Miku image (mood: {mood})")

for attempt in range(max_retries):
if attempt > 0 and debug:
print(f"🔄 Retry attempt {attempt + 1}/{max_retries}")
logger.info(f"Retry attempt {attempt + 1}/{max_retries}")

post = await danbooru_client.get_random_miku_image(mood=mood)
if not post:
@@ -302,23 +305,23 @@ class ProfilePictureManager:
continue

if debug:
print(f"✅ Downloaded image from Danbooru (post #{danbooru_client.get_post_metadata(post).get('id')})")
logger.info(f"Downloaded image from Danbooru (post #{danbooru_client.get_post_metadata(post).get('id')})")

# Load image with PIL
try:
temp_image = Image.open(io.BytesIO(temp_image_bytes))
if debug:
print(f"📐 Original image size: {temp_image.size}")
logger.debug(f"Original image size: {temp_image.size}")
except Exception as e:
if debug:
print(f"⚠️ Failed to open image: {e}")
logger.warning(f"Failed to open image: {e}")
continue

# Verify it's Miku
miku_verification = await self._verify_and_locate_miku(temp_image_bytes, debug=debug)
if not miku_verification["is_miku"]:
if debug:
print(f"❌ Image verification failed: not Miku, trying another...")
logger.warning(f"Image verification failed: not Miku, trying another...")
continue

# Success! This image is valid
@@ -330,7 +333,7 @@ class ProfilePictureManager:
# If multiple characters detected, use LLM's suggested crop region
if miku_verification.get("crop_region"):
if debug:
print(f"🎯 Using LLM-suggested crop region for Miku")
logger.debug(f"Using LLM-suggested crop region for Miku")
image = self._apply_crop_region(image, miku_verification["crop_region"])

break
@@ -344,11 +347,11 @@ class ProfilePictureManager:
# If this is an animated GIF, skip most processing and use raw bytes
if is_animated_gif:
if debug:
print("🎬 Using GIF fast path - skipping face detection and cropping")
logger.info("Using GIF fast path - skipping face detection and cropping")

# Generate description of the animated GIF
if debug:
print("📝 Generating GIF description using video analysis pipeline...")
logger.info("Generating GIF description using video analysis pipeline...")
description = await self._generate_gif_description(image_bytes, debug=debug)
if description:
# Save description to file
@@ -358,12 +361,12 @@ class ProfilePictureManager:
f.write(description)
result["metadata"]["description"] = description
if debug:
print(f"📝 Saved GIF description ({len(description)} chars)")
logger.info(f"Saved GIF description ({len(description)} chars)")
except Exception as e:
print(f"⚠️ Failed to save description file: {e}")
logger.error(f"Failed to save description file: {e}")
else:
if debug:
print("⚠️ GIF description generation returned None")
logger.error("GIF description generation returned None")

# Extract dominant color from first frame
dominant_color = self._extract_dominant_color(image, debug=debug)
@@ -373,14 +376,14 @@ class ProfilePictureManager:
"hex": "#{:02x}{:02x}{:02x}".format(*dominant_color)
}
if debug:
print(f"🎨 Dominant color from first frame: RGB{dominant_color} (#{result['metadata']['dominant_color']['hex'][1:]})")
logger.debug(f"Dominant color from first frame: RGB{dominant_color} (#{result['metadata']['dominant_color']['hex'][1:]})")

# Save the original GIF bytes
with open(self.CURRENT_PATH, 'wb') as f:
f.write(image_bytes)

if debug:
print(f"💾 Saved animated GIF ({len(image_bytes)} bytes)")
logger.info(f"Saved animated GIF ({len(image_bytes)} bytes)")

# Update Discord avatar with original GIF
if globals.client and globals.client.user:
@@ -401,7 +404,7 @@ class ProfilePictureManager:
# Save metadata
self._save_metadata(result["metadata"])

print(f"✅ Animated profile picture updated successfully!")
logger.info(f"Animated profile picture updated successfully!")

# Update role colors if we have a dominant color
if dominant_color:
@@ -411,12 +414,13 @@ class ProfilePictureManager:

except discord.HTTPException as e:
result["error"] = f"Discord API error: {e}"
print(f"⚠️ Failed to update Discord avatar with GIF: {e}")
print(f"   Note: Animated avatars require Discord Nitro")
logger.warning(f"Failed to update Discord avatar with GIF: {e}")
if debug:
logger.debug("Note: Animated avatars require Discord Nitro")
return result
except Exception as e:
result["error"] = f"Unexpected error updating avatar: {e}"
print(f"⚠️ Unexpected error: {e}")
logger.error(f"Unexpected error: {e}")
return result
else:
result["error"] = "Bot client not ready"
@@ -425,7 +429,7 @@ class ProfilePictureManager:
# === NORMAL STATIC IMAGE PATH ===
# Step 2: Generate description of the validated image
if debug:
print("📝 Generating image description...")
logger.info("Generating image description...")
description = await self._generate_image_description(image_bytes, debug=debug)
if description:
# Save description to file
@@ -435,12 +439,12 @@ class ProfilePictureManager:
f.write(description)
result["metadata"]["description"] = description
if debug:
print(f"📝 Saved image description ({len(description)} chars)")
logger.info(f"Saved image description ({len(description)} chars)")
except Exception as e:
print(f"⚠️ Failed to save description file: {e}")
logger.warning(f"Failed to save description file: {e}")
else:
if debug:
print("⚠️ Description generation returned None")
logger.warning("Description generation returned None")

# Step 3: Detect face and crop intelligently
cropped_image = await self._intelligent_crop(image, image_bytes, target_size=512, debug=debug)
@@ -459,7 +463,7 @@ class ProfilePictureManager:
f.write(cropped_bytes)

if debug:
print(f"💾 Saved cropped image ({len(cropped_bytes)} bytes)")
logger.info(f"Saved cropped image ({len(cropped_bytes)} bytes)")

# Step 5: Extract dominant color from saved current.png
saved_image = Image.open(self.CURRENT_PATH)
@@ -470,7 +474,7 @@ class ProfilePictureManager:
"hex": "#{:02x}{:02x}{:02x}".format(*dominant_color)
}
if debug:
print(f"🎨 Dominant color: RGB{dominant_color} (#{result['metadata']['dominant_color']['hex'][1:]})")
logger.debug(f"Dominant color: RGB{dominant_color} (#{result['metadata']['dominant_color']['hex'][1:]})")

# Step 6: Update Discord avatar
if globals.client and globals.client.user:
@@ -495,7 +499,7 @@ class ProfilePictureManager:
# Save metadata
self._save_metadata(result["metadata"])

print(f"✅ Profile picture updated successfully!")
logger.info(f"Profile picture updated successfully!")

# Step 7: Update role colors across all servers
if dominant_color:
@@ -503,16 +507,16 @@ class ProfilePictureManager:

except discord.HTTPException as e:
result["error"] = f"Discord API error: {e}"
print(f"⚠️ Failed to update Discord avatar: {e}")
logger.warning(f"Failed to update Discord avatar: {e}")
except Exception as e:
result["error"] = f"Unexpected error updating avatar: {e}"
print(f"⚠️ Unexpected error: {e}")
logger.error(f"Unexpected error: {e}")
else:
result["error"] = "Bot client not ready"

except Exception as e:
result["error"] = f"Unexpected error: {e}"
print(f"⚠️ Error in change_profile_picture: {e}")
logger.error(f"Error in change_profile_picture: {e}")

return result

@@ -524,7 +528,7 @@ class ProfilePictureManager:
if response.status == 200:
return await response.read()
except Exception as e:
print(f"⚠️ Error downloading image: {e}")
logger.error(f"Error downloading image: {e}")
return None

async def _generate_image_description(self, image_bytes: bytes, debug: bool = False) -> Optional[str]:
@@ -544,7 +548,7 @@ class ProfilePictureManager:
image_b64 = base64.b64encode(image_bytes).decode('utf-8')

if debug:
print(f"📸 Encoded image: {len(image_b64)} chars, calling vision model...")
logger.debug(f"Encoded image: {len(image_b64)} chars, calling vision model...")

prompt = """This is an image of Hatsune Miku that will be used as a profile picture.
Please describe this image in detail, including:
@@ -583,7 +587,7 @@ Keep the description conversational and in second-person (referring to Miku as "
headers = {"Content-Type": "application/json"}

if debug:
print(f"🌐 Calling {globals.LLAMA_URL}/v1/chat/completions with model {globals.VISION_MODEL}")
logger.debug(f"Calling {globals.LLAMA_URL}/v1/chat/completions with model {globals.VISION_MODEL}")

async with aiohttp.ClientSession() as session:
async with session.post(f"{globals.LLAMA_URL}/v1/chat/completions", json=payload, headers=headers, timeout=aiohttp.ClientTimeout(total=60)) as resp:
@@ -591,8 +595,8 @@ Keep the description conversational and in second-person (referring to Miku as "
data = await resp.json()

if debug:
print(f"📦 API Response keys: {data.keys()}")
print(f"📦 Choices: {data.get('choices', [])}")
logger.debug(f"API Response keys: {data.keys()}")
logger.debug(f"Choices: {data.get('choices', [])}")

# Try to get content from the response
choice = data.get("choices", [{}])[0]
@@ -607,21 +611,21 @@ Keep the description conversational and in second-person (referring to Miku as "

if description and description.strip():
if debug:
print(f"✅ Generated description: {description[:100]}...")
logger.info(f"Generated description: {description[:100]}...")
return description.strip()
else:
if debug:
print(f"⚠️ Description is empty or None")
print(f"   Full response: {data}")
logger.warning(f"Description is empty or None")
logger.warning(f"   Full response: {data}")
else:
print(f"⚠️ Description is empty or None")
logger.warning(f"Description is empty or None")
return None
else:
error_text = await resp.text()
print(f"❌ Vision API error generating description: {resp.status} - {error_text}")
logger.error(f"Vision API error generating description: {resp.status} - {error_text}")

except Exception as e:
print(f"⚠️ Error generating image description: {e}")
logger.error(f"Error generating image description: {e}")
import traceback
traceback.print_exc()

@@ -642,19 +646,19 @@ Keep the description conversational and in second-person (referring to Miku as "
from utils.image_handling import extract_video_frames, analyze_video_with_vision

if debug:
print("🎬 Extracting frames from GIF...")
logger.info("Extracting frames from GIF...")

# Extract frames from the GIF (6 frames for good analysis)
frames = await extract_video_frames(gif_bytes, num_frames=6)

if not frames:
if debug:
print("⚠️ Failed to extract frames from GIF")
logger.warning("Failed to extract frames from GIF")
return None

if debug:
print(f"✅ Extracted {len(frames)} frames from GIF")
print(f"🌐 Analyzing GIF with vision model...")
logger.info(f"Extracted {len(frames)} frames from GIF")
logger.info(f"Analyzing GIF with vision model...")

# Use the existing analyze_video_with_vision function (no timeout issues)
# Note: This uses a generic prompt, but it works reliably
@@ -662,15 +666,15 @@ Keep the description conversational and in second-person (referring to Miku as "

if description and description.strip() and not description.startswith("Error"):
if debug:
print(f"✅ Generated GIF description: {description[:100]}...")
logger.info(f"Generated GIF description: {description[:100]}...")
return description.strip()
else:
if debug:
print(f"⚠️ GIF description failed or empty: {description}")
logger.warning(f"GIF description failed or empty: {description}")
return None

except Exception as e:
print(f"⚠️ Error generating GIF description: {e}")
logger.error(f"Error generating GIF description: {e}")
import traceback
traceback.print_exc()

@@ -740,11 +744,11 @@ Respond in JSON format:
response = data.get("choices", [{}])[0].get("message", {}).get("content", "")
else:
error_text = await resp.text()
print(f"❌ Vision API error: {resp.status} - {error_text}")
logger.error(f"Vision API error: {resp.status} - {error_text}")
return result

if debug:
print(f"🤖 Vision model response: {response}")
logger.debug(f"Vision model response: {response}")

# Parse JSON response
import re
@@ -766,7 +770,7 @@ Respond in JSON format:
result["is_miku"] = "yes" in response_lower or "miku" in response_lower

except Exception as e:
print(f"⚠️ Error in vision verification: {e}")
logger.warning(f"Error in vision verification: {e}")
# Assume it's Miku on error (trust Danbooru tags)
result["is_miku"] = True

@@ -793,7 +797,7 @@ Respond in JSON format:
region["vertical"] = "bottom"

if debug:
print(f"📍 Parsed location '{location}' -> {region}")
logger.debug(f"Parsed location '{location}' -> {region}")

return region

@@ -856,11 +860,11 @@ Respond in JSON format:

if face_detection and face_detection.get('center'):
if debug:
print(f"😊 Face detected at {face_detection['center']}")
logger.debug(f"Face detected at {face_detection['center']}")
crop_center = face_detection['center']
else:
if debug:
print("🎯 No face detected, using saliency detection")
logger.debug("No face detected, using saliency detection")
# Fallback to saliency detection
cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
crop_center = self._detect_saliency(cv_image, debug=debug)
@@ -895,12 +899,12 @@ Respond in JSON format:
top = 0
# Adjust crop_center for logging
if debug:
print(f"⚠️ Face too close to top edge, shifted crop to y=0")
logger.debug(f"Face too close to top edge, shifted crop to y=0")
elif top + crop_size > height:
# Face is too close to bottom edge
top = height - crop_size
if debug:
print(f"⚠️ Face too close to bottom edge, shifted crop to y={top}")
logger.debug(f"Face too close to bottom edge, shifted crop to y={top}")

# Crop
cropped = image.crop((left, top, left + crop_size, top + crop_size))
@@ -909,7 +913,7 @@ Respond in JSON format:
cropped = cropped.resize((target_size, target_size), Image.Resampling.LANCZOS)

if debug:
print(f"✂️ Cropped to {target_size}x{target_size} centered at {crop_center}")
logger.debug(f"Cropped to {target_size}x{target_size} centered at {crop_center}")

return cropped

@@ -933,7 +937,7 @@ Respond in JSON format:
# Step 2: Start face detector container
if not await self._start_face_detector(debug=debug):
if debug:
print("⚠️ Could not start face detector")
logger.error("Could not start face detector")
return None

face_detector_started = True
@@ -951,14 +955,14 @@ Respond in JSON format:
) as response:
if response.status != 200:
if debug:
print(f"⚠️ Face detection API returned status {response.status}")
logger.error(f"Face detection API returned status {response.status}")
return None

result = await response.json()

if result.get('count', 0) == 0:
if debug:
print("👤 No faces detected by API")
logger.debug("No faces detected by API")
return None

# Get detections and pick the one with highest confidence
@@ -981,9 +985,9 @@ Respond in JSON format:
if debug:
width = int(x2 - x1)
height = int(y2 - y1)
print(f"👤 Detected {len(detections)} face(s) via API, using best at ({center_x}, {center_y}) [confidence: {confidence:.2%}]")
print(f"   Bounding box: x={int(x1)}, y={int(y1)}, w={width}, h={height}")
print(f"   Keypoints: {len(keypoints)} facial landmarks detected")
logger.debug(f"Detected {len(detections)} face(s) via API, using best at ({center_x}, {center_y}) [confidence: {confidence:.2%}]")
logger.debug(f"   Bounding box: x={int(x1)}, y={int(y1)}, w={width}, h={height}")
logger.debug(f"   Keypoints: {len(keypoints)} facial landmarks detected")

return {
'center': (center_x, center_y),
@@ -995,10 +999,10 @@ Respond in JSON format:

except asyncio.TimeoutError:
if debug:
print("⚠️ Face detection API timeout")
logger.warning("Face detection API timeout")
except Exception as e:
if debug:
print(f"⚠️ Error calling face detection API: {e}")
logger.error(f"Error calling face detection API: {e}")
finally:
# Always stop face detector to free VRAM
if face_detector_started:
@@ -1027,12 +1031,12 @@ Respond in JSON format:
_, max_val, _, max_loc = cv2.minMaxLoc(saliency_map)

if debug:
print(f"🎯 Saliency peak at {max_loc}")
logger.debug(f"Saliency peak at {max_loc}")

return max_loc
except Exception as e:
if debug:
print(f"⚠️ Saliency detection failed: {e}")
logger.error(f"Saliency detection failed: {e}")

# Ultimate fallback: center of image
height, width = cv_image.shape[:2]
@@ -1070,7 +1074,7 @@ Respond in JSON format:

if len(pixels) == 0:
if debug:
print("⚠️ No valid pixels after filtering, using fallback")
logger.warning("No valid pixels after filtering, using fallback")
return (200, 200, 200) # Neutral gray fallback

# Use k-means to find dominant colors
@@ -1085,11 +1089,11 @@ Respond in JSON format:
counts = np.bincount(labels)

if debug:
print(f"🎨 Found {n_colors} color clusters:")
logger.debug(f"Found {n_colors} color clusters:")
for i, (color, count) in enumerate(zip(colors, counts)):
pct = (count / len(labels)) * 100
r, g, b = color.astype(int)
print(f"   {i+1}. RGB({r}, {g}, {b}) = #{r:02x}{g:02x}{b:02x} ({pct:.1f}%)")
logger.debug(f"   {i+1}. RGB({r}, {g}, {b}) = #{r:02x}{g:02x}{b:02x} ({pct:.1f}%)")

# Sort by frequency
sorted_indices = np.argsort(-counts)
@@ -1108,7 +1112,7 @@ Respond in JSON format:
saturation = (max_c - min_c) / max_c if max_c > 0 else 0

if debug:
print(f"   Color RGB({r}, {g}, {b}) saturation: {saturation:.2f}")
logger.debug(f"   Color RGB({r}, {g}, {b}) saturation: {saturation:.2f}")

# Prefer more saturated colors
if saturation > best_saturation:
@@ -1118,7 +1122,7 @@ Respond in JSON format:

if best_color:
if debug:
print(f"🎨 Selected color: RGB{best_color} (saturation: {best_saturation:.2f})")
logger.debug(f"Selected color: RGB{best_color} (saturation: {best_saturation:.2f})")
return best_color

# Fallback to most common color
@@ -1126,12 +1130,12 @@ Respond in JSON format:
# Convert to native Python ints
result = (int(dominant_color[0]), int(dominant_color[1]), int(dominant_color[2]))
if debug:
print(f"🎨 Using most common color: RGB{result}")
logger.debug(f"Using most common color: RGB{result}")
return result

except Exception as e:
if debug:
print(f"⚠️ Error extracting dominant color: {e}")
logger.error(f"Error extracting dominant color: {e}")
return None

async def _update_role_colors(self, color: Tuple[int, int, int], debug: bool = False):
@@ -1143,15 +1147,15 @@ Respond in JSON format:
debug: Enable debug output
"""
if debug:
print(f"🎨 Starting role color update with RGB{color}")
logger.debug(f"Starting role color update with RGB{color}")

if not globals.client:
if debug:
print("⚠️ No client available for role updates")
logger.error("No client available for role updates")
return

if debug:
print(f"🌐 Found {len(globals.client.guilds)} guild(s)")
logger.debug(f"Found {len(globals.client.guilds)} guild(s)")

# Convert RGB to Discord color (integer)
discord_color = discord.Color.from_rgb(*color)
@@ -1162,20 +1166,20 @@ Respond in JSON format:
for guild in globals.client.guilds:
try:
if debug:
print(f"🔍 Checking guild: {guild.name}")
logger.debug(f"Checking guild: {guild.name}")

# Find the bot's top role (usually colored role)
member = guild.get_member(globals.client.user.id)
if not member:
if debug:
print(f"  ⚠️ Bot not found as member in {guild.name}")
logger.warning(f"  Bot not found as member in {guild.name}")
continue

# Get the highest role that the bot has (excluding @everyone)
roles = [r for r in member.roles if r.name != "@everyone"]
if not roles:
if debug:
print(f"  ⚠️ No roles found in {guild.name}")
logger.warning(f"  No roles found in {guild.name}")
continue

# Look for a dedicated color role first (e.g., "Miku Color")
@@ -1191,19 +1195,19 @@ Respond in JSON format:
# Use dedicated color role if found, otherwise use top role
if color_role:
if debug:
print(f"  🎨 Found dedicated color role: {color_role.name} (position {color_role.position})")
logger.debug(f"  Found dedicated color role: {color_role.name} (position {color_role.position})")
target_role = color_role
else:
if debug:
print(f"  📝 No 'Miku Color' role found, using top role: {bot_top_role.name} (position {bot_top_role.position})")
logger.debug(f"  No 'Miku Color' role found, using top role: {bot_top_role.name} (position {bot_top_role.position})")
target_role = bot_top_role

# Check permissions
can_manage = guild.me.guild_permissions.manage_roles

if debug:
print(f"  🔑 Manage roles permission: {can_manage}")
print(f"  📊 Bot top role: {bot_top_role.name} (pos {bot_top_role.position}), Target: {target_role.name} (pos {target_role.position})")
logger.debug(f"  Manage roles permission: {can_manage}")
logger.debug(f"  Bot top role: {bot_top_role.name} (pos {bot_top_role.position}), Target: {target_role.name} (pos {target_role.position})")

# Only update if we have permission and it's not a special role
if can_manage:
@@ -1219,28 +1223,28 @@ Respond in JSON format:

updated_count += 1
if debug:
print(f"  ✅ Updated role color in {guild.name}: {target_role.name}")
logger.info(f"  Updated role color in {guild.name}: {target_role.name}")
else:
if debug:
print(f"  ⚠️ No manage_roles permission in {guild.name}")
logger.warning(f"  No manage_roles permission in {guild.name}")

except discord.Forbidden:
failed_count += 1
if debug:
print(f"  ❌ Forbidden: No permission to update role in {guild.name}")
logger.error(f"  Forbidden: No permission to update role in {guild.name}")
except Exception as e:
failed_count += 1
if debug:
print(f"  ❌ Error updating role in {guild.name}: {e}")
logger.error(f"  Error updating role in {guild.name}: {e}")
import traceback
traceback.print_exc()

if updated_count > 0:
print(f"🎨 Updated role colors in {updated_count} server(s)")
logger.info(f"Updated role colors in {updated_count} server(s)")
else:
print(f"⚠️ No roles were updated (failed: {failed_count})")
logger.warning(f"No roles were updated (failed: {failed_count})")
if failed_count > 0 and debug:
print(f"⚠️ Failed to update {failed_count} server(s)")
logger.error(f"Failed to update {failed_count} server(s)")

async def set_custom_role_color(self, hex_color: str, debug: bool = False) -> Dict:
"""
@@ -1267,7 +1271,7 @@ Respond in JSON format:
}

if debug:
print(f"🎨 Setting custom role color: #{hex_color} RGB{color}")
logger.debug(f"Setting custom role color: #{hex_color} RGB{color}")

await self._update_role_colors(color, debug=debug)

@@ -1290,7 +1294,7 @@ Respond in JSON format:
Dict with success status
"""
if debug:
print(f"🎨 Resetting to fallback color: RGB{self.FALLBACK_ROLE_COLOR}")
logger.debug(f"Resetting to fallback color: RGB{self.FALLBACK_ROLE_COLOR}")

await self._update_role_colors(self.FALLBACK_ROLE_COLOR, debug=debug)

@@ -1308,7 +1312,7 @@ Respond in JSON format:
with open(self.METADATA_PATH, 'w') as f:
json.dump(metadata, f, indent=2)
except Exception as e:
print(f"⚠️ Error saving metadata: {e}")
logger.error(f"Error saving metadata: {e}")

def load_metadata(self) -> Optional[Dict]:
"""Load metadata about current profile picture"""
@@ -1317,14 +1321,14 @@ Respond in JSON format:
with open(self.METADATA_PATH, 'r') as f:
return json.load(f)
except Exception as e:
print(f"⚠️ Error loading metadata: {e}")
logger.error(f"Error loading metadata: {e}")
return None

async def restore_fallback(self) -> bool:
"""Restore the fallback profile picture"""
try:
if not os.path.exists(self.FALLBACK_PATH):
print("⚠️ No fallback avatar found")
logger.warning("No fallback avatar found")
return False

with open(self.FALLBACK_PATH, 'rb') as f:
@@ -1341,11 +1345,11 @@ Respond in JSON format:
else:
await globals.client.user.edit(avatar=avatar_bytes)

print("✅ Restored fallback avatar")
logger.info("Restored fallback avatar")
return True

except Exception as e:
print(f"⚠️ Error restoring fallback: {e}")
logger.error(f"Error restoring fallback: {e}")

return False

@@ -1362,7 +1366,7 @@ Respond in JSON format:
with open(description_path, 'r', encoding='utf-8') as f:
return f.read().strip()
except Exception as e:
print(f"⚠️ Error reading description: {e}")
logger.error(f"Error reading description: {e}")

return None