feat: add Profile Picture Management tab with manual crop, description editor

- profile_picture_manager.py:
  - Add ORIGINAL_PATH constant; save full-res original before every crop
  - Add skip_crop param to change_profile_picture() for manual crop workflow
  - Add manual_crop(x,y,w,h) method with Discord avatar update + role color sync
  - Add auto_crop_only() to re-run face-detection crop on stored original
  - Add update_description() with Cheshire Cat declarative memory re-injection
  - Add regenerate_description() via vision model
  - Skip crop step if image is already at/below 512x512

- api.py:
  - GET /profile-picture/image/original — serve full-res original (no-cache)
  - GET /profile-picture/image/current  — serve current cropped avatar (no-cache)
  - POST /profile-picture/change-no-crop — acquire image, skip auto-crop
  - POST /profile-picture/manual-crop   — apply crop coords {x,y,width,height}
  - POST /profile-picture/auto-crop     — re-run intelligent crop on original
  - POST /profile-picture/description   — save freeform description + Cat inject
  - POST /profile-picture/regenerate-description — re-generate via vision model
  - GET  /profile-picture/description   — fetch current description text

- index.html:
  - Add new tab11 '🖼️ Profile Picture Management'
  - Remove PFP + role color sections from Actions tab (tab2)
  - Add Cropper.js 1.6.2 via CDN for manual square crop
  - Tab layout: action buttons, file upload, auto/manual crop toggle,
    Cropper.js interface, side-by-side original/cropped previews,
    role color management, freeform description editor, metadata box (bottom)
  - Wire switchTab hook for tab11 → loadPfpTab()
  - All new JS functions: pfpChangeDanbooru, pfpUploadCustom, pfpRestoreFallback,
    pfpShowCropInterface, pfpApplyManualCrop, pfpApplyAutoCrop, pfpSaveDescription,
    pfpRegenerateDescription, pfpRefreshPreviews, setCustomRoleColor, resetRoleColor
This commit is contained in:
2026-03-30 15:10:19 +03:00
parent 08fb465c67
commit f092cadb9d
3 changed files with 991 additions and 146 deletions

View File

@@ -37,6 +37,7 @@ class ProfilePictureManager:
PROFILE_PIC_DIR = "memory/profile_pictures"
FALLBACK_PATH = "memory/profile_pictures/fallback.png"
CURRENT_PATH = "memory/profile_pictures/current.png"
ORIGINAL_PATH = "memory/profile_pictures/original.png"
METADATA_PATH = "memory/profile_pictures/metadata.json"
# Face detection API endpoint
@@ -244,7 +245,8 @@ class ProfilePictureManager:
mood: Optional[str] = None,
custom_image_bytes: Optional[bytes] = None,
debug: bool = False,
max_retries: int = 5
max_retries: int = 5,
skip_crop: bool = False
) -> Dict:
"""
Main function to change Miku's profile picture.
@@ -254,6 +256,7 @@ class ProfilePictureManager:
custom_image_bytes: If provided, use this image instead of Danbooru
debug: Enable debug output
max_retries: Maximum number of attempts to find a valid Miku image (for Danbooru)
skip_crop: If True, save original but skip cropping/description (for manual crop workflow)
Returns:
Dict with status and metadata
@@ -467,6 +470,24 @@ class ProfilePictureManager:
return result
# === NORMAL STATIC IMAGE PATH ===
# Save full-resolution original for manual cropping later
with open(self.ORIGINAL_PATH, 'wb') as f:
f.write(image_bytes)
if debug:
logger.info(f"Saved full-resolution original ({len(image_bytes)} bytes, {image.size[0]}x{image.size[1]})")
result["metadata"]["original_width"] = image.size[0]
result["metadata"]["original_height"] = image.size[1]
# If skip_crop requested (manual crop workflow), return early with original saved
if skip_crop:
result["success"] = True
result["metadata"]["changed_at"] = datetime.now().isoformat()
result["metadata"]["skip_crop"] = True
self._save_metadata(result["metadata"])
if debug:
logger.info("Skipping auto-crop (manual crop workflow) - original saved")
return result
# Step 2: Generate description of the validated image
if debug:
logger.info("Generating image description...")
@@ -1425,6 +1446,312 @@ Respond in JSON format:
logger.error(f"Error reading description: {e}")
return None
async def manual_crop(self, x: int, y: int, width: int, height: int, target_size: int = 512, debug: bool = False) -> Dict:
    """
    Manually crop the stored original image and apply it as the Discord avatar.

    Validates the requested region against the stored original, crops and
    resizes it to a square avatar, persists it to CURRENT_PATH, pushes it to
    Discord, then syncs role colors and (in bipolar mode) webhook avatars.

    Args:
        x: Left edge of crop region (pixels)
        y: Top edge of crop region (pixels)
        width: Width of crop region (pixels)
        height: Height of crop region (pixels)
        target_size: Final resize target (default 512)
        debug: Enable debug output

    Returns:
        Dict with success status and metadata
    """
    result = {"success": False, "error": None, "metadata": {}}
    try:
        if not os.path.exists(self.ORIGINAL_PATH):
            result["error"] = "No original image found. Upload or fetch an image first."
            return result
        # Use a context manager so the underlying file handle is released:
        # Image.open() is lazy and keeps the file open until close()/GC.
        with Image.open(self.ORIGINAL_PATH) as image:
            img_width, img_height = image.size
            # Validate crop region before touching pixel data
            if x < 0 or y < 0:
                result["error"] = f"Crop coordinates must be non-negative (got x={x}, y={y})"
                return result
            if x + width > img_width or y + height > img_height:
                result["error"] = f"Crop region ({x},{y},{width},{height}) exceeds image bounds ({img_width}x{img_height})"
                return result
            if width < 64 or height < 64:
                result["error"] = f"Crop region too small (minimum 64x64, got {width}x{height})"
                return result
            # Perform crop, then resize to the square avatar target while the
            # source file is still open (crop/resize read lazily from it).
            cropped = image.crop((x, y, x + width, y + height))
            cropped = cropped.resize((target_size, target_size), Image.Resampling.LANCZOS)
        if debug:
            logger.info(f"Manual crop: ({x},{y},{width},{height}) -> {target_size}x{target_size}")
        # Encode as PNG and persist as the current avatar image
        output_buffer = io.BytesIO()
        cropped.save(output_buffer, format='PNG')
        cropped_bytes = output_buffer.getvalue()
        with open(self.CURRENT_PATH, 'wb') as f:
            f.write(cropped_bytes)
        # Dominant color drives the role-color sync further below
        dominant_color = self._extract_dominant_color(cropped, debug=debug)
        if dominant_color:
            result["metadata"]["dominant_color"] = {
                "rgb": dominant_color,
                "hex": "#{:02x}{:02x}{:02x}".format(*dominant_color)
            }
        # Update Discord avatar
        if globals.client and globals.client.user:
            try:
                if globals.client.loop and globals.client.loop.is_running():
                    # NOTE(review): blocking on future.result() assumes this
                    # coroutine is NOT running on globals.client.loop itself,
                    # otherwise it would stall until the timeout — confirm
                    # callers invoke this from a different thread/loop.
                    future = asyncio.run_coroutine_threadsafe(
                        globals.client.user.edit(avatar=cropped_bytes),
                        globals.client.loop
                    )
                    future.result(timeout=10)
                else:
                    await globals.client.user.edit(avatar=cropped_bytes)
                result["success"] = True
                result["metadata"]["changed_at"] = datetime.now().isoformat()
                result["metadata"]["crop_region"] = {"x": x, "y": y, "width": width, "height": height}
                # Merge into persisted metadata; a completed manual crop
                # supersedes any pending skip_crop flag from the upload step.
                existing_meta = self.load_metadata() or {}
                existing_meta.update(result["metadata"])
                existing_meta.pop("skip_crop", None)
                self._save_metadata(existing_meta)
                logger.info("Manual crop applied and Discord avatar updated")
                # Keep role colors in sync with the new avatar's palette
                if dominant_color:
                    await self._update_role_colors(dominant_color, debug=debug)
                # Best-effort: refresh bipolar-mode webhook avatars too
                if globals.BIPOLAR_MODE:
                    try:
                        from utils.bipolar_mode import update_webhook_avatars
                        await update_webhook_avatars(globals.client)
                    except Exception as e:
                        logger.warning(f"Failed to update bipolar webhook avatars: {e}")
            except discord.HTTPException as e:
                result["error"] = f"Discord API error: {e}"
            except Exception as e:
                result["error"] = f"Unexpected error: {e}"
        else:
            result["error"] = "Bot client not ready"
    except Exception as e:
        result["error"] = f"Error in manual crop: {e}"
        logger.error(f"Error in manual_crop: {e}")
    return result
async def auto_crop_only(self, debug: bool = False) -> Dict:
    """
    Run intelligent auto-crop on the stored original image and apply as Discord avatar.

    Args:
        debug: Enable debug output

    Returns:
        Dict with success status and metadata
    """
    result = {"success": False, "error": None, "metadata": {}}
    try:
        if not os.path.exists(self.ORIGINAL_PATH):
            result["error"] = "No original image found. Upload or fetch an image first."
            return result
        # Load original
        with open(self.ORIGINAL_PATH, 'rb') as f:
            image_bytes = f.read()
        image = Image.open(io.BytesIO(image_bytes))
        if debug:
            logger.info(f"Auto-cropping original image ({image.size[0]}x{image.size[1]})")
        # Run intelligent crop
        target_size = 512
        width, height = image.size
        if width <= target_size and height <= target_size:
            # Already at or below avatar size: use the image as-is
            if debug:
                logger.info(f"Image already at/below target size, skipping crop")
            cropped_image = image
        else:
            cropped_image = await self._intelligent_crop(image, image_bytes, target_size=target_size, debug=debug)
            if not cropped_image:
                result["error"] = "Intelligent crop failed"
                return result
        # Save cropped result as the current avatar image (PNG)
        output_buffer = io.BytesIO()
        cropped_image.save(output_buffer, format='PNG')
        cropped_bytes = output_buffer.getvalue()
        with open(self.CURRENT_PATH, 'wb') as f:
            f.write(cropped_bytes)
        if debug:
            logger.info(f"Saved auto-cropped image ({len(cropped_bytes)} bytes)")
        # Extract dominant color (drives the role-color sync below)
        dominant_color = self._extract_dominant_color(cropped_image, debug=debug)
        if dominant_color:
            result["metadata"]["dominant_color"] = {
                "rgb": dominant_color,
                "hex": "#{:02x}{:02x}{:02x}".format(*dominant_color)
            }
        # Update Discord avatar
        if globals.client and globals.client.user:
            try:
                if globals.client.loop and globals.client.loop.is_running():
                    # NOTE(review): blocking future.result() presumes this is
                    # not running on globals.client.loop itself (would stall
                    # until timeout) — confirm caller context.
                    future = asyncio.run_coroutine_threadsafe(
                        globals.client.user.edit(avatar=cropped_bytes),
                        globals.client.loop
                    )
                    future.result(timeout=10)
                else:
                    await globals.client.user.edit(avatar=cropped_bytes)
                result["success"] = True
                result["metadata"]["changed_at"] = datetime.now().isoformat()
                # Update existing metadata; an auto-crop clears both the
                # manual-workflow skip flag and any stored manual crop region
                existing_meta = self.load_metadata() or {}
                existing_meta.update(result["metadata"])
                existing_meta.pop("skip_crop", None)
                existing_meta.pop("crop_region", None)
                self._save_metadata(existing_meta)
                logger.info("Auto-crop applied and Discord avatar updated")
                if dominant_color:
                    await self._update_role_colors(dominant_color, debug=debug)
                # Best-effort refresh of bipolar-mode webhook avatars
                if globals.BIPOLAR_MODE:
                    try:
                        from utils.bipolar_mode import update_webhook_avatars
                        await update_webhook_avatars(globals.client)
                    except Exception as e:
                        logger.warning(f"Failed to update bipolar webhook avatars: {e}")
            except discord.HTTPException as e:
                result["error"] = f"Discord API error: {e}"
            except Exception as e:
                result["error"] = f"Unexpected error: {e}"
        else:
            result["error"] = "Bot client not ready"
    except Exception as e:
        result["error"] = f"Error in auto_crop_only: {e}"
        logger.error(f"Error in auto_crop_only: {e}")
    return result
async def update_description(self, description: str, reinject_cat: bool = True, debug: bool = False) -> Dict:
    """
    Update the profile picture description and optionally re-inject into Cheshire Cat memory.

    Persists the text to current_description.txt, mirrors it into the saved
    metadata, and (when enabled) stores it as a declarative fact in Cheshire
    Cat. Cat injection failures are logged but do not fail the operation.

    Args:
        description: New description text
        reinject_cat: Whether to store as a declarative fact in Cheshire Cat
        debug: Enable debug output

    Returns:
        Dict with success status
    """
    result = {"success": False, "error": None}
    try:
        # Ensure the storage directory exists (fresh installs may not have
        # it yet), then save the description file
        os.makedirs(self.PROFILE_PIC_DIR, exist_ok=True)
        description_path = os.path.join(self.PROFILE_PIC_DIR, "current_description.txt")
        with open(description_path, 'w', encoding='utf-8') as f:
            f.write(description)
        # Mirror the description into persisted metadata
        metadata = self.load_metadata() or {}
        metadata["description"] = description
        self._save_metadata(metadata)
        if debug:
            logger.info(f"Updated description ({len(description)} chars)")
        # Re-inject into Cheshire Cat as declarative memory
        if reinject_cat and globals.USE_CHESHIRE_CAT:
            try:
                from utils.cat_client import cat_adapter
                fact_content = f"Miku's current profile picture shows: {description}"
                await cat_adapter.create_memory_point(
                    collection="declarative",
                    content=fact_content,
                    user_id="profile_picture_manager",
                    source="profile_picture_description",
                    metadata={"type": "profile_picture", "updated_at": datetime.now().isoformat()}
                )
                if debug:
                    logger.info("Re-injected description into Cheshire Cat declarative memory")
            except Exception as e:
                # Cat injection is best-effort; the local save already succeeded
                logger.warning(f"Failed to re-inject description into Cat: {e}")
                # Don't fail the whole operation
        result["success"] = True
    except Exception as e:
        result["error"] = f"Error updating description: {e}"
        logger.error(f"Error in update_description: {e}")
    return result
async def regenerate_description(self, debug: bool = False) -> Dict:
    """
    Re-generate the description from the current original image using the vision model.

    Prefers the stored full-resolution original; falls back to the current
    cropped avatar if no original exists. On success the new text is saved
    via update_description() (including Cheshire Cat re-injection).

    Args:
        debug: Enable debug output

    Returns:
        Dict with success status and new description
    """
    outcome = {"success": False, "error": None, "description": None}
    try:
        # Pick the best available source image: original first, then current
        source_path = self.CURRENT_PATH
        if os.path.exists(self.ORIGINAL_PATH):
            source_path = self.ORIGINAL_PATH
        if not os.path.exists(source_path):
            outcome["error"] = "No image found to describe"
            return outcome
        with open(source_path, 'rb') as fh:
            raw_bytes = fh.read()
        if debug:
            logger.info("Regenerating image description via vision model...")
        new_text = await self._generate_image_description(raw_bytes, debug=debug)
        if not new_text:
            outcome["error"] = "Vision model returned no description"
            return outcome
        # Persist the fresh description (also re-injects into Cheshire Cat)
        saved = await self.update_description(new_text, reinject_cat=True, debug=debug)
        outcome["success"] = saved["success"]
        outcome["description"] = new_text
        if saved.get("error"):
            outcome["error"] = saved["error"]
    except Exception as e:
        outcome["error"] = f"Error regenerating description: {e}"
        logger.error(f"Error in regenerate_description: {e}")
    return outcome
# Global instance