Initial commit: Miku Discord Bot

This commit is contained in:
2025-12-07 17:15:09 +02:00
commit 8c74ad5260
206 changed files with 50125 additions and 0 deletions

132
face-detector/api/main.py Normal file
View File

@@ -0,0 +1,132 @@
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from anime_face_detector import create_detector
import numpy as np
import cv2
import os
import json
from datetime import datetime
from pathlib import Path
# FastAPI application instance; route handlers below register themselves
# on it via decorators.
app = FastAPI(title="Anime Face Detector API")

# Directory where annotated images and JSON result files are written.
OUTPUT_DIR = Path("/app/api/outputs")
# parents=True: without it, mkdir raises FileNotFoundError when an
# intermediate directory (/app or /app/api) is missing, e.g. on a fresh
# container volume. exist_ok keeps restarts idempotent.
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

# Model handle; stays None until the startup hook finishes loading it.
# Endpoints must check for None before using it.
detector = None
@app.on_event("startup")
async def startup_event():
    """Load the anime face detector once when the server starts.

    Rebinds the module-level ``detector`` singleton so request handlers
    can use it; until this completes, ``detector`` is None and /detect
    returns 503.

    NOTE(review): ``on_event`` is deprecated in recent FastAPI versions
    in favour of lifespan handlers — confirm the pinned FastAPI version
    still supports it.
    """
    global detector
    # yolov3 variant on the first CUDA device; assumes a GPU is available
    # in the container — TODO confirm behaviour when CUDA is absent.
    detector = create_detector("yolov3", device="cuda:0")
@app.get("/")
async def root():
    """Landing endpoint: identify the service and report that it is up."""
    payload = {
        "message": "Anime Face Detector API",
        "status": "running",
    }
    return payload
@app.get("/health")
async def health():
    """Health probe: always healthy, plus whether the model is loaded yet."""
    model_ready = detector is not None
    return {"status": "healthy", "detector_loaded": model_ready}
def _serialize_detections(preds):
    """Convert raw detector predictions into a JSON-safe list of dicts.

    Each prediction dict is expected to carry ``bbox`` as a numpy array
    ``[x1, y1, x2, y2, score]`` and, optionally, ``keypoints`` as
    ``[[x, y, score], ...]`` — TODO confirm against anime_face_detector's
    actual output schema.
    """
    detections = []
    for pred in preds:
        bbox = pred['bbox']
        keypoints = pred.get('keypoints', [])
        detections.append({
            "bbox": bbox[:4].tolist(),
            "confidence": float(bbox[4]),
            "keypoints": [[float(pt[0]), float(pt[1]), float(pt[2])]
                          for pt in keypoints],
        })
    return detections


def _draw_detections(img, preds):
    """Return a copy of ``img`` with bounding boxes and keypoints drawn.

    Rendering mirrors the upstream Gradio demo: green boxes, red keypoints,
    yellow for keypoints under 0.3 confidence.
    """
    annotated = img.copy()
    for pred in preds:
        box = pred['bbox']
        coords = np.round(box[:4]).astype(int)
        # Line thickness scales with the box size (min 2 px) so the overlay
        # stays visible on both small and large faces.
        lt = max(2, int(3 * (coords[2:] - coords[:2]).max() / 256))
        cv2.rectangle(annotated,
                      tuple(coords[:2]),
                      tuple(coords[2:]),
                      (0, 255, 0), lt)
        for *pt, kp_score in pred['keypoints']:
            # Yellow = low-confidence keypoint, red = confident keypoint.
            color = (0, 255, 255) if kp_score < 0.3 else (0, 0, 255)
            cv2.circle(annotated, tuple(np.round(pt).astype(int)), lt,
                       color, cv2.FILLED)
    return annotated


@app.post("/detect")
async def detect_face(file: UploadFile = File(...)):
    """Detect anime faces in an uploaded image.

    Decodes the upload, runs the detector, writes an annotated JPEG and a
    JSON results file under OUTPUT_DIR, and returns the detections plus
    the paths of the saved files.

    Responses:
        200 — {"detections", "count", "annotated_image", "json_file"}
        503 — detector has not finished loading yet
        400 — the upload could not be decoded as an image
        500 — any unexpected error (message in "error")
    """
    try:
        if detector is None:
            return JSONResponse(
                status_code=503,
                content={"error": "Detector not initialized yet"}
            )

        image_bytes = await file.read()
        # Decode the raw bytes into a BGR image; None means cv2 could not
        # parse the payload as an image.
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if img is None:
            return JSONResponse(
                status_code=400,
                content={"error": "Invalid image file"}
            )

        preds = detector(img)
        result = _serialize_detections(preds)
        annotated_img = _draw_detections(img, preds)

        # Microsecond timestamp keeps concurrent uploads from colliding
        # on the same output filename.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        original_filename = Path(file.filename).stem if file.filename else "image"

        image_output_path = OUTPUT_DIR / f"{original_filename}_{timestamp}_annotated.jpg"
        cv2.imwrite(str(image_output_path), annotated_img)

        json_output_path = OUTPUT_DIR / f"{original_filename}_{timestamp}_results.json"
        json_data = {
            "filename": file.filename,
            "timestamp": timestamp,
            "detections": result,
            "count": len(result),
        }
        with open(json_output_path, 'w') as f:
            json.dump(json_data, f, indent=2)

        return {
            "detections": result,
            "count": len(result),
            "annotated_image": str(image_output_path),
            "json_file": str(json_output_path),
        }
    except Exception as e:
        # Boundary handler: convert any unexpected failure into a 500.
        # NOTE(review): str(e) can leak internal details (paths, library
        # errors) to clients — consider logging the traceback server-side
        # and returning a generic message instead.
        return JSONResponse(
            status_code=500,
            content={"error": str(e)}
        )