refactor: split api.py monolith into 19 route modules (Phase B)

Split 3,598-line api.py into thin orchestrator (128 lines) + 19 route
modules in bot/routes/:

  core.py (7 routes), mood.py (10), language.py (3), evil_mode.py (6),
  bipolar_mode.py (9), gpu.py (2), bot_actions.py (4), autonomous.py (13),
  profile_picture.py (26), manual_send.py (3), servers.py (6),
  figurines.py (5), dms.py (18), image_generation.py (4), chat.py (1),
  config.py (7), logging_config.py (9), voice.py (3), memory.py (10)

All 146 routes verified present via test_route_split.py (149 tests).
21/21 regression tests (test_config_state.py) pass.
Monolith backup: bot/api_monolith_backup.py (revert: cp it to api.py).
This commit is contained in:
2026-04-15 11:38:14 +03:00
parent 8b14160028
commit 979217e7cc
26 changed files with 7624 additions and 3541 deletions

38
bot/routes/gpu.py Normal file
View File

@@ -0,0 +1,38 @@
"""GPU selection routes."""
from fastapi import APIRouter, Request
from utils.logger import get_logger
logger = get_logger('api')
router = APIRouter()
@router.get("/gpu-status")
def get_gpu_status():
    """Report the currently selected inference GPU as {"gpu": <name>}."""
    from config_manager import config_manager

    current = config_manager.get_gpu()
    return {"gpu": current}
@router.post("/gpu-select")
async def select_gpu(request: Request):
    """Select which GPU to use for inference.

    Expects a JSON body like ``{"gpu": "nvidia"}`` or ``{"gpu": "amd"}``;
    the key defaults to ``"nvidia"`` when absent. Persists the choice via
    ``config_manager.set_gpu`` and returns a status dict — never raises
    for bad client input.
    """
    # Parse the body inside a try: the original let a malformed/non-JSON
    # body escape as an unhandled 500 instead of a structured error dict.
    try:
        data = await request.json()
    except Exception as e:
        return {"status": "error", "message": f"Invalid JSON body: {e}"}

    gpu = data.get("gpu", "nvidia")
    # Guard before .lower(): a non-string value (e.g. {"gpu": 1}) would
    # otherwise raise AttributeError ahead of the validation check.
    if not isinstance(gpu, str):
        return {"status": "error", "message": "Invalid GPU selection. Must be 'nvidia' or 'amd'"}
    gpu = gpu.lower()
    if gpu not in ("nvidia", "amd"):
        return {"status": "error", "message": "Invalid GPU selection. Must be 'nvidia' or 'amd'"}

    try:
        from config_manager import config_manager

        success = config_manager.set_gpu(gpu)
        if success:
            logger.info(f"GPU Selection: Switched to {gpu.upper()} GPU")
            return {"status": "ok", "message": f"Switched to {gpu.upper()} GPU", "gpu": gpu}
        return {"status": "error", "message": "Failed to save GPU state"}
    except Exception as e:
        # Persisting the selection failed; surface it as a structured error.
        logger.error(f"GPU Selection Error: {e}")
        return {"status": "error", "message": str(e)}