Split 3,598-line api.py into a thin orchestrator (128 lines) + 19 route modules in bot/routes/: core.py (7 routes), mood.py (10), language.py (3), evil_mode.py (6), bipolar_mode.py (9), gpu.py (2), bot_actions.py (4), autonomous.py (13), profile_picture.py (26), manual_send.py (3), servers.py (6), figurines.py (5), dms.py (18), image_generation.py (4), chat.py (1), config.py (7), logging_config.py (9), voice.py (3), memory.py (10). All 146 routes verified present via test_route_split.py (149 tests). 21/21 regression tests (test_config_state.py) pass. Monolith backup: bot/api_monolith_backup.py (revert: cp it to api.py).
39 lines
1.2 KiB
Python
39 lines
1.2 KiB
Python
"""GPU selection routes."""
|
|
|
|
from fastapi import APIRouter, Request
|
|
from utils.logger import get_logger
|
|
|
|
logger = get_logger('api')
|
|
|
|
router = APIRouter()
|
|
|
|
|
|
@router.get("/gpu-status")
def get_gpu_status():
    """Return the currently selected GPU as {"gpu": <name>}."""
    # Imported lazily (matching the other routes) to avoid a circular import
    # at module load time.
    from config_manager import config_manager

    current = config_manager.get_gpu()
    return {"gpu": current}
|
|
|
|
|
|
@router.post("/gpu-select")
async def select_gpu(request: Request):
    """Select which GPU to use for inference.

    Expects a JSON body like {"gpu": "nvidia"} (or "amd"). Returns a
    structured envelope: {"status": "ok"|"error", "message": ..., "gpu": ...}.
    """
    # Parse the body defensively: a malformed/empty body would otherwise
    # raise out of request.json() and surface as an unhandled 500 instead
    # of the structured error envelope this endpoint returns everywhere else.
    try:
        data = await request.json()
    except Exception as e:
        logger.error(f"GPU Selection Error: {e}")
        return {"status": "error", "message": str(e)}

    # str() guards against non-string JSON values (e.g. numbers/booleans),
    # which would crash .lower(); they fall through to the validation below.
    gpu = str(data.get("gpu", "nvidia")).lower()

    if gpu not in ["nvidia", "amd"]:
        return {"status": "error", "message": "Invalid GPU selection. Must be 'nvidia' or 'amd'"}

    try:
        # Imported lazily (matching the other routes) to avoid a circular
        # import at module load time.
        from config_manager import config_manager
        success = config_manager.set_gpu(gpu)

        if success:
            logger.info(f"GPU Selection: Switched to {gpu.upper()} GPU")
            return {"status": "ok", "message": f"Switched to {gpu.upper()} GPU", "gpu": gpu}
        else:
            return {"status": "error", "message": "Failed to save GPU state"}
    except Exception as e:
        # Boundary handler: log and return the error envelope rather than 500.
        logger.error(f"GPU Selection Error: {e}")
        return {"status": "error", "message": str(e)}
|