- 217 error returns across 18 route files + api.py now use JSONResponse with appropriate HTTP status codes instead of returning HTTP 200 - Status code distribution: 500 (121), 400 (39), 503 (28), 404 (24), 409 (3), 502 (2) - Fixed language.py tuple-return bug (was serializing as JSON array) - Fixed bare except clauses in bipolar_mode.py and voice.py - Body-level error schemas preserved (status/error + success/error patterns) so web UI continues working without changes - chat.py (SSE) unchanged: errors sent within stream protocol - All 170 tests pass
40 lines
1.3 KiB
Python
40 lines
1.3 KiB
Python
"""GPU selection routes."""
|
|
|
|
from fastapi import APIRouter, Request
|
|
from fastapi.responses import JSONResponse
|
|
from utils.logger import get_logger
|
|
|
|
logger = get_logger('api')
|
|
|
|
router = APIRouter()
|
|
|
|
|
|
@router.get("/gpu-status")
def get_gpu_status():
    """Return the currently selected inference GPU as ``{"gpu": <name>}``."""
    # Imported lazily, matching the pattern used by the other route in this file.
    from config_manager import config_manager

    current_gpu = config_manager.get_gpu()
    return {"gpu": current_gpu}
|
|
|
|
|
|
@router.post("/gpu-select")
async def select_gpu(request: Request):
    """Select which GPU ("nvidia" or "amd") to use for inference.

    Expects a JSON object body like ``{"gpu": "nvidia"}``. Error responses use
    real HTTP status codes while preserving the body-level
    ``{"status": "error", "message": ...}`` schema the web UI relies on.

    Returns:
        dict on success (HTTP 200), or JSONResponse with 400/500 on error.
    """
    # A malformed/non-JSON body used to raise before any try block, producing
    # an unhandled 500 that bypassed the body-level error schema. Treat it as
    # client error: 400 with the standard schema.
    try:
        data = await request.json()
    except Exception as e:
        return JSONResponse(status_code=400, content={"status": "error", "message": f"Invalid JSON body: {e}"})

    # A valid-JSON but non-object body (e.g. a list or string) would crash on
    # .get(); reject it explicitly as a client error.
    if not isinstance(data, dict):
        return JSONResponse(status_code=400, content={"status": "error", "message": "Request body must be a JSON object"})

    # str() first: a non-string "gpu" value (e.g. a number) would otherwise
    # raise AttributeError on .lower(); it then fails the whitelist check below.
    gpu = str(data.get("gpu", "nvidia")).lower()

    if gpu not in ("nvidia", "amd"):
        return JSONResponse(status_code=400, content={"status": "error", "message": "Invalid GPU selection. Must be 'nvidia' or 'amd'"})

    try:
        from config_manager import config_manager
        success = config_manager.set_gpu(gpu)

        if success:
            logger.info(f"GPU Selection: Switched to {gpu.upper()} GPU")
            return {"status": "ok", "message": f"Switched to {gpu.upper()} GPU", "gpu": gpu}
        else:
            # Persisting the selection failed; server-side fault.
            return JSONResponse(status_code=500, content={"status": "error", "message": "Failed to save GPU state"})
    except Exception as e:
        logger.error(f"GPU Selection Error: {e}")
        return JSONResponse(status_code=500, content={"status": "error", "message": str(e)})