fix: PREFER_AMD_GPU now lives in globals so config API changes affect GPU routing

Previously gpu_router.py had its own module-level PREFER_AMD_GPU constant
that was frozen at import time. The config API wrote to globals.PREFER_AMD_GPU,
but gpu_router never read that attribute — it kept using its own frozen local
copy — so runtime GPU preference changes never took effect.

Now globals.py owns PREFER_AMD_GPU and gpu_router reads it from there.
This commit is contained in:
2026-04-10 23:53:14 +03:00
parent 366bee2e43
commit 02686c3b96
2 changed files with 4 additions and 2 deletions

View File

@@ -37,7 +37,8 @@ MODEL_TO_GPU = {
}
# Configuration
PREFER_AMD_GPU = os.getenv("PREFER_AMD_GPU", "false").lower() == "true"
# PREFER_AMD_GPU lives in globals so the config API can update it at runtime.
# We read globals.PREFER_AMD_GPU in functions below instead of a frozen local.
AMD_MODELS_ENABLED = os.getenv("AMD_MODELS_ENABLED", "true").lower() == "true"
@@ -101,7 +102,7 @@ def get_llama_url_with_load_balancing(
return globals.LLAMA_URL, "llama3.1"
# AMD enabled - implement load balancing
use_amd = prefer_amd or PREFER_AMD_GPU or (random.random() < 0.5)
use_amd = prefer_amd or globals.PREFER_AMD_GPU or (random.random() < 0.5)
if task_type == "evil":
# Evil/uncensored models