Compare commits

...

2 Commits

Author SHA1 Message Date
8b14160028 refactor: consolidate conversation_history to ConversationHistory class
Remove legacy globals.conversation_history (defaultdict of deques) and
route all callers through utils.conversation_history.ConversationHistory:

- globals.py: remove conversation_history + unused collections imports
- llm.py: remove backward-compat dual-write to legacy system
- api.py: /conversation/{user_id} now reads from ConversationHistory
- actions.py: reset_conversation uses clear_channel()
- figurine_notifier.py: use add_message() instead of buggy setdefault()
- bipolar_mode.py: fix clear_history -> clear_channel (was AttributeError
  silently swallowed by bare except), fix bare except -> except Exception
2026-04-11 00:21:44 +03:00
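
The diffs below remove the old storage but never show the ConversationHistory class itself. A minimal sketch of what utils/conversation_history.py plausibly looks like, inferred only from the call sites in this compare (add_message, get_recent_messages, clear_channel) and from the maxlen=5 deque it replaces — the real implementation may differ:

# Sketch of utils/conversation_history.py, inferred from call sites in this
# compare. Assumed, not copied from the repo; maxlen mirrors the old deque.
from collections import defaultdict, deque
from typing import Deque, Dict, List, Tuple

class ConversationHistory:
    def __init__(self, max_messages: int = 5):
        # One bounded deque of (author, content, is_bot) per channel/user id
        self._channels: Dict[str, Deque[Tuple[str, str, bool]]] = defaultdict(
            lambda: deque(maxlen=max_messages)
        )

    def add_message(self, channel_id: str, author_name: str, content: str, is_bot: bool) -> None:
        self._channels[channel_id].append((author_name, content, is_bot))

    def get_recent_messages(self, channel_id: str) -> List[Tuple[str, str, bool]]:
        return list(self._channels.get(channel_id, []))

    def clear_channel(self, channel_id: str) -> None:
        self._channels.pop(channel_id, None)

# Module-level singleton, imported as
# `from utils.conversation_history import conversation_history`
conversation_history = ConversationHistory()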
02686c3b96 fix: PREFER_AMD_GPU now lives in globals so config API changes affect GPU routing
Previously gpu_router.py had its own module-level PREFER_AMD_GPU constant
that was frozen at import time. The config API wrote to globals.PREFER_AMD_GPU
which didn't exist, so runtime GPU preference changes never took effect.

Now globals.py owns PREFER_AMD_GPU and gpu_router reads it from there.
2026-04-10 23:53:14 +03:00
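
The root cause is a standard Python pitfall: a module-level assignment copies the value once at import time, so later writes to another module's attribute are never seen. A standalone illustration (cfg stands in for globals.py; all names here are made up, not project code):

# Standalone demo of the frozen-at-import bug. `cfg` stands in for globals.py.
import types

cfg = types.SimpleNamespace(PREFER_AMD_GPU=False)  # stand-in for globals.py

FROZEN_PREFER_AMD = cfg.PREFER_AMD_GPU  # snapshot, like the old gpu_router constant

def use_amd_frozen() -> bool:
    return FROZEN_PREFER_AMD            # frozen at "import" time

def use_amd_live() -> bool:
    return cfg.PREFER_AMD_GPU           # attribute lookup at call time

cfg.PREFER_AMD_GPU = True               # what the config API write does
print(use_amd_frozen())  # False - the stale snapshot; the old bug
print(use_amd_live())    # True  - the fixed behavior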
7 changed files with 15 additions and 24 deletions

api.py

@@ -1746,9 +1746,9 @@ def get_autonomous_stats():
 @app.get("/conversation/{user_id}")
 def get_conversation(user_id: str):
-    if user_id in globals.conversation_history:
-        return {"conversation": list(globals.conversation_history[user_id])}
-    return {"conversation": []}
+    """Get conversation history for a user/channel (uses centralized ConversationHistory)."""
+    messages = conversation_history.get_recent_messages(user_id)
+    return {"conversation": [{"author": author, "content": content, "is_bot": is_bot} for author, content, is_bot in messages]}
 # ========== Figurine DM Subscription APIs ==========
 @app.get("/figurines/subscribers")

actions.py

@@ -4,6 +4,7 @@ import asyncio
 import globals
 from utils.moods import load_mood_description
 from utils.scheduled import send_bedtime_reminder
+from utils.conversation_history import conversation_history
 from utils.logger import get_logger
 logger = get_logger('commands')
@@ -32,7 +33,7 @@ def calm_miku() -> str:
 def reset_conversation(user_id):
-    globals.conversation_history[str(user_id)].clear()
+    conversation_history.clear_channel(str(user_id))
 async def force_sleep() -> str:

globals.py

@@ -1,14 +1,10 @@
 # globals.py
 import os
-from collections import defaultdict, deque
 import discord
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 scheduler = AsyncIOScheduler()
-# Stores last 5 exchanges per user (as deque)
-conversation_history = defaultdict(lambda: deque(maxlen=5))
 DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
 # Autonomous V2 Debug Mode (set to True to see detailed decision logging)
@@ -26,6 +22,7 @@ VISION_MODEL = os.getenv("VISION_MODEL", "vision")
 EVIL_TEXT_MODEL = os.getenv("EVIL_TEXT_MODEL", "darkidol")  # Uncensored model for evil mode
 JAPANESE_TEXT_MODEL = os.getenv("JAPANESE_TEXT_MODEL", "swallow")  # Llama 3.1 Swallow model for Japanese
 OWNER_USER_ID = int(os.getenv("OWNER_USER_ID", "209381657369772032"))  # Bot owner's Discord user ID for reports
+PREFER_AMD_GPU = os.getenv("PREFER_AMD_GPU", "false").lower() == "true"  # Runtime-overridable via config API
 # Cheshire Cat AI integration (Phase 3)
 CHESHIRE_CAT_URL = os.getenv("CHESHIRE_CAT_URL", "http://cheshire-cat:80")

bipolar_mode.py

@@ -1034,8 +1034,8 @@ async def run_argument(channel: discord.TextChannel, client, trigger_context: st
     # Clean up argument conversation history
     try:
-        conversation_history.clear_history(argument_user_id)
-    except:
+        conversation_history.clear_channel(argument_user_id)
+    except Exception:
         pass  # History cleanup is not critical
     end_argument(channel_id)
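
The commit message notes the old call raised an AttributeError that a bare except silently swallowed. A standalone illustration of that failure mode (the class here is a stub, not the project's):

# Demo: calling a method that doesn't exist raises AttributeError, and a
# bare `except:` hides it, so the history was never actually cleared.
class ConversationHistory:
    def clear_channel(self, channel_id: str) -> None:
        print(f"cleared {channel_id}")

history = ConversationHistory()

try:
    history.clear_history("123")  # AttributeError: no such method
except:                           # old code: swallows everything, no trace
    pass

try:
    history.clear_channel("123")  # fixed call actually runs
except Exception:                 # fixed handler: lets SystemExit/KeyboardInterrupt through
    pass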

figurine_notifier.py

@@ -5,8 +5,8 @@ from datetime import datetime
 from typing import List, Dict, Any, Tuple
 import discord
-import globals
+from utils.conversation_history import conversation_history
 from utils.twitter_fetcher import fetch_figurine_tweets_latest
 from utils.image_handling import analyze_image_with_qwen, download_and_encode_image
 from utils.llm import query_llama
@@ -204,15 +204,11 @@ async def send_figurine_dm_to_user(client: discord.Client, user_id: int, tweet:
     # Log the comment message
     dm_logger.log_user_message(user, comment_message, is_bot_message=True)
-    # IMPORTANT: Also add to globals.conversation_history for LLM context
+    # Add to conversation history for LLM context (uses centralized ConversationHistory)
     user_id_str = str(user_id)
     # Add the tweet URL as a "system message" about what Miku just sent (use original URL for context)
     tweet_context = f"[I just sent you this figurine tweet: {tweet_url}]"
-    # Add the figurine comment to conversation history
-    # Use empty user prompt since this was initiated by Miku
-    globals.conversation_history.setdefault(user_id_str, []).append((tweet_context, miku_comment))
+    conversation_history.add_message(channel_id=user_id_str, author_name="Miku", content=tweet_context, is_bot=True)
+    conversation_history.add_message(channel_id=user_id_str, author_name="Miku", content=miku_comment, is_bot=True)
     logger.debug(f"Messages logged to both DM history and conversation context for user {user_id}")
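
Why the replaced setdefault() call was buggy: dict.setdefault() never invokes a defaultdict's default_factory, so a missing key got a plain, unbounded list instead of the intended deque(maxlen=5). A standalone demonstration:

# Demo of the setdefault() bug against the old globals-style storage.
from collections import defaultdict, deque

history = defaultdict(lambda: deque(maxlen=5))

history.setdefault("dm_user", []).append(("context", "comment"))
print(type(history["dm_user"]))     # <class 'list'> - not a deque, never trimmed

history["other_user"].append(("context", "comment"))
print(type(history["other_user"]))  # <class 'collections.deque'> - maxlen=5 as intended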

gpu_router.py

@@ -37,7 +37,8 @@ MODEL_TO_GPU = {
 }
 # Configuration
-PREFER_AMD_GPU = os.getenv("PREFER_AMD_GPU", "false").lower() == "true"
+# PREFER_AMD_GPU lives in globals so the config API can update it at runtime.
+# We read globals.PREFER_AMD_GPU in functions below instead of a frozen local.
 AMD_MODELS_ENABLED = os.getenv("AMD_MODELS_ENABLED", "true").lower() == "true"
@@ -101,7 +102,7 @@ def get_llama_url_with_load_balancing(
         return globals.LLAMA_URL, "llama3.1"
     # AMD enabled - implement load balancing
-    use_amd = prefer_amd or PREFER_AMD_GPU or (random.random() < 0.5)
+    use_amd = prefer_amd or globals.PREFER_AMD_GPU or (random.random() < 0.5)
     if task_type == "evil":
         # Evil/uncensored models

llm.py

@@ -475,10 +475,6 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
                 is_bot=True
             )
-            # Also save to legacy globals for backward compatibility (skip error messages)
-            if user_prompt and user_prompt.strip() and reply and reply.strip() and reply != "Someone tell Koko-nii there is a problem with my AI.":
-                globals.conversation_history[user_id].append((user_prompt, reply))
             return reply
         else:
             error_text = await response.text()