diff --git a/bot/bot.py b/bot/bot.py index 4634c0d..3ef3ee1 100644 --- a/bot/bot.py +++ b/bot/bot.py @@ -360,15 +360,24 @@ async def on_message(message): if globals.EVIL_MODE: effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}" logger.info(f"🐱 Cat response for {author_name} (mood: {effective_mood})") - # Track Cat interaction for Web UI Last Prompt view + # Track Cat interaction in unified prompt history import datetime - globals.LAST_CAT_INTERACTION = { + globals._prompt_id_counter += 1 + guild_name = message.guild.name if message.guild else "DM" + channel_name = message.channel.name if message.guild else "DM" + globals.PROMPT_HISTORY.append({ + "id": globals._prompt_id_counter, + "source": "cat", "full_prompt": cat_full_prompt, - "response": response[:500] if response else "", + "response": response if response else "", "user": author_name, "mood": effective_mood, + "guild": guild_name, + "channel": channel_name, "timestamp": datetime.datetime.now().isoformat(), - } + "model": "Cat LLM", + "response_type": response_type, + }) except Exception as e: logger.warning(f"🐱 Cat pipeline error, falling back to query_llama: {e}") response = None diff --git a/bot/globals.py b/bot/globals.py index 934e463..dde963c 100644 --- a/bot/globals.py +++ b/bot/globals.py @@ -1,6 +1,7 @@ # globals.py import os import discord +from collections import deque from apscheduler.schedulers.asyncio import AsyncIOScheduler scheduler = AsyncIOScheduler() @@ -77,16 +78,25 @@ MIKU_NORMAL_AVATAR_URL = None # Cached CDN URL of the regular Miku pfp (valid e BOT_USER = None -LAST_FULL_PROMPT = "" +# Unified prompt history (replaces LAST_FULL_PROMPT and LAST_CAT_INTERACTION) +# Each entry: {id, source, full_prompt, response, user, mood, guild, channel, +# timestamp, model, response_type} +PROMPT_HISTORY = deque(maxlen=10) +_prompt_id_counter = 0 -# Cheshire Cat last interaction tracking (for Web UI Last Prompt toggle) -LAST_CAT_INTERACTION = { - "full_prompt": "", - 
"response": "", - "user": "", - "mood": "", - "timestamp": "", -} +# Legacy accessors for backward compatibility (routes, CLI, etc.) +# These are computed properties that read from PROMPT_HISTORY +def _get_last_fallback_prompt(): + for entry in reversed(PROMPT_HISTORY): + if entry.get("source") == "fallback": + return entry.get("full_prompt", "") + return "" + +def _get_last_cat_interaction(): + for entry in reversed(PROMPT_HISTORY): + if entry.get("source") == "cat": + return entry + return {"full_prompt": "", "response": "", "user": "", "mood": "", "timestamp": ""} # Persona Dialogue System (conversations between Miku and Evil Miku) LAST_PERSONA_DIALOGUE_TIME = 0 # Timestamp of last dialogue for cooldown diff --git a/bot/routes/core.py b/bot/routes/core.py index b0a3299..a56b471 100644 --- a/bot/routes/core.py +++ b/bot/routes/core.py @@ -31,18 +31,45 @@ def get_logs(): @router.get("/prompt") def get_last_prompt(): - return {"prompt": globals.LAST_FULL_PROMPT or "No prompt has been issued yet."} + """Legacy endpoint: returns the most recent fallback prompt (backward compat).""" + prompt_text = globals._get_last_fallback_prompt() + return {"prompt": prompt_text or "No prompt has been issued yet."} @router.get("/prompt/cat") def get_last_cat_prompt(): - """Get the last Cheshire Cat interaction (full prompt + response) for Web UI.""" - interaction = globals.LAST_CAT_INTERACTION + """Legacy endpoint: returns the most recent Cat interaction (backward compat).""" + interaction = globals._get_last_cat_interaction() if not interaction.get("full_prompt"): - return {"full_prompt": "No Cheshire Cat interaction has occurred yet.", "response": "", "user": "", "mood": "", "timestamp": ""} + return {"full_prompt": "No Cheshire Cat interaction has occurred yet.", + "response": "", "user": "", "mood": "", "timestamp": ""} return interaction +@router.get("/prompts") +def get_prompt_history(source: str = None): + """ + Return the unified prompt history. 
+ Optional query param ?source=cat or ?source=fallback to filter. + """ + history = list(globals.PROMPT_HISTORY) + if source and source in ("cat", "fallback"): + history = [e for e in history if e.get("source") == source] + return {"history": history} + + +@router.get("/prompts/{prompt_id}") +def get_prompt_by_id(prompt_id: int): + """Return a single prompt history entry by ID.""" + for entry in globals.PROMPT_HISTORY: + if entry.get("id") == prompt_id: + return entry + return JSONResponse( + status_code=404, + content={"status": "error", "message": f"Prompt #{prompt_id} not found"} + ) + + @router.get("/status") def status(): # Get per-server mood summary diff --git a/bot/utils/image_handling.py b/bot/utils/image_handling.py index ba4754b..98983be 100644 --- a/bot/utils/image_handling.py +++ b/bot/utils/image_handling.py @@ -472,15 +472,22 @@ async def rephrase_as_miku(vision_output, user_prompt, guild_id=None, user_id=No if globals.EVIL_MODE: effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}" logger.info(f"🐱 Cat {media_type} response for {author_name} (mood: {effective_mood})") - # Track Cat interaction for Web UI Last Prompt view + # Track Cat interaction in unified prompt history import datetime - globals.LAST_CAT_INTERACTION = { + globals._prompt_id_counter += 1 + globals.PROMPT_HISTORY.append({ + "id": globals._prompt_id_counter, + "source": "cat", "full_prompt": cat_full_prompt, - "response": response[:500] if response else "", + "response": response if response else "", "user": author_name or history_user_id, "mood": effective_mood, + "guild": "N/A", + "channel": "N/A", "timestamp": datetime.datetime.now().isoformat(), - } + "model": "Cat LLM", + "response_type": response_type, + }) except Exception as e: logger.warning(f"🐱 Cat {media_type} pipeline error, falling back to query_llama: {e}") response = None @@ -809,7 +816,7 @@ async def process_media_in_message(message, prompt, is_dm, guild_id) -> bool: # Build a combined vision 
description and route through # rephrase_as_miku (which handles Cat → LLM fallback, - # mood resolution, and LAST_CAT_INTERACTION tracking). + # mood resolution, and prompt history tracking). combined_description = "\n".join(embed_context_parts) miku_reply = await rephrase_as_miku( combined_description, prompt, diff --git a/bot/utils/llm.py b/bot/utils/llm.py index b898592..f486a6a 100644 --- a/bot/utils/llm.py +++ b/bot/utils/llm.py @@ -381,7 +381,23 @@ Please respond in a way that reflects this emotional tone.{pfp_context}""" media_note = media_descriptions.get(media_type, f"The user has sent you {media_type}.") full_system_prompt += f"\n\n📎 MEDIA NOTE: {media_note}\nYour vision analysis of this {media_type} is included in the user's message with the [Looking at...] prefix." - globals.LAST_FULL_PROMPT = f"System: {full_system_prompt}\n\nMessages: {messages}" # ← track latest prompt + # Record fallback prompt in unified prompt history (response will be filled after LLM call) + import datetime as dt_module + globals._prompt_id_counter += 1 + prompt_entry = { + "id": globals._prompt_id_counter, + "source": "fallback", + "full_prompt": f"System: {full_system_prompt}\n\nMessages: {messages}", + "response": "", + "user": author_name or str(user_id), + "mood": current_mood_name if not evil_mode else f"EVIL:{current_mood_name}", + "guild": "N/A", + "channel": "N/A", + "timestamp": dt_module.datetime.now().isoformat(), + "model": model, + "response_type": response_type, + } + globals.PROMPT_HISTORY.append(prompt_entry) headers = {'Content-Type': 'application/json'} @@ -474,7 +490,10 @@ content=reply, is_bot=True ) - + + # Update the prompt history entry with the actual response + prompt_entry["response"] = reply if reply else "" + return reply else: error_text = await response.text()