backend: replace LAST_FULL_PROMPT/LAST_CAT_INTERACTION with unified PROMPT_HISTORY deque

- globals.py: add collections.deque(maxlen=10) PROMPT_HISTORY with _prompt_id_counter
- globals.py: add legacy accessor functions _get_last_fallback_prompt() and _get_last_cat_interaction()
- bot.py: append to PROMPT_HISTORY instead of setting LAST_CAT_INTERACTION, remove 500-char truncation, add guild/channel/model fields
- image_handling.py: same pattern for Cat media responses
- llm.py: append fallback prompts to PROMPT_HISTORY with response filled after LLM reply
- routes/core.py: new GET /prompts and GET /prompts/{id} endpoints, legacy /prompt and /prompt/cat use accessor functions
2026-05-02 15:17:15 +03:00
parent 2d0c80b7ef
commit 87de8f8b3a
5 changed files with 96 additions and 24 deletions
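
For orientation before the per-file diffs: every producer touched by this commit appends a dict with the same eleven keys to the shared deque. A representative entry, with illustrative values rather than output from a live run, looks like this:

    # Shape of one PROMPT_HISTORY entry (illustrative values only)
    {
        "id": 7,                             # monotonically increasing via _prompt_id_counter
        "source": "cat",                     # "cat" or "fallback"
        "full_prompt": "System: ...",        # full prompt text
        "response": "Hehe, of course!",      # untruncated; the old 500-char cap is removed
        "user": "someuser",
        "mood": "happy",
        "guild": "Some Server",              # "DM" for direct messages, "N/A" where unknown
        "channel": "general",
        "timestamp": "2026-05-02T15:17:15",  # datetime.datetime.now().isoformat()
        "model": "Cat LLM",                  # or the fallback model name in llm.py
        "response_type": "text",
    }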

bot.py

@@ -360,15 +360,24 @@ async def on_message(message):
                     if globals.EVIL_MODE:
                         effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
                     logger.info(f"🐱 Cat response for {author_name} (mood: {effective_mood})")
-                    # Track Cat interaction for Web UI Last Prompt view
+                    # Track Cat interaction in unified prompt history
                     import datetime
-                    globals.LAST_CAT_INTERACTION = {
+                    globals._prompt_id_counter += 1
+                    guild_name = message.guild.name if message.guild else "DM"
+                    channel_name = message.channel.name if message.guild else "DM"
+                    globals.PROMPT_HISTORY.append({
+                        "id": globals._prompt_id_counter,
+                        "source": "cat",
                         "full_prompt": cat_full_prompt,
-                        "response": response[:500] if response else "",
+                        "response": response if response else "",
                         "user": author_name,
                         "mood": effective_mood,
+                        "guild": guild_name,
+                        "channel": channel_name,
                         "timestamp": datetime.datetime.now().isoformat(),
-                    }
+                        "model": "Cat LLM",
+                        "response_type": response_type,
+                    })
                 except Exception as e:
                     logger.warning(f"🐱 Cat pipeline error, falling back to query_llama: {e}")
                     response = None
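
The guild/channel derivation above relies on discord.py's convention that message.guild is None for direct messages. A standalone illustration of the same conditional, using stand-in objects instead of a real discord.Message:

    from types import SimpleNamespace

    def describe(message):
        # Mirrors the conditional added in on_message above
        guild_name = message.guild.name if message.guild else "DM"
        channel_name = message.channel.name if message.guild else "DM"
        return guild_name, channel_name

    dm = SimpleNamespace(guild=None, channel=SimpleNamespace(name="ignored"))
    guild_msg = SimpleNamespace(guild=SimpleNamespace(name="Some Server"),
                                channel=SimpleNamespace(name="general"))
    print(describe(dm))         # ('DM', 'DM')  -- channel name is not used for DMs
    print(describe(guild_msg))  # ('Some Server', 'general')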

globals.py

@@ -1,6 +1,7 @@
 # globals.py
 import os
 import discord
+from collections import deque
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 
 scheduler = AsyncIOScheduler()
@@ -77,16 +78,25 @@ MIKU_NORMAL_AVATAR_URL = None # Cached CDN URL of the regular Miku pfp (valid e
 BOT_USER = None
 
-LAST_FULL_PROMPT = ""
 
-# Cheshire Cat last interaction tracking (for Web UI Last Prompt toggle)
-LAST_CAT_INTERACTION = {
-    "full_prompt": "",
-    "response": "",
-    "user": "",
-    "mood": "",
-    "timestamp": "",
-}
+# Unified prompt history (replaces LAST_FULL_PROMPT and LAST_CAT_INTERACTION)
+# Each entry: {id, source, full_prompt, response, user, mood, guild, channel,
+#              timestamp, model, response_type}
+PROMPT_HISTORY = deque(maxlen=10)
+_prompt_id_counter = 0
+
+# Legacy accessors for backward compatibility (routes, CLI, etc.)
+# These are computed properties that read from PROMPT_HISTORY
+def _get_last_fallback_prompt():
+    for entry in reversed(PROMPT_HISTORY):
+        if entry.get("source") == "fallback":
+            return entry.get("full_prompt", "")
+    return ""
+
+def _get_last_cat_interaction():
+    for entry in reversed(PROMPT_HISTORY):
+        if entry.get("source") == "cat":
+            return entry
+    return {"full_prompt": "", "response": "", "user": "", "mood": "", "timestamp": ""}
 
 
 # Persona Dialogue System (conversations between Miku and Evil Miku)
 LAST_PERSONA_DIALOGUE_TIME = 0 # Timestamp of last dialogue for cooldown
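
Two pieces of standard-library behavior that the new globals.py code depends on are worth spelling out: deque(maxlen=10) silently evicts the oldest entry on overflow, and reversed() walks a deque newest-first, which is what lets the legacy accessors return the most recent matching entry. A quick sanity check (plain Python, not project code):

    from collections import deque

    history = deque(maxlen=10)
    for i in range(1, 12):  # append 11 entries into a 10-slot deque
        history.append({"id": i, "source": "cat" if i % 2 else "fallback"})

    print(history[0]["id"])  # 2 -> entry 1 was evicted automatically
    # Newest "cat" entry wins, exactly like _get_last_cat_interaction()
    print(next(e for e in reversed(history) if e["source"] == "cat")["id"])  # 11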

routes/core.py

@@ -31,18 +31,45 @@ def get_logs():
 
 @router.get("/prompt")
 def get_last_prompt():
-    return {"prompt": globals.LAST_FULL_PROMPT or "No prompt has been issued yet."}
+    """Legacy endpoint: returns the most recent fallback prompt (backward compat)."""
+    prompt_text = globals._get_last_fallback_prompt()
+    return {"prompt": prompt_text or "No prompt has been issued yet."}
 
 
 @router.get("/prompt/cat")
 def get_last_cat_prompt():
-    """Get the last Cheshire Cat interaction (full prompt + response) for Web UI."""
-    interaction = globals.LAST_CAT_INTERACTION
+    """Legacy endpoint: returns the most recent Cat interaction (backward compat)."""
+    interaction = globals._get_last_cat_interaction()
     if not interaction.get("full_prompt"):
-        return {"full_prompt": "No Cheshire Cat interaction has occurred yet.", "response": "", "user": "", "mood": "", "timestamp": ""}
+        return {"full_prompt": "No Cheshire Cat interaction has occurred yet.",
+                "response": "", "user": "", "mood": "", "timestamp": ""}
     return interaction
 
 
+@router.get("/prompts")
+def get_prompt_history(source: str = None):
+    """
+    Return the unified prompt history.
+    Optional query param ?source=cat or ?source=fallback to filter.
+    """
+    history = list(globals.PROMPT_HISTORY)
+    if source and source in ("cat", "fallback"):
+        history = [e for e in history if e.get("source") == source]
+    return {"history": history}
+
+
+@router.get("/prompts/{prompt_id}")
+def get_prompt_by_id(prompt_id: int):
+    """Return a single prompt history entry by ID."""
+    for entry in globals.PROMPT_HISTORY:
+        if entry.get("id") == prompt_id:
+            return entry
+    return JSONResponse(
+        status_code=404,
+        content={"status": "error", "message": f"Prompt #{prompt_id} not found"}
+    )
+
+
 @router.get("/status")
 def status():
     # Get per-server mood summary
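
Assuming the router is mounted on the bot's FastAPI app and reachable locally (the host and port below are placeholders, not values from this commit), the new endpoints can be exercised like so:

    import requests

    BASE = "http://localhost:8000"  # placeholder; use the bot API's real host/port

    # Filtered history: only Cat pipeline entries
    cat_only = requests.get(f"{BASE}/prompts", params={"source": "cat"}).json()["history"]
    print(len(cat_only), "cat entries")

    # Single-entry lookup by id, and the 404 shape for a missing one
    if cat_only:
        detail = requests.get(f"{BASE}/prompts/{cat_only[-1]['id']}")
        print(detail.status_code, detail.json().get("model"))
    print(requests.get(f"{BASE}/prompts/999999").status_code)  # 404

Note that ?source= values other than cat/fallback are silently ignored rather than rejected, so an unrecognized filter returns the unfiltered history.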

image_handling.py

@@ -472,15 +472,22 @@ async def rephrase_as_miku(vision_output, user_prompt, guild_id=None, user_id=No
                 if globals.EVIL_MODE:
                     effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
                 logger.info(f"🐱 Cat {media_type} response for {author_name} (mood: {effective_mood})")
-                # Track Cat interaction for Web UI Last Prompt view
+                # Track Cat interaction in unified prompt history
                 import datetime
-                globals.LAST_CAT_INTERACTION = {
+                globals._prompt_id_counter += 1
+                globals.PROMPT_HISTORY.append({
+                    "id": globals._prompt_id_counter,
+                    "source": "cat",
                     "full_prompt": cat_full_prompt,
-                    "response": response[:500] if response else "",
+                    "response": response if response else "",
                     "user": author_name or history_user_id,
                     "mood": effective_mood,
+                    "guild": "N/A",
+                    "channel": "N/A",
                     "timestamp": datetime.datetime.now().isoformat(),
-                }
+                    "model": "Cat LLM",
+                    "response_type": response_type,
+                })
             except Exception as e:
                 logger.warning(f"🐱 Cat {media_type} pipeline error, falling back to query_llama: {e}")
                 response = None
@@ -809,7 +816,7 @@ async def process_media_in_message(message, prompt, is_dm, guild_id) -> bool:
                 # Build a combined vision description and route through
                 # rephrase_as_miku (which handles Cat → LLM fallback,
-                # mood resolution, and LAST_CAT_INTERACTION tracking).
+                # mood resolution, and prompt history tracking).
                 combined_description = "\n".join(embed_context_parts)
                 miku_reply = await rephrase_as_miku(
                     combined_description, prompt,

llm.py

@@ -381,7 +381,23 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
         media_note = media_descriptions.get(media_type, f"The user has sent you {media_type}.")
         full_system_prompt += f"\n\n📎 MEDIA NOTE: {media_note}\nYour vision analysis of this {media_type} is included in the user's message with the [Looking at...] prefix."
 
-    globals.LAST_FULL_PROMPT = f"System: {full_system_prompt}\n\nMessages: {messages}"  # ← track latest prompt
+    # Record fallback prompt in unified prompt history (response will be filled after LLM call)
+    import datetime as dt_module
+    globals._prompt_id_counter += 1
+    prompt_entry = {
+        "id": globals._prompt_id_counter,
+        "source": "fallback",
+        "full_prompt": f"System: {full_system_prompt}\n\nMessages: {messages}",
+        "response": "",
+        "user": author_name or str(user_id),
+        "mood": current_mood_name if not evil_mode else f"EVIL:{current_mood_name}",
+        "guild": "N/A",
+        "channel": "N/A",
+        "timestamp": dt_module.datetime.now().isoformat(),
+        "model": model,
+        "response_type": response_type,
+    }
+    globals.PROMPT_HISTORY.append(prompt_entry)
 
 
     headers = {'Content-Type': 'application/json'}
@@ -474,7 +490,10 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
                     content=reply,
                     is_bot=True
                 )
 
+                # Update the prompt history entry with the actual response
+                prompt_entry["response"] = reply if reply else ""
+
                 return reply
             else:
                 error_text = await response.text()
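
Filling prompt_entry["response"] after the earlier append works because the deque stores a reference to the dict, not a copy, so the later in-place mutation is visible through PROMPT_HISTORY. One consequence of maxlen=10: if ten newer prompts are recorded before the LLM replies, the entry has already been evicted and the update mutates a dict no longer in the history, which is harmless. A minimal demonstration of the reference semantics (plain Python, not project code):

    from collections import deque

    history = deque(maxlen=10)
    entry = {"id": 1, "response": ""}
    history.append(entry)

    entry["response"] = "final reply"  # mutate after appending
    print(history[-1]["response"])     # 'final reply' -> same object, not a copy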