backend: replace LAST_FULL_PROMPT/LAST_CAT_INTERACTION with unified PROMPT_HISTORY deque

- globals.py: add collections.deque(maxlen=10) PROMPT_HISTORY with _prompt_id_counter
- globals.py: add legacy accessor functions _get_last_fallback_prompt() and _get_last_cat_interaction()
- bot.py: append to PROMPT_HISTORY instead of setting LAST_CAT_INTERACTION, remove the 500-character response truncation, add guild/channel/model fields
- image_handling.py: same pattern for Cat media responses
- llm.py: append fallback prompts to PROMPT_HISTORY, with the response field filled in after the LLM reply arrives
- routes/core.py: new GET /prompts and GET /prompts/{id} endpoints, legacy /prompt and /prompt/cat use accessor functions
This commit is contained in:
2026-05-02 15:17:15 +03:00
parent 2d0c80b7ef
commit 87de8f8b3a
5 changed files with 96 additions and 24 deletions

View File

@@ -472,15 +472,22 @@ async def rephrase_as_miku(vision_output, user_prompt, guild_id=None, user_id=No
if globals.EVIL_MODE:
effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
logger.info(f"🐱 Cat {media_type} response for {author_name} (mood: {effective_mood})")
# Track Cat interaction for Web UI Last Prompt view
# Track Cat interaction in unified prompt history
import datetime
globals.LAST_CAT_INTERACTION = {
globals._prompt_id_counter += 1
globals.PROMPT_HISTORY.append({
"id": globals._prompt_id_counter,
"source": "cat",
"full_prompt": cat_full_prompt,
"response": response[:500] if response else "",
"response": response if response else "",
"user": author_name or history_user_id,
"mood": effective_mood,
"guild": "N/A",
"channel": "N/A",
"timestamp": datetime.datetime.now().isoformat(),
}
"model": "Cat LLM",
"response_type": response_type,
})
except Exception as e:
logger.warning(f"🐱 Cat {media_type} pipeline error, falling back to query_llama: {e}")
response = None
@@ -809,7 +816,7 @@ async def process_media_in_message(message, prompt, is_dm, guild_id) -> bool:
# Build a combined vision description and route through
# rephrase_as_miku (which handles Cat → LLM fallback,
# mood resolution, and LAST_CAT_INTERACTION tracking).
# mood resolution, and prompt history tracking).
combined_description = "\n".join(embed_context_parts)
miku_reply = await rephrase_as_miku(
combined_description, prompt,