backend: replace LAST_FULL_PROMPT/LAST_CAT_INTERACTION with unified PROMPT_HISTORY deque

- globals.py: add collections.deque(maxlen=10) PROMPT_HISTORY with _prompt_id_counter
- globals.py: add legacy accessor functions _get_last_fallback_prompt() and _get_last_cat_interaction() (see the globals.py sketch after this list)
- bot.py: append to PROMPT_HISTORY instead of setting LAST_CAT_INTERACTION, remove 500-char truncation, add guild/channel/model fields
- image_handling.py: same pattern for Cat media responses
- llm.py: append fallback prompts to PROMPT_HISTORY with response filled after LLM reply
- routes/core.py: new GET /prompts and GET /prompts/{id} endpoints; legacy /prompt and /prompt/cat now go through the accessor functions (see the endpoint sketch below)
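
For reference, a minimal sketch of what the globals.py side could look like. PROMPT_HISTORY, _prompt_id_counter, and the two accessor names come from this commit; the filtering inside the accessors (matching on each entry's "source" field) is an assumption, since only the "fallback" source label is visible in the diff below.

# globals.py (sketch; accessor internals are assumed, not taken from the diff)
import collections

# Unified rolling history of recent LLM prompts; maxlen=10 drops the oldest entries automatically.
PROMPT_HISTORY = collections.deque(maxlen=10)
_prompt_id_counter = 0

def _get_last_fallback_prompt():
    """Legacy accessor: newest entry recorded by the fallback path in llm.py."""
    for entry in reversed(PROMPT_HISTORY):
        if entry.get("source") == "fallback":
            return entry
    return None

def _get_last_cat_interaction():
    """Legacy accessor: newest entry recorded by bot.py / image_handling.py."""
    for entry in reversed(PROMPT_HISTORY):
        if entry.get("source") != "fallback":  # assumed: Cat entries use a non-"fallback" source label
            return entry
    return None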
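
Similarly, a sketch of the new read-only endpoints in routes/core.py, assuming a FastAPI-style router; the framework and exact response shape are assumptions, only the paths GET /prompts and GET /prompts/{id} are named in the commit.

# routes/core.py (sketch, assuming a FastAPI-style router)
from fastapi import APIRouter, HTTPException

import globals

router = APIRouter()

@router.get("/prompts")
async def list_prompts():
    # The deque serializes as a plain list; newest entries are at the end.
    return {"prompts": list(globals.PROMPT_HISTORY)}

@router.get("/prompts/{prompt_id}")
async def get_prompt(prompt_id: int):
    for entry in globals.PROMPT_HISTORY:
        if entry["id"] == prompt_id:
            return entry
    raise HTTPException(status_code=404, detail="Prompt not found")

Under this layout the legacy /prompt and /prompt/cat routes reduce to thin wrappers around _get_last_fallback_prompt() and _get_last_cat_interaction().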
commit 87de8f8b3a
parent 2d0c80b7ef
Date: 2026-05-02 15:17:15 +03:00
5 changed files with 96 additions and 24 deletions

@@ -381,7 +381,23 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
     media_note = media_descriptions.get(media_type, f"The user has sent you {media_type}.")
     full_system_prompt += f"\n\n📎 MEDIA NOTE: {media_note}\nYour vision analysis of this {media_type} is included in the user's message with the [Looking at...] prefix."
-    globals.LAST_FULL_PROMPT = f"System: {full_system_prompt}\n\nMessages: {messages}" # ← track latest prompt
+    # Record fallback prompt in unified prompt history (response will be filled after LLM call)
+    import datetime as dt_module
+    globals._prompt_id_counter += 1
+    prompt_entry = {
+        "id": globals._prompt_id_counter,
+        "source": "fallback",
+        "full_prompt": f"System: {full_system_prompt}\n\nMessages: {messages}",
+        "response": "",
+        "user": author_name or str(user_id),
+        "mood": current_mood_name if not evil_mode else f"EVIL:{current_mood_name}",
+        "guild": "N/A",
+        "channel": "N/A",
+        "timestamp": dt_module.datetime.now().isoformat(),
+        "model": model,
+        "response_type": response_type,
+    }
+    globals.PROMPT_HISTORY.append(prompt_entry)
     headers = {'Content-Type': 'application/json'}
@@ -474,7 +490,10 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
         content=reply,
         is_bot=True
     )
+    # Update the prompt history entry with the actual response
+    prompt_entry["response"] = reply if reply else ""
     return reply
 else:
     error_text = await response.text()