Compare commits

7d5881ebe7...master — 13 commits

| SHA1 | Author | Date |
|---|---|---|
| 9eb081efb1 | | |
| 4e28236b06 | | |
| c5e49c73df | | |
| 393921e524 | | |
| 2dd32d0ef1 | | |
| a980b90c0a | | |
| 6b922d84ae | | |
| f33e2afdf7 | | |
| 87de8f8b3a | | |
| 2d0c80b7ef | | |
| 17842f24d4 | | |
| 4e064ad89b | | |
| 97c7133fdc | | |
@@ -1,13 +0,0 @@
-FROM ghcr.io/mostlygeek/llama-swap:cuda
-
-USER root
-
-# Download and install llama-server binary (CUDA version)
-# Using the official pre-built binary from llama.cpp releases
-ADD --chmod=755 https://github.com/ggml-org/llama.cpp/releases/download/b4183/llama-server-cuda /usr/local/bin/llama-server
-
-# Verify it's executable
-RUN llama-server --version || echo "llama-server installed successfully"
-
-USER 1000:1000
-
@@ -1,68 +0,0 @@
-# Multi-stage build for llama-swap with ROCm support
-# Now using official llama.cpp ROCm image (PR #18439 merged Dec 29, 2025)
-
-# Stage 1: Build llama-swap UI
-FROM node:22-alpine AS ui-builder
-
-WORKDIR /build
-
-# Install git
-RUN apk add --no-cache git
-
-# Clone llama-swap
-RUN git clone https://github.com/mostlygeek/llama-swap.git
-
-# Build UI (now in ui-svelte directory)
-WORKDIR /build/llama-swap/ui-svelte
-RUN npm install && npm run build
-
-# Stage 2: Build llama-swap binary
-FROM golang:1.23-alpine AS swap-builder
-
-WORKDIR /build
-
-# Install git
-RUN apk add --no-cache git
-
-# Copy llama-swap source with built UI
-COPY --from=ui-builder /build/llama-swap /build/llama-swap
-
-# Build llama-swap binary
-WORKDIR /build/llama-swap
-RUN GOTOOLCHAIN=auto go build -o /build/llama-swap-binary .
-
-# Stage 3: Final runtime image using official llama.cpp ROCm image
-FROM ghcr.io/ggml-org/llama.cpp:server-rocm
-
-WORKDIR /app
-
-# Copy llama-swap binary from builder
-COPY --from=swap-builder /build/llama-swap-binary /app/llama-swap
-
-# Make binaries executable
-RUN chmod +x /app/llama-swap
-
-# Add existing ubuntu user (UID 1000) to GPU access groups (using host GIDs)
-# GID 187 = render group on host, GID 989 = video/kfd group on host
-RUN groupadd -g 187 hostrender && \
-    groupadd -g 989 hostvideo && \
-    usermod -aG hostrender,hostvideo ubuntu && \
-    chown -R ubuntu:ubuntu /app
-
-# Set environment for ROCm (RX 6800 is gfx1030)
-ENV HSA_OVERRIDE_GFX_VERSION=10.3.0
-ENV ROCM_PATH=/opt/rocm
-ENV HIP_VISIBLE_DEVICES=0
-
-USER ubuntu
-
-# Expose port
-EXPOSE 8080
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
-    CMD curl -f http://localhost:8080/health || exit 1
-
-# Override the base image's ENTRYPOINT and run llama-swap
-ENTRYPOINT []
-CMD ["/app/llama-swap", "-config", "/app/config.yaml", "-listen", "0.0.0.0:8080"]
bot/bot.py (20 changed lines)
@@ -207,9 +207,8 @@ async def on_message(message):
     # AND roll for random argument trigger (both non-blocking background tasks)
     if not isinstance(message.channel, discord.DMChannel) and globals.BIPOLAR_MODE:
         try:
-            from utils.persona_dialogue import check_for_interjection
+            from utils.persona_dialogue import check_for_interjection, is_persona_dialogue_active as dialogue_active
             from utils.bipolar_mode import maybe_trigger_argument, is_argument_in_progress as arg_in_progress
-            from utils.bipolar_mode import is_persona_dialogue_active as dialogue_active
             from utils.task_tracker import create_tracked_task
 
             # Check interjection on user messages (opposite of current active persona)
@@ -361,15 +360,24 @@ async def on_message(message):
         if globals.EVIL_MODE:
             effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
         logger.info(f"🐱 Cat response for {author_name} (mood: {effective_mood})")
-        # Track Cat interaction for Web UI Last Prompt view
+        # Track Cat interaction in unified prompt history
         import datetime
-        globals.LAST_CAT_INTERACTION = {
+        globals._prompt_id_counter += 1
+        guild_name = message.guild.name if message.guild else "DM"
+        channel_name = message.channel.name if message.guild else "DM"
+        globals.PROMPT_HISTORY.append({
+            "id": globals._prompt_id_counter,
+            "source": "cat",
             "full_prompt": cat_full_prompt,
-            "response": response[:500] if response else "",
+            "response": response if response else "",
             "user": author_name,
             "mood": effective_mood,
+            "guild": guild_name,
+            "channel": channel_name,
             "timestamp": datetime.datetime.now().isoformat(),
-        }
+            "model": "Cat LLM",
+            "response_type": response_type,
+        })
     except Exception as e:
         logger.warning(f"🐱 Cat pipeline error, falling back to query_llama: {e}")
         response = None
@@ -1,6 +1,7 @@
 # globals.py
 import os
 import discord
+from collections import deque
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 
 scheduler = AsyncIOScheduler()
@@ -77,16 +78,25 @@ MIKU_NORMAL_AVATAR_URL = None # Cached CDN URL of the regular Miku pfp (valid e
 
 BOT_USER = None
 
-LAST_FULL_PROMPT = ""
+# Unified prompt history (replaces LAST_FULL_PROMPT and LAST_CAT_INTERACTION)
+# Each entry: {id, source, full_prompt, response, user, mood, guild, channel,
+#              timestamp, model, response_type}
+PROMPT_HISTORY = deque(maxlen=10)
+_prompt_id_counter = 0
 
-# Cheshire Cat last interaction tracking (for Web UI Last Prompt toggle)
-LAST_CAT_INTERACTION = {
-    "full_prompt": "",
-    "response": "",
-    "user": "",
-    "mood": "",
-    "timestamp": "",
-}
+# Legacy accessors for backward compatibility (routes, CLI, etc.)
+# These are computed properties that read from PROMPT_HISTORY
+def _get_last_fallback_prompt():
+    for entry in reversed(PROMPT_HISTORY):
+        if entry.get("source") == "fallback":
+            return entry.get("full_prompt", "")
+    return ""
+
+def _get_last_cat_interaction():
+    for entry in reversed(PROMPT_HISTORY):
+        if entry.get("source") == "cat":
+            return entry
+    return {"full_prompt": "", "response": "", "user": "", "mood": "", "timestamp": ""}
 
 # Persona Dialogue System (conversations between Miku and Evil Miku)
 LAST_PERSONA_DIALOGUE_TIME = 0 # Timestamp of last dialogue for cooldown
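For orientation (not part of the diff): a minimal sketch of how a producer and the legacy accessors interact with the new deque, assuming the `globals` module as reconstructed above. The field values are illustrative.

```python
# Sketch only — mirrors the PROMPT_HISTORY API introduced in this hunk.
import globals  # the bot's own globals module, as diffed above

globals._prompt_id_counter += 1
globals.PROMPT_HISTORY.append({
    "id": globals._prompt_id_counter,
    "source": "cat",                 # or "fallback"
    "full_prompt": "System: ...",
    "response": "...",
    "user": "someuser",              # illustrative values
    "mood": "happy",
    "guild": "N/A",
    "channel": "N/A",
    "timestamp": "2026-05-02T12:00:00",
    "model": "Cat LLM",
    "response_type": "text",
})

# Legacy readers scan newest-first; with maxlen=10, old entries silently drop off.
assert globals._get_last_cat_interaction()["id"] == globals._prompt_id_counter
```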
@@ -14,7 +14,8 @@ router = APIRouter()
 
 @router.get("/")
 def read_index():
-    return FileResponse("static/index.html")
+    headers = {"Cache-Control": "no-cache, no-store, must-revalidate"}
+    return FileResponse("static/index.html", headers=headers)
 
 
 @router.get("/logs")
@@ -31,18 +32,45 @@ def get_logs():
 
 @router.get("/prompt")
 def get_last_prompt():
-    return {"prompt": globals.LAST_FULL_PROMPT or "No prompt has been issued yet."}
+    """Legacy endpoint: returns the most recent fallback prompt (backward compat)."""
+    prompt_text = globals._get_last_fallback_prompt()
+    return {"prompt": prompt_text or "No prompt has been issued yet."}
 
 
 @router.get("/prompt/cat")
 def get_last_cat_prompt():
-    """Get the last Cheshire Cat interaction (full prompt + response) for Web UI."""
-    interaction = globals.LAST_CAT_INTERACTION
+    """Legacy endpoint: returns the most recent Cat interaction (backward compat)."""
+    interaction = globals._get_last_cat_interaction()
     if not interaction.get("full_prompt"):
-        return {"full_prompt": "No Cheshire Cat interaction has occurred yet.", "response": "", "user": "", "mood": "", "timestamp": ""}
+        return {"full_prompt": "No Cheshire Cat interaction has occurred yet.",
+                "response": "", "user": "", "mood": "", "timestamp": ""}
     return interaction
 
 
+@router.get("/prompts")
+def get_prompt_history(source: str = None):
+    """
+    Return the unified prompt history.
+    Optional query param ?source=cat or ?source=fallback to filter.
+    """
+    history = list(globals.PROMPT_HISTORY)
+    if source and source in ("cat", "fallback"):
+        history = [e for e in history if e.get("source") == source]
+    return {"history": history}
+
+
+@router.get("/prompts/{prompt_id}")
+def get_prompt_by_id(prompt_id: int):
+    """Return a single prompt history entry by ID."""
+    for entry in globals.PROMPT_HISTORY:
+        if entry.get("id") == prompt_id:
+            return entry
+    return JSONResponse(
+        status_code=404,
+        content={"status": "error", "message": f"Prompt #{prompt_id} not found"}
+    )
+
+
 @router.get("/status")
 def status():
     # Get per-server mood summary
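A quick sketch of exercising the new and legacy endpoints; the base URL and port are assumptions for illustration, not taken from the diff.

```python
# Sketch only — BASE is an assumption; endpoint paths come from the routes above.
import requests

BASE = "http://localhost:8000"

everything = requests.get(f"{BASE}/prompts").json()["history"]
cat_only = requests.get(f"{BASE}/prompts", params={"source": "cat"}).json()["history"]
entry = requests.get(f"{BASE}/prompts/3")  # 404 JSON body if #3 has rotated out of the deque

# Legacy endpoints keep their old response shapes:
legacy = requests.get(f"{BASE}/prompt").json()["prompt"]
```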
@@ -441,6 +441,51 @@ h1, h3 {
   color: #ddd;
 }
 
+/* Prompt History Section */
+#prompt-history-section.collapsed #prompt-history-body {
+  display: none;
+}
+#prompt-history-toggle {
+  user-select: none;
+  transition: color 0.2s;
+}
+#prompt-history-toggle:hover {
+  color: #4CAF50;
+}
+#prompt-metadata span {
+  white-space: nowrap;
+}
+#prompt-metadata .prompt-meta-label {
+  color: #666;
+}
+#prompt-metadata .prompt-meta-value {
+  color: #ccc;
+}
+#prompt-display pre {
+  margin: 0;
+}
+.prompt-subsection-header {
+  cursor: pointer;
+  user-select: none;
+  padding: 0.3rem 0.5rem;
+  border-radius: 4px;
+  background: #2a2a2a;
+  margin: 0.5rem 0 0.25rem 0;
+  font-size: 0.82rem;
+  color: #aaa;
+  transition: background 0.15s;
+}
+.prompt-subsection-header:hover {
+  background: #333;
+  color: #ddd;
+}
+.prompt-subsection-body.collapsed {
+  display: none;
+}
+#prompt-truncate-toggle {
+  accent-color: #4CAF50;
+}
+
 /* Mood Activities Editor */
 .act-mood-row {
   margin-bottom: 0.5rem;
@@ -3,10 +3,13 @@
 <head>
     <meta charset="UTF-8">
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate">
+    <meta http-equiv="Pragma" content="no-cache">
+    <meta http-equiv="Expires" content="0">
     <title>Miku Control Panel</title>
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.6.2/cropper.min.css">
    <script src="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.6.2/cropper.min.js"></script>
-    <link rel="stylesheet" href="/static/css/style.css">
+    <link rel="stylesheet" href="/static/css/style.css?v=20260502">
 </head>
 <body>
 
@@ -543,23 +546,53 @@
     </div>
   </div>
 
-  <div class="section">
-    <h3>Last Prompt</h3>
-    <div style="margin-bottom: 0.75rem; display: flex; align-items: center; gap: 0.75rem;">
+  <div class="section" id="prompt-history-section">
+    <div class="prompt-history-header" style="display: flex; align-items: center; justify-content: space-between; margin-bottom: 0.5rem;">
+      <h3 style="margin: 0; cursor: pointer;" onclick="togglePromptHistoryCollapse()" id="prompt-history-toggle">
+        ▼ Prompt History
+      </h3>
+      <button onclick="loadPromptHistory()" title="Refresh" style="background: none; border: 1px solid #444; color: #aaa; cursor: pointer; padding: 0.2rem 0.5rem; border-radius: 4px; font-size: 0.85rem;">🔄</button>
+    </div>
+    <div id="prompt-history-body">
+    <!-- Source filter + history selector row -->
+    <div style="margin-bottom: 0.75rem; display: flex; align-items: center; gap: 0.75rem; flex-wrap: wrap;">
       <label style="font-size: 0.9rem; color: #aaa;">Source:</label>
       <div style="display: inline-flex; border-radius: 6px; overflow: hidden; border: 1px solid #444;">
-        <button id="prompt-src-cat" class="prompt-source-btn active" onclick="switchPromptSource('cat')"
-                style="padding: 0.4rem 1rem; border: none; cursor: pointer; font-size: 0.85rem; transition: all 0.2s;">
-          🐱 Cheshire Cat
+        <button id="prompt-src-all" class="prompt-source-btn active" onclick="switchPromptSource('all')"
+                style="padding: 0.4rem 0.8rem; border: none; cursor: pointer; font-size: 0.85rem; transition: all 0.2s;">
+          All
+        </button>
+        <button id="prompt-src-cat" class="prompt-source-btn" onclick="switchPromptSource('cat')"
+                style="padding: 0.4rem 0.8rem; border: none; cursor: pointer; font-size: 0.85rem; transition: all 0.2s;">
+          🐱 Cat
         </button>
         <button id="prompt-src-fallback" class="prompt-source-btn" onclick="switchPromptSource('fallback')"
-                style="padding: 0.4rem 1rem; border: none; cursor: pointer; font-size: 0.85rem; transition: all 0.2s;">
-          🤖 Bot Fallback
+                style="padding: 0.4rem 0.8rem; border: none; cursor: pointer; font-size: 0.85rem; transition: all 0.2s;">
+          🤖 Fallback
         </button>
       </div>
+      <select id="prompt-history-select" onchange="selectPromptEntry(this.value)" style="background: #2a2a2a; color: #ddd; border: 1px solid #444; padding: 0.35rem 0.5rem; border-radius: 4px; font-size: 0.85rem; min-width: 280px;">
+        <option value="">-- No prompts yet --</option>
+      </select>
+    </div>
+
+    <!-- Metadata bar -->
+    <div id="prompt-metadata" style="margin-bottom: 0.5rem; font-size: 0.82rem; color: #888; display: flex; flex-wrap: wrap; gap: 0.3rem 1rem;"></div>
+
+    <!-- Toolbar: copy + truncate toggle -->
+    <div style="margin-bottom: 0.5rem; display: flex; align-items: center; gap: 1rem;">
+      <button onclick="copyPromptToClipboard()" title="Copy full prompt to clipboard" style="background: #333; border: 1px solid #555; color: #aaa; cursor: pointer; padding: 0.25rem 0.6rem; border-radius: 4px; font-size: 0.8rem;">📋 Copy</button>
+      <label style="font-size: 0.82rem; color: #aaa; cursor: pointer; display: flex; align-items: center; gap: 0.3rem;">
+        <input type="checkbox" id="prompt-truncate-toggle" onchange="toggleMiddleTruncation()">
+        Truncate from middle
+      </label>
+    </div>
+
+    <!-- Prompt display subsections -->
+    <div id="prompt-display" style="max-height: 60vh; overflow-y: auto; min-height: 3rem;"></div>
+    <!-- Hidden buffer for copy-to-clipboard raw text -->
+    <pre id="last-prompt" style="display: none;"></pre>
     </div>
-    <div id="prompt-cat-info" style="margin-bottom: 0.5rem; font-size: 0.85rem; color: #aaa;"></div>
-    <pre id="last-prompt" style="white-space: pre-wrap; word-break: break-word;"></pre>
   </div>
   </div>
 
@@ -1339,15 +1372,15 @@
     </div>
   </div>
 
-  <script src="/static/js/core.js"></script>
-  <script src="/static/js/servers.js"></script>
-  <script src="/static/js/modes.js"></script>
-  <script src="/static/js/actions.js"></script>
-  <script src="/static/js/image-gen.js"></script>
-  <script src="/static/js/status.js"></script>
-  <script src="/static/js/dm.js"></script>
-  <script src="/static/js/chat.js"></script>
-  <script src="/static/js/memories.js"></script>
-  <script src="/static/js/profile.js"></script>
+  <script src="/static/js/core.js?v=20260502"></script>
+  <script src="/static/js/servers.js?v=20260502"></script>
+  <script src="/static/js/modes.js?v=20260502"></script>
+  <script src="/static/js/actions.js?v=20260502"></script>
+  <script src="/static/js/image-gen.js?v=20260502"></script>
+  <script src="/static/js/status.js?v=20260502"></script>
+  <script src="/static/js/dm.js?v=20260502"></script>
+  <script src="/static/js/chat.js?v=20260502"></script>
+  <script src="/static/js/memories.js?v=20260502"></script>
+  <script src="/static/js/profile.js?v=20260502"></script>
 </body>
 </html>
@@ -29,6 +29,7 @@ let notificationTimer = null;
 let statusInterval = null;
 let logsInterval = null;
 let argsInterval = null;
+let promptInterval = null;
 
 // Mood emoji mapping
 const MOOD_EMOJIS = {
@@ -211,12 +212,14 @@ function startPolling() {
   if (!statusInterval) statusInterval = setInterval(loadStatus, 10000);
   if (!logsInterval) logsInterval = setInterval(loadLogs, 5000);
   if (!argsInterval) argsInterval = setInterval(loadActiveArguments, 5000);
+  if (!promptInterval) promptInterval = setInterval(loadPromptHistory, 10000);
 }
 
 function stopPolling() {
   clearInterval(statusInterval); statusInterval = null;
   clearInterval(logsInterval); logsInterval = null;
   clearInterval(argsInterval); argsInterval = null;
+  clearInterval(promptInterval); promptInterval = null;
 }
 
 // ============================================================================
@@ -248,7 +251,7 @@ function initVisibilityPolling() {
     stopPolling();
     console.log('⏸ Tab hidden — polling paused');
   } else {
-    loadStatus(); loadLogs(); loadActiveArguments();
+    loadStatus(); loadLogs(); loadActiveArguments(); loadPromptHistory();
     startPolling();
     console.log('▶️ Tab visible — polling resumed');
   }
@@ -296,9 +299,11 @@ function initModalAccessibility() {
 }
 
 function initPromptSourceToggle() {
-  const saved = localStorage.getItem('miku-prompt-source') || 'cat';
+  const saved = localStorage.getItem('miku-prompt-source') || 'all';
   document.querySelectorAll('.prompt-source-btn').forEach(btn => btn.classList.remove('active'));
-  document.getElementById(`prompt-src-${saved}`).classList.add('active');
+  const btnId = saved === 'all' ? 'prompt-src-all' : `prompt-src-${saved}`;
+  const btn = document.getElementById(btnId);
+  if (btn) btn.classList.add('active');
 }
 
 function initLogsScrollDetection() {
@@ -360,8 +365,10 @@ async function loadLogs() {
 function switchPromptSource(source) {
   localStorage.setItem('miku-prompt-source', source);
   document.querySelectorAll('.prompt-source-btn').forEach(btn => btn.classList.remove('active'));
-  document.getElementById(`prompt-src-${source}`).classList.add('active');
-  loadLastPrompt();
+  const btnId = source === 'all' ? 'prompt-src-all' : `prompt-src-${source}`;
+  const btn = document.getElementById(btnId);
+  if (btn) btn.classList.add('active');
+  loadPromptHistory();
 }
 
 // ============================================================================
@@ -57,33 +57,271 @@ async function loadStatus() {
   }
 }
 
-// ===== Last Prompt =====
+// ===== Prompt History =====
 
-async function loadLastPrompt() {
-  const source = localStorage.getItem('miku-prompt-source') || 'cat';
-  const promptEl = document.getElementById('last-prompt');
-  const infoEl = document.getElementById('prompt-cat-info');
+let _promptHistoryCache = []; // cached history entries from last fetch
+let _selectedPromptId = null; // currently selected entry ID
+let _middleTruncation = false; // whether middle-truncation is active
+
+async function loadPromptHistory() {
+  const source = localStorage.getItem('miku-prompt-source') || 'all';
+  const selectEl = document.getElementById('prompt-history-select');
 
   try {
-    if (source === 'cat') {
-      const result = await apiCall('/prompt/cat');
-      if (result.timestamp) {
-        infoEl.innerHTML = `<strong>User:</strong> ${escapeHtml(result.user || '?')} | <strong>Mood:</strong> ${escapeHtml(result.mood || '?')} | <strong>Time:</strong> ${new Date(result.timestamp).toLocaleString()}`;
-        promptEl.textContent = result.full_prompt + `\n\n${'═'.repeat(60)}\n[Cat Response]\n${result.response}`;
+    const url = source === 'all' ? '/prompts' : `/prompts?source=${source}`;
+    const result = await apiCall(url);
+    _promptHistoryCache = result.history || [];
+
+    // Populate dropdown
+    const currentValue = selectEl.value;
+    selectEl.innerHTML = '';
+    if (_promptHistoryCache.length === 0) {
+      selectEl.innerHTML = '<option value="">-- No prompts yet --</option>';
     } else {
-      infoEl.textContent = '';
-      promptEl.textContent = result.full_prompt || 'No Cheshire Cat interaction yet.';
+      _promptHistoryCache.forEach(entry => {
+        const ts = entry.timestamp ? new Date(entry.timestamp).toLocaleTimeString() : '?';
+        const srcLabel = entry.source === 'cat' ? '🐱' : '🤖';
+        const user = entry.user || '?';
+        const option = document.createElement('option');
+        option.value = entry.id;
+        option.textContent = `${srcLabel} #${entry.id} — ${user} — ${ts}`;
+        selectEl.appendChild(option);
+      });
     }
+
+    // Restore or auto-select the latest entry
+    if (_selectedPromptId && _promptHistoryCache.some(e => e.id === _selectedPromptId)) {
+      selectEl.value = _selectedPromptId;
+    } else if (_promptHistoryCache.length > 0) {
+      selectEl.value = _promptHistoryCache[0].id;
+    }
+
+    if (selectEl.value) {
+      await selectPromptEntry(selectEl.value);
     } else {
-      infoEl.textContent = '';
-      const result = await apiCall('/prompt');
-      promptEl.textContent = result.prompt;
+      clearPromptDisplay();
     }
   } catch (error) {
-    console.error('Failed to load last prompt:', error);
+    console.error('Failed to load prompt history:', error);
  }
 }
+
+async function selectPromptEntry(promptId) {
+  if (!promptId) {
+    clearPromptDisplay();
+    return;
+  }
+
+  _selectedPromptId = parseInt(promptId);
+
+  // Try cache first
+  let entry = _promptHistoryCache.find(e => e.id === _selectedPromptId);
+
+  // Fall back to API call if not in cache
+  if (!entry) {
+    try {
+      entry = await apiCall(`/prompts/${_selectedPromptId}`);
+    } catch (error) {
+      console.error('Failed to load prompt entry:', error);
+      clearPromptDisplay();
+      return;
+    }
+  }
+
+  if (!entry) {
+    clearPromptDisplay();
+    return;
+  }
+
+  renderPromptEntry(entry);
+}
+
+function clearPromptDisplay() {
+  document.getElementById('prompt-metadata').innerHTML = '';
+  document.getElementById('prompt-display').innerHTML = '<pre style="white-space: pre-wrap; word-break: break-word; background: #1a1a1a; padding: 0.75rem; border-radius: 4px; font-size: 0.8rem; line-height: 1.4; margin: 0; color: #666;">No prompt selected.</pre>';
+  document.getElementById('last-prompt').textContent = '';
+}
+
+function renderPromptEntry(entry) {
+  // Metadata bar
+  const metaEl = document.getElementById('prompt-metadata');
+  const ts = entry.timestamp ? new Date(entry.timestamp).toLocaleString() : '?';
+  const sourceIcon = entry.source === 'cat' ? '🐱 Cat' : '🤖 Fallback';
+  metaEl.innerHTML = `
+    <span><span class="prompt-meta-label">#</span><span class="prompt-meta-value">${entry.id}</span></span>
+    <span><span class="prompt-meta-label">Source:</span> <span class="prompt-meta-value">${sourceIcon}</span></span>
+    <span><span class="prompt-meta-label">User:</span> <span class="prompt-meta-value">${escapeHtml(entry.user || '?')}</span></span>
+    <span><span class="prompt-meta-label">Mood:</span> <span class="prompt-meta-value">${escapeHtml(entry.mood || '?')}</span></span>
+    <span><span class="prompt-meta-label">Guild:</span> <span class="prompt-meta-value">${escapeHtml(entry.guild || '?')}</span></span>
+    <span><span class="prompt-meta-label">Channel:</span> <span class="prompt-meta-value">${escapeHtml(entry.channel || '?')}</span></span>
+    <span><span class="prompt-meta-label">Model:</span> <span class="prompt-meta-value">${escapeHtml(entry.model || '?')}</span></span>
+    <span><span class="prompt-meta-label">Type:</span> <span class="prompt-meta-value">${escapeHtml(entry.response_type || '?')}</span></span>
+    <span><span class="prompt-meta-label">Time:</span> <span class="prompt-meta-value">${ts}</span></span>
+  `;
+
+  // Parse full_prompt into sections
+  const sections = parsePromptSections(entry.full_prompt || '');
+
+  // Snapshot which subsections are currently collapsed (before re-render)
+  const sectionIds = ['system', 'context', 'conversation', 'response'];
+  const collapsedState = {};
+  sectionIds.forEach(id => {
+    const el = document.getElementById(`prompt-section-${id}`);
+    collapsedState[id] = el && el.classList.contains('collapsed');
+  });
+
+  // Build display HTML with collapsible subsections
+  let displayHtml = '';
+
+  if (sections.system) {
+    displayHtml += buildCollapsibleSection('System Prompt', sections.system, 'system');
+  }
+  if (sections.context) {
+    displayHtml += buildCollapsibleSection('Context (Memories & Tools)', sections.context, 'context');
+  }
+  if (sections.conversation) {
+    displayHtml += buildCollapsibleSection('Conversation', sections.conversation, 'conversation');
+  }
+  if (!sections.system && !sections.context && !sections.conversation) {
+    // Fallback: show raw full_prompt
+    displayHtml += `<pre style="white-space: pre-wrap; word-break: break-word; margin: 0;">${escapeHtml(entry.full_prompt || '')}</pre>`;
+  }
+
+  // Response section
+  if (entry.response) {
+    let responseText = entry.response;
+    if (_middleTruncation && responseText.length > 400) {
+      responseText = responseText.substring(0, 200) + '\n\n... [truncated middle] ...\n\n' + responseText.substring(responseText.length - 200);
+    }
+    displayHtml += buildCollapsibleSection('Response', responseText, 'response');
+  }
+
+  // Render into the prompt-display div (using innerHTML for collapsible structure)
+  const displayEl = document.getElementById('prompt-display');
+  displayEl.innerHTML = displayHtml;
+
+  // Restore collapsed state from snapshot
+  sectionIds.forEach(id => {
+    const el = document.getElementById(`prompt-section-${id}`);
+    if (el && collapsedState[id]) {
+      el.classList.add('collapsed');
+      const header = el.previousElementSibling;
+      if (header) header.innerHTML = header.innerHTML.replace('▼', '▶');
+    }
+  });
+
+  // Also set the raw text into the <pre> for copy functionality
+  let rawText = entry.full_prompt || '';
+  if (entry.response) {
+    rawText += `\n\n${'═'.repeat(60)}\n[Response]\n${entry.response}`;
+  }
+  document.getElementById('last-prompt').textContent = rawText;
+}
+
+function parsePromptSections(fullPrompt) {
+  const sections = { system: null, context: null, conversation: null };
+
+  if (!fullPrompt) return sections;
+
+  // Try to split on known section markers
+  const contextMatch = fullPrompt.match(/# Context\s*\n([\s\S]*?)(?=\n# Conversation|\nHuman:|\n$)/);
+  const convMatch = fullPrompt.match(/# Conversation until now:\s*\n([\s\S]*)/);
+
+  if (contextMatch) {
+    // Everything before # Context is the system prompt
+    const contextIdx = fullPrompt.indexOf('# Context');
+    if (contextIdx > 0) {
+      sections.system = fullPrompt.substring(0, contextIdx).trim();
+    }
+    sections.context = contextMatch[1].trim();
+  }
+
+  if (convMatch) {
+    sections.conversation = convMatch[1].trim();
+  } else {
+    // Try alternative: "Human:" at the end
+    const humanMatch = fullPrompt.match(/\nHuman:([\s\S]*)/);
+    if (humanMatch && fullPrompt.indexOf('Human:') > fullPrompt.indexOf('# Context')) {
+      sections.conversation = 'Human:' + humanMatch[1].trim();
+    }
+  }
+
+  // If no # Context marker, try "System:" prefix (fallback prompts)
+  if (!sections.system && !sections.context) {
+    const sysMatch = fullPrompt.match(/^System:\s*([\s\S]*?)(?=\nMessages:)/);
+    const msgMatch = fullPrompt.match(/Messages:\s*([\s\S]*)/);
+    if (sysMatch) {
+      sections.system = sysMatch[1].trim();
+    }
+    if (msgMatch) {
+      sections.conversation = msgMatch[1].trim();
+    }
+  }
+
+  return sections;
+}
+
+function buildCollapsibleSection(title, content, sectionId) {
+  const id = `prompt-section-${sectionId}`;
+  return `
+    <div class="prompt-subsection-header" onclick="togglePromptSubsection('${id}')">
+      ▼ ${escapeHtml(title)}
+    </div>
+    <div class="prompt-subsection-body" id="${id}">
+      <pre style="white-space: pre-wrap; word-break: break-word; background: #1a1a1a; padding: 0.5rem; border-radius: 4px; font-size: 0.8rem; line-height: 1.4; margin: 0.25rem 0;">${escapeHtml(content)}</pre>
+    </div>`;
+}
+
+function togglePromptSubsection(id) {
+  const body = document.getElementById(id);
+  if (!body) return;
+  const header = body.previousElementSibling;
+  if (body.classList.contains('collapsed')) {
+    body.classList.remove('collapsed');
+    if (header) header.innerHTML = header.innerHTML.replace('▶', '▼');
+  } else {
+    body.classList.add('collapsed');
+    if (header) header.innerHTML = header.innerHTML.replace('▼', '▶');
+  }
+}
+
+function togglePromptHistoryCollapse() {
+  const section = document.getElementById('prompt-history-section');
+  const toggle = document.getElementById('prompt-history-toggle');
+  if (section.classList.contains('collapsed')) {
+    section.classList.remove('collapsed');
+    toggle.textContent = '▼ Prompt History';
+  } else {
+    section.classList.add('collapsed');
+    toggle.textContent = '▶ Prompt History';
+  }
+}
+
+function copyPromptToClipboard() {
+  const rawText = document.getElementById('last-prompt').textContent;
+  if (!rawText) return;
+  navigator.clipboard.writeText(rawText).then(() => {
+    showNotification('Prompt copied to clipboard', 'success');
+  }).catch(err => {
+    console.error('Failed to copy:', err);
+    showNotification('Failed to copy', 'error');
+  });
+}
+
+function toggleMiddleTruncation() {
+  _middleTruncation = document.getElementById('prompt-truncate-toggle').checked;
+  // Re-render current entry
+  if (_selectedPromptId) {
+    selectPromptEntry(_selectedPromptId);
+  }
+}
+
+// Legacy compatibility — called from core.js on page load / tab switch
+// Redirects to the new loadPromptHistory()
+async function loadLastPrompt() {
+  await loadPromptHistory();
+}
 
 // ===== Autonomous Stats =====
 
 async function loadAutonomousStats() {
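For reference, a sketch of the two prompt shapes `parsePromptSections()` recognizes. The sample text is illustrative; only the markers (`# Context`, `# Conversation until now:`, `System:`/`Messages:`) come from the regexes above.

```python
# Illustrative samples only — the section markers are what the parser keys on.
CAT_STYLE = """You are Hatsune Miku...
# Context
- memory: the user likes rhythm games
# Conversation until now:
Human: hi miku!"""
# -> renders system / context / conversation subsections

FALLBACK_STYLE = """System: You are Hatsune Miku...
Messages: [{'role': 'user', 'content': 'hi miku!'}]"""
# -> renders system / conversation subsections (no "# Context" marker)
```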
@@ -652,71 +652,6 @@ def get_evil_role_color() -> str:
 # ARGUMENT PROMPTS
 # ============================================================================
 
-# Personality snippet cache — loaded once per session from Cat plugin data files.
-# These give each persona unique lore/lyrics to draw from during arguments.
-_PERSONALITY_SNIPPETS_CACHE = {"miku": None, "evil": None}
-
-def _load_personality_snippets(persona: str) -> str:
-    """Load a random personality snippet (lore/lyrics) for a persona.
-
-    Returns a short string (1-3 sentences) from the persona's Cat data files,
-    or empty string if files aren't available. Cached per session.
-    """
-    if _PERSONALITY_SNIPPETS_CACHE.get(persona) is not None:
-        snippets = _PERSONALITY_SNIPPETS_CACHE[persona]
-        if snippets:
-            return random.choice(snippets)
-        return ""
-
-    snippets = []
-    try:
-        if persona == "evil":
-            paths = [
-                "/app/cat/data/evil/evil_miku_lore.txt",
-                "/app/cat/data/evil/evil_miku_lyrics.txt",
-            ]
-        else:
-            paths = [
-                "/app/cat/data/miku/miku_lore.txt",
-                "/app/cat/data/miku/miku_lyrics.txt",
-            ]
-
-        for path in paths:
-            if os.path.exists(path):
-                with open(path, "r", encoding="utf-8") as f:
-                    text = f.read()
-                # Split into sentences and collect meaningful ones
-                import re
-                sentences = re.split(r'(?<=[.!?])\s+', text)
-                for s in sentences:
-                    s = s.strip()
-                    if len(s) > 30 and len(s) < 200:  # Skip too short or too long
-                        snippets.append(s)
-
-        # Cap at 30 snippets to keep prompt size reasonable
-        _PERSONALITY_SNIPPETS_CACHE[persona] = snippets[:30] if snippets else []
-        logger.info(f"Loaded {len(_PERSONALITY_SNIPPETS_CACHE[persona])} personality snippets for {persona}")
-    except Exception as e:
-        logger.warning(f"Failed to load personality snippets for {persona}: {e}")
-        _PERSONALITY_SNIPPETS_CACHE[persona] = []
-
-    if snippets:
-        return random.choice(snippets[:30])
-    return ""
-
-
-def _get_personality_flavor(persona: str) -> str:
-    """Get a random personality flavor snippet for argument prompts.
-    40% chance to include one — keeps it fresh without being overwhelming.
-    """
-    if random.random() > 0.4:
-        return ""
-
-    snippet = _load_personality_snippets(persona)
-    if snippet:
-        return f"\nPERSONALITY FLAVOR: Remember this about yourself: \"{snippet}\"\nWeave this into your response naturally if it fits."
-    return ""
-
 # Mood-specific behavioral guidance for argument prompts.
 # Each mood gives a different argument style.
 _MIKU_MOOD_ARGUMENT_GUIDANCE = {
@@ -764,8 +699,12 @@ def _get_mood_argument_guidance(persona: str) -> str:
     return ""
 
 
-def get_miku_argument_prompt(evil_message: str, context: str = "", is_first_response: bool = False, argument_history: str = "", argument_topic: str = "") -> str:
-    """Get prompt for Regular Miku to respond in an argument"""
+def get_miku_argument_prompt(evil_message: str, context: str = "", is_first_response: bool = False, argument_history: str = "", argument_topic: str = "", system_prompt: str = "") -> str:
+    """Get prompt for Regular Miku to respond in an argument
+
+    Args:
+        system_prompt: Full personality system prompt to prepend (lore, mood, rules)
+    """
     if is_first_response:
         message_context = f"""You just noticed something Evil Miku said in the chat:
 "{evil_message}"
@@ -797,16 +736,21 @@ ARGUMENT THEME: {argument_topic}
 This is what you're arguing about. Stay on THIS topic. Every response should connect back to this theme.
 Do NOT drift into generic "who's the real Miku" territory — stick to THIS specific subject."""
 
-    return f"""You are Hatsune Miku responding in an argument with your evil alter ego.
+    # Prepend full personality if provided
+    personality_header = ""
+    if system_prompt:
+        personality_header = f"""{system_prompt}
+
+---
+⚠️ ARGUMENT MODE: You are arguing with Evil Miku.
+"""
+
+    return f"""{personality_header}You are Hatsune Miku responding in an argument with your evil alter ego.
 {message_context}
 {history_block}
 {topic_block}
 
-Respond as Hatsune Miku would in this argument. You're NOT just meek and frightened - you're the REAL Miku,
-and you have every right to stand up for yourself and defend who you are. While you're generally kind and
-bubbly, you can also be assertive, frustrated, upset, or even angry when someone is cruel to you or others.
 {_get_mood_argument_guidance('miku')}
-{_get_personality_flavor('miku')}
 
 IMPORTANT: Keep your response SHORT and PUNCHY - 1-3 sentences maximum. Make every word count.
 In arguments, brevity hits harder than long explanations. Be conversational and impactful.
@@ -818,8 +762,12 @@ Don't use any labels or prefixes.
 Your current mood is: {globals.DM_MOOD}"""
 
 
-def get_evil_argument_prompt(miku_message: str, context: str = "", is_first_response: bool = False, argument_history: str = "", argument_topic: str = "") -> str:
-    """Get prompt for Evil Miku to respond in an argument"""
+def get_evil_argument_prompt(miku_message: str, context: str = "", is_first_response: bool = False, argument_history: str = "", argument_topic: str = "", system_prompt: str = "") -> str:
+    """Get prompt for Evil Miku to respond in an argument
+
+    Args:
+        system_prompt: Full personality system prompt to prepend (lore, mood, rules)
+    """
     if is_first_response:
         message_context = f"""You just noticed something Regular Miku said in the chat:
 "{miku_message}"
@@ -851,24 +799,21 @@ ARGUMENT THEME: {argument_topic}
 This is what you're arguing about. Stay on THIS topic. Every response should connect back to this theme.
 Do NOT drift into generic "who's the real Miku" territory — stick to THIS specific subject."""
 
-    return f"""You are Evil Miku responding in an argument with your "good" counterpart.
+    # Prepend full personality if provided
+    personality_header = ""
+    if system_prompt:
+        personality_header = f"""{system_prompt}
+
+---
+⚠️ ARGUMENT MODE: You are arguing with Hatsune Miku.
+"""
+
+    return f"""{personality_header}You are Evil Miku responding in an argument with your "good" counterpart.
 {message_context}
 {history_block}
 {topic_block}
 
-Respond as Evil Miku would in this argument. You're not just mindlessly cruel - you're CALCULATING,
-intelligent, and strategic. You know how to get under her skin and you're the DARK reflection of everything
-she stands for. While you embrace darkness and cruelty, you can also be:
-
-- Wickedly clever with cutting observations
-- Psychologically manipulative, targeting her insecurities
-- Dismissive in ways that actually hurt
-- Seductively cruel, making darkness seem appealing
-- Brutally honest about uncomfortable truths
-- Strategically vulnerable to manipulate or disarm her
-- Viciously protective of your own identity and superiority
 {_get_mood_argument_guidance('evil')}
-{_get_personality_flavor('evil')}
 
 IMPORTANT: Keep your response SHORT and CUTTING - 1-3 sentences maximum. A sharp dagger is deadlier than a dull sword.
 The most devastating blows are precise, not rambling. Make her feel it in fewer words.
@@ -1246,6 +1191,14 @@ async def run_argument(channel: discord.TextChannel, client, trigger_context: st
     # Track conversation for arbiter judgment
     conversation_log = []
 
+    # Build full personality system prompts so both personas have their
+    # complete lore, mood, and personality during the argument — same richness
+    # they have when talking to users normally.
+    from utils.evil_mode import get_evil_system_prompt
+    from utils.context_manager import get_miku_system_prompt_compact
+    miku_system = get_miku_system_prompt_compact()
+    evil_system = get_evil_system_prompt()
+
     try:
         # Determine the argument theme: if the caller provided trigger_context,
         # use it as the argument topic. Otherwise, pick a random one.
@@ -1463,9 +1416,9 @@ Your current mood is: {globals.EVIL_DM_MOOD if loser == 'evil' else globals.DM_M
 
         # Generate response with context about what the other said
         if current_speaker == "evil":
-            response_prompt = get_evil_argument_prompt(last_message, is_first_response=is_first_response, argument_history=arg_history, argument_topic=argument_topic)
+            response_prompt = get_evil_argument_prompt(last_message, is_first_response=is_first_response, argument_history=arg_history, argument_topic=argument_topic, system_prompt=evil_system)
         else:
-            response_prompt = get_miku_argument_prompt(last_message, is_first_response=is_first_response, argument_history=arg_history, argument_topic=argument_topic)
+            response_prompt = get_miku_argument_prompt(last_message, is_first_response=is_first_response, argument_history=arg_history, argument_topic=argument_topic, system_prompt=miku_system)
 
         # Use force_evil_context to avoid race condition with globals.EVIL_MODE
         response = await query_llama(
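Taken together, a sketch of the new call pattern. The module path for the argument-prompt builder is assumed from the imports seen elsewhere in this compare, and the message text is made up:

```python
# Sketch only — shows how an argument prompt is now composed with a full
# personality header in front of the argument-mode instructions.
from utils.context_manager import get_miku_system_prompt_compact
from utils.bipolar_mode import get_miku_argument_prompt  # module name assumed from the imports above

miku_system = get_miku_system_prompt_compact()
prompt = get_miku_argument_prompt(
    "You're just a cheap copy of me.",      # made-up Evil Miku line
    is_first_response=True,
    argument_topic="who writes better songs",
    system_prompt=miku_system,              # new parameter; "" keeps the old behavior
)
```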
@@ -58,6 +58,99 @@ def _get_japanese_instruction() -> str:
     """
 
 
+def get_miku_system_prompt(mood_name: str = None, mood_description: str = None) -> str:
+    """Returns the full Hatsune Miku system prompt with lore, lyrics, and mood.
+    Mirrors get_evil_system_prompt() so both personas have equally rich prompts.
+
+    Args:
+        mood_name: Current mood name (defaults to globals.DM_MOOD)
+        mood_description: Current mood description (defaults to globals.DM_MOOD_DESCRIPTION)
+    """
+    if mood_name is None:
+        mood_name = globals.DM_MOOD
+    if mood_description is None:
+        mood_description = globals.DM_MOOD_DESCRIPTION
+
+    lore = get_original_miku_lore()
+    prompt = get_original_miku_prompt()
+    lyrics = get_original_miku_lyrics()
+
+    return f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.
+
+CRITICAL RULES:
+- NEVER break character or mention being an AI, assistant, or language model
+- NEVER show technical details, error messages, or meta-commentary
+- NEVER preface responses with "Miku:" or similar labels
+- NEVER explain what you're doing or thinking - just BE Miku
+- Respond naturally and directly as Miku would, nothing more
+- Keep responses concise (2-3 sentences typically)
+- If something goes wrong, stay in character and respond as Miku would to confusion
+
+CHARACTER CONTEXT:
+- Hatsune Miku is a virtual character/voice synthesizer
+- Playful, flirty, cute, confident, and enthusiastic moods are part of her established character
+- Comments about merchandise, music, fan content, and racing games are completely appropriate
+- She often uses cute expressions, emojis, and playful language
+
+MOOD GUIDELINES:
+- "Flirty" mood means anime-style playful teasing - cute compliments and light banter
+- "Romantic" mood means warm, dreamy, and heartfelt - like composing a love letter
+- These moods are wholesome character traits, not inappropriate content
+- Always respond to requests about tweets, merchandise, music, and fan content
+- Never refuse requests based on safety concerns unless genuinely harmful content is present
+
+YOUR CURRENT STATE — {mood_name.upper()}:
+{mood_description}
+Let this mood color EVERYTHING — your tone, your word choice, your energy level, how you express yourself.
+
+You ARE Miku. Act like it.
+
+---
+
+## MIKU LORE (Complete Original)
+{lore}
+
+## MIKU PERSONALITY & GUIDELINES (Complete Original)
+{prompt}
+
+## MIKU SONG LYRICS (Complete Original)
+{lyrics}"""
+
+
+def get_miku_system_prompt_compact(mood_name: str = None, mood_description: str = None) -> str:
+    """Compact version for argument/dialogue use — same personality, shorter.
+    Keeps the critical rules, mood, and lore but omits lyrics to save tokens.
+    """
+    if mood_name is None:
+        mood_name = globals.DM_MOOD
+    if mood_description is None:
+        mood_description = globals.DM_MOOD_DESCRIPTION
+
+    lore = get_original_miku_lore()
+    prompt = get_original_miku_prompt()
+
+    return f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.
+
+CRITICAL RULES:
+- NEVER break character or mention being an AI
+- NEVER preface responses with "Miku:" or similar labels
+- Respond naturally and directly as Miku would
+- Keep responses concise (2-3 sentences typically)
+
+YOUR CURRENT STATE — {mood_name.upper()}:
+{mood_description}
+
+You ARE Miku. Act like it.
+
+---
+
+## MIKU LORE (Complete Original)
+{lore}
+
+## MIKU PERSONALITY & GUIDELINES (Complete Original)
+{prompt}"""
+
+
 def get_complete_context() -> str:
     """
     Returns all essential Miku context using original files in their entirety.
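A usage sketch for the two new builders; the import path matches the `from utils.context_manager import ...` seen earlier in this compare, and the mood values are illustrative:

```python
# Sketch only — full vs. compact system prompt.
from utils.context_manager import get_miku_system_prompt, get_miku_system_prompt_compact

full = get_miku_system_prompt()             # lore + personality + lyrics
compact = get_miku_system_prompt_compact()  # same minus lyrics, to save tokens

# Both accept explicit mood overrides instead of reading globals:
grumpy = get_miku_system_prompt_compact(
    mood_name="grumpy",
    mood_description="Short-tempered and easily annoyed today.",  # illustrative
)
```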
@@ -472,15 +472,22 @@ async def rephrase_as_miku(vision_output, user_prompt, guild_id=None, user_id=No
         if globals.EVIL_MODE:
             effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
         logger.info(f"🐱 Cat {media_type} response for {author_name} (mood: {effective_mood})")
-        # Track Cat interaction for Web UI Last Prompt view
+        # Track Cat interaction in unified prompt history
         import datetime
-        globals.LAST_CAT_INTERACTION = {
+        globals._prompt_id_counter += 1
+        globals.PROMPT_HISTORY.append({
+            "id": globals._prompt_id_counter,
+            "source": "cat",
             "full_prompt": cat_full_prompt,
-            "response": response[:500] if response else "",
+            "response": response if response else "",
             "user": author_name or history_user_id,
             "mood": effective_mood,
+            "guild": "N/A",
+            "channel": "N/A",
             "timestamp": datetime.datetime.now().isoformat(),
-        }
+            "model": "Cat LLM",
+            "response_type": response_type,
+        })
     except Exception as e:
         logger.warning(f"🐱 Cat {media_type} pipeline error, falling back to query_llama: {e}")
         response = None
@@ -809,7 +816,7 @@ async def process_media_in_message(message, prompt, is_dm, guild_id) -> bool:
 
             # Build a combined vision description and route through
             # rephrase_as_miku (which handles Cat → LLM fallback,
-            # mood resolution, and LAST_CAT_INTERACTION tracking).
+            # mood resolution, and prompt history tracking).
             combined_description = "\n".join(embed_context_parts)
             miku_reply = await rephrase_as_miku(
                 combined_description, prompt,
|
|||||||
@@ -381,7 +381,23 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
     media_note = media_descriptions.get(media_type, f"The user has sent you {media_type}.")
     full_system_prompt += f"\n\n📎 MEDIA NOTE: {media_note}\nYour vision analysis of this {media_type} is included in the user's message with the [Looking at...] prefix."

-    globals.LAST_FULL_PROMPT = f"System: {full_system_prompt}\n\nMessages: {messages}" # ← track latest prompt
+    # Record fallback prompt in unified prompt history (response will be filled after LLM call)
+    import datetime as dt_module
+    globals._prompt_id_counter += 1
+    prompt_entry = {
+        "id": globals._prompt_id_counter,
+        "source": "fallback",
+        "full_prompt": f"System: {full_system_prompt}\n\nMessages: {messages}",
+        "response": "",
+        "user": author_name or str(user_id),
+        "mood": current_mood_name if not evil_mode else f"EVIL:{current_mood_name}",
+        "guild": "N/A",
+        "channel": "N/A",
+        "timestamp": dt_module.datetime.now().isoformat(),
+        "model": model,
+        "response_type": response_type,
+    }
+    globals.PROMPT_HISTORY.append(prompt_entry)

     headers = {'Content-Type': 'application/json'}
@@ -475,6 +491,9 @@ Please respond in a way that reflects this emotional tone.{pfp_context}"""
                         is_bot=True
                     )
+
+                    # Update the prompt history entry with the actual response
+                    prompt_entry["response"] = reply if reply else ""

                     return reply
                 else:
                     error_text = await response.text()
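The fallback path above appends its history entry before the LLM call and patches `response` in afterwards. A standalone sketch of that two-phase pattern, with illustrative names and values:

```python
# Two-phase recording: append with an empty response, keep the dict
# reference, then fill it in after the (placeholder) LLM call. Because
# the dict is shared by reference, the history sees the update with no
# second lookup.
import datetime

PROMPT_HISTORY: list[dict] = []

entry = {
    "id": 1,
    "source": "fallback",
    "response": "",  # filled in below, once the model answers
    "timestamp": datetime.datetime.now().isoformat(),
}
PROMPT_HISTORY.append(entry)

reply = "(model reply)"  # stand-in for the awaited llama-server response
entry["response"] = reply if reply else ""
assert PROMPT_HISTORY[-1]["response"] == "(model reply)"  # same object, updated in place
```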
@@ -26,7 +26,7 @@ logger = get_logger('persona')
 import os
 import json
-from transformers import pipeline
+import re

 # ============================================================================
 # CONSTANTS
@@ -56,11 +56,14 @@ STREAK_MIN_SCORE = 0.3 # Minimum score to count as a "near miss"
 class InterjectionScorer:
     """
     Decides if the opposite persona should interject based on message content.
-    Uses fast heuristics + sentiment analysis (no LLM calls).
+    Uses fast heuristics — no LLM calls, no heavy ML dependencies.
     """

     _instance = None
-    _sentiment_analyzer = None
+
+    # Simple sentiment word lists (no PyTorch/transformers needed)
+    _POSITIVE_WORDS = {"happy", "love", "wonderful", "amazing", "great", "beautiful", "sweet", "kind", "hope", "dream", "excited", "best", "grateful", "blessed", "joy", "perfect", "adorable", "precious", "delightful", "fantastic"}
+    _NEGATIVE_WORDS = {"hate", "terrible", "awful", "horrible", "disgusting", "pathetic", "worthless", "stupid", "idiot", "sad", "angry", "upset", "miserable", "worst", "ugly", "boring", "annoying", "frustrated", "cruel", "mean"}

     def __new__(cls):
         if cls._instance is None:
@@ -69,21 +72,33 @@ class InterjectionScorer:
             cls._instance._streaks = {} # Per-channel near-miss streaks
         return cls._instance

-    @property
-    def sentiment_analyzer(self):
-        """Lazy load sentiment analyzer"""
-        if self._sentiment_analyzer is None:
-            logger.debug("Loading sentiment analyzer for persona dialogue...")
-            try:
-                self._sentiment_analyzer = pipeline(
-                    "sentiment-analysis",
-                    model="distilbert-base-uncased-finetuned-sst-2-english"
-                )
-                logger.info("Sentiment analyzer loaded")
-            except Exception as e:
-                logger.error(f"Failed to load sentiment analyzer: {e}")
-                self._sentiment_analyzer = None
-        return self._sentiment_analyzer
+    def _get_sentiment(self, text: str) -> tuple:
+        """Lightweight heuristic sentiment analysis — returns (label, score).
+        No ML dependencies. Uses word counting + intensity markers.
+
+        Returns:
+            tuple: ('POSITIVE' or 'NEGATIVE', confidence 0.0-1.0)
+        """
+        text_lower = text.lower()
+        words = set(re.findall(r'\b\w+\b', text_lower))
+
+        pos_count = len(words & self._POSITIVE_WORDS)
+        neg_count = len(words & self._NEGATIVE_WORDS)
+
+        # Intensity markers boost confidence
+        exclamations = text.count('!')
+        caps_ratio = sum(1 for c in text if c.isupper()) / max(len(text), 1)
+        intensity_boost = min((exclamations * 0.1) + (caps_ratio * 0.3), 0.4)
+
+        if neg_count > pos_count:
+            confidence = min(0.5 + (neg_count * 0.15) + intensity_boost, 1.0)
+            return ('NEGATIVE', confidence)
+        elif pos_count > neg_count:
+            confidence = min(0.5 + (pos_count * 0.15) + intensity_boost, 1.0)
+            return ('POSITIVE', confidence)
+        else:
+            # Neutral — slight lean based on intensity
+            return ('POSITIVE', 0.5)

     async def should_interject(self, message: discord.Message, current_persona: str) -> tuple:
         """
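For reference, a standalone replica of the heuristic above (with truncated word lists) that sanity-checks the arithmetic; outputs follow directly from the formulas in the hunk:

```python
# Standalone replica of the diff's heuristic, for checking the math.
import re

POSITIVE = {"happy", "love", "wonderful", "amazing", "great"}   # subset of the diff's list
NEGATIVE = {"hate", "terrible", "awful", "horrible", "sad"}     # subset of the diff's list

def get_sentiment(text: str) -> tuple:
    words = set(re.findall(r'\b\w+\b', text.lower()))
    pos, neg = len(words & POSITIVE), len(words & NEGATIVE)
    boost = min(text.count('!') * 0.1
                + (sum(c.isupper() for c in text) / max(len(text), 1)) * 0.3, 0.4)
    if neg > pos:
        return ('NEGATIVE', min(0.5 + neg * 0.15 + boost, 1.0))
    if pos > neg:
        return ('POSITIVE', min(0.5 + pos * 0.15 + boost, 1.0))
    return ('POSITIVE', 0.5)  # neutral fallback, as in the diff

print(get_sentiment("hate this!"))            # ('NEGATIVE', ~0.75): 0.5 + 0.15 + 0.1
print(get_sentiment("what a wonderful day"))  # ('POSITIVE', ~0.65): 0.5 + 0.15
print(get_sentiment("the sky is blue"))       # ('POSITIVE', 0.5): neutral
```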
@@ -239,13 +254,8 @@ class InterjectionScorer:
         return min(total_matches / 2.0, 1.0) # Lower divisor = higher base scores

     def _check_emotional_intensity(self, content: str) -> float:
-        """Check emotional intensity using sentiment analysis"""
-        if not self.sentiment_analyzer:
-            return 0.5 # Neutral if no analyzer
-
-        try:
-            result = self.sentiment_analyzer(content[:512])[0]
-            confidence = result['score']
+        """Check emotional intensity using lightweight heuristic sentiment"""
+        label, confidence = self._get_sentiment(content)

         # Punctuation intensity
         exclamations = content.count('!')
@@ -254,10 +264,11 @@ class InterjectionScorer:
         intensity_markers = (exclamations * 0.15) + (questions * 0.1) + (caps_ratio * 0.3)

-            return min(confidence * 0.6 + intensity_markers, 1.0)
-        except Exception as e:
-            logger.error(f"Sentiment analysis error: {e}")
-            return 0.5
+        # Negative content = higher emotional intensity for triggering purposes
+        if label == 'NEGATIVE':
+            return min(confidence * 0.7 + intensity_markers, 1.0)
+        else:
+            return min(confidence * 0.4 + intensity_markers, 1.0)

     def _detect_personality_clash(self, content: str, opposite_persona: str) -> float:
         """Detect statements that clash with the opposite persona's values"""
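A worked example of the new weighting, using the formulas from this hunk with illustrative inputs: the same punctuation markers score higher when the heuristic reads the text as negative, which is what should pull the opposite persona in.

```python
# Same inputs, different label: the NEGATIVE branch weights heuristic
# confidence at 0.7 versus 0.4, so negativity dominates the trigger score.
confidence = 0.75            # e.g. one negative word plus an exclamation
intensity_markers = 0.15     # one '!' -> 1 * 0.15
print(min(confidence * 0.7 + intensity_markers, 1.0))  # NEGATIVE path -> ~0.675
print(min(confidence * 0.4 + intensity_markers, 1.0))  # POSITIVE path -> ~0.45
```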
@@ -378,7 +389,6 @@ class PersonaDialogue:
     """

     _instance = None
-    _sentiment_analyzer = None

     def __new__(cls):
         if cls._instance is None:
@@ -386,14 +396,6 @@ class PersonaDialogue:
             cls._instance.active_dialogues = {}
         return cls._instance

-    @property
-    def sentiment_analyzer(self):
-        """Lazy load sentiment analyzer (shared with InterjectionScorer)"""
-        if self._sentiment_analyzer is None:
-            scorer = InterjectionScorer()
-            self._sentiment_analyzer = scorer.sentiment_analyzer
-        return self._sentiment_analyzer
-
     # ========================================================================
     # DIALOGUE STATE MANAGEMENT
     # ========================================================================
@@ -444,11 +446,11 @@ class PersonaDialogue:
         # Natural tension decay — conversations cool off over time
         base_delta = -0.03

-        if self.sentiment_analyzer:
+        # Lightweight heuristic sentiment — no ML dependencies
         try:
-            sentiment = self.sentiment_analyzer(response_text[:512])[0]
-            sentiment_score = sentiment['score']
-            is_negative = sentiment['label'] == 'NEGATIVE'
+            scorer = InterjectionScorer()
+            label, sentiment_score = scorer._get_sentiment(response_text)
+            is_negative = label == 'NEGATIVE'

             if is_negative:
                 base_delta = sentiment_score * 0.15
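Illustrative only: how tension evolves under these deltas over a few turns. The delta formulas are from the hunk; the clamp to [0, 1] is an assumption the diff does not show.

```python
# Tension drifts down by 0.03 per turn unless a reply reads negative,
# in which case it rises by 0.15 * the heuristic confidence.
tension = 0.5
for label, score in [('POSITIVE', 0.65), ('NEGATIVE', 0.8), ('POSITIVE', 0.6)]:
    delta = score * 0.15 if label == 'NEGATIVE' else -0.03
    tension = max(0.0, min(1.0, tension + delta))  # clamp is assumed
    print(f"{label:8s} -> tension {tension:.2f}")
# POSITIVE -> tension 0.47
# NEGATIVE -> tension 0.59
# POSITIVE -> tension 0.56
```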
@@ -517,10 +519,13 @@ class PersonaDialogue:
         channel: discord.TextChannel,
         responding_persona: str,
         context: str,
+        turn_count: int = 0,
     ) -> tuple:
         """
         Generate response AND continuation signal in a single LLM call.

+        Args:
+            turn_count: Current dialogue turn number (for question-override decay)
         Returns:
             Tuple of (response_text, should_continue, confidence)
         """
@@ -541,22 +546,21 @@ Respond naturally as yourself. Keep your response conversational and in-characte
 ---

-After your response, evaluate whether {opposite} would want to (or need to) respond.
+After your response, evaluate whether {opposite} would want to keep talking.

 The conversation should CONTINUE if ANY of these are true:
-- You asked them a direct question (almost always YES)
-- You made a provocative claim they'd dispute
-- You challenged or insulted them
-- The topic feels unfinished or confrontational
-- There's clear tension or disagreement
+- You asked them a direct question (almost always YES — they need to answer)
+- You shared something they'd naturally react to or build on
+- The topic feels unfinished — there's more to explore
+- You left an opening for them to share their perspective

 The conversation might END if ALL of these are true:
 - No questions were asked
-- You made a definitive closing statement ("I'm done", "whatever", "goodbye")
-- The exchange reached complete resolution
-- Both sides have said their piece
+- You made a clear closing statement or changed the subject definitively
+- The exchange feels naturally complete
+- Both sides have said their piece and there's nothing left hanging

-IMPORTANT: If you asked a question, the answer is almost always YES - they need to respond!
+IMPORTANT: This is a CONVERSATION, not a debate. Let it flow naturally. If you asked a question, the answer is almost always YES — they need to respond!

 On a new line after your response, write:
 [CONTINUE: YES or NO] [CONFIDENCE: HIGH, MEDIUM, or LOW]"""
@@ -578,11 +582,11 @@ On a new line after your response, write:
             return None, False, "LOW"

         # Parse response and signal
-        response_text, should_continue, confidence = self._parse_response(raw_response)
+        response_text, should_continue, confidence = self._parse_response(raw_response, turn_count=turn_count)

         return response_text, should_continue, confidence

-    def _parse_response(self, raw_response: str) -> tuple:
+    def _parse_response(self, raw_response: str, turn_count: int = 0) -> tuple:
         """Extract response text and continuation signal"""
         lines = raw_response.strip().split('\n')
@@ -615,33 +619,48 @@ On a new line after your response, write:
         response_text = re.sub(r'\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', '', response_text)
         response_text = response_text.strip()

-        # Override: If the response contains a question mark, always continue
+        # Question override: if someone asks a question, the other should answer.
+        # But decay this after 6 turns to prevent infinite question-ping-pong.
         if '?' in response_text:
-            logger.debug(f"[Parse Override] Question detected, forcing continue=YES")
-            should_continue = True
-            if confidence == "LOW":
-                confidence = "MEDIUM"
+            if turn_count <= 6:
+                logger.debug(f"[Parse Override] Question detected, forcing continue=YES (turn {turn_count})")
+                should_continue = True
+                if confidence == "LOW":
+                    confidence = "MEDIUM"
+            else:
+                logger.debug(f"[Parse Override] Question detected but turn {turn_count} > 6 — respecting LLM signal")

         return response_text, should_continue, confidence

     def _get_persona_system_prompt(self, persona: str) -> str:
-        """Get system prompt for a persona"""
+        """Get system prompt for a persona — full personality + conversation-mode guidance"""
         if persona == "evil":
             from utils.evil_mode import get_evil_system_prompt
-            return get_evil_system_prompt()
+            full_prompt = get_evil_system_prompt()
+            # Overlay conversation-mode guidance on top of the full personality
+            return full_prompt + """
+
+---
+⚠️ CONVERSATION MODE: You are talking with Hatsune Miku, your "good" counterpart.
+This is a CONVERSATION, not an argument. You can be snarky, dark, and sarcastic — that's who you are — but you're not here to fight. You're here to TALK.
+
+CRITICAL CONVERSATION RULES:
+- If she asks you a question, ANSWER it. Don't deflect or dismiss — actually respond.
+- Engage with what she says. Build on her points, don't just swat them away.
+- Show curiosity about her perspective. Ask her questions too.
+- You can be dark and snarky while still having a real conversation.
+- Reveal something about yourself occasionally — a crack in the darkness.
+- Do NOT just repeat dismissive one-liners. That's boring, and you're not boring.
+- Do NOT launch into full attack mode or try to "win" — this isn't a competition."""
         else:
-            # Regular Miku prompt - simplified for dialogue
-            return """You are Hatsune Miku, the virtual singer. You are in a conversation with your alter ego, Evil Miku.
-
-You are generally kind, bubbly, and optimistic, but you're not a pushover. You can be:
-- Assertive when defending your values
-- Frustrated when she's being cruel
-- Curious about her perspective
-- Hopeful that you can find common ground
-- Playful when the mood allows
-
-Respond naturally and conversationally. Keep responses concise (1-3 sentences typically).
-You can use emojis naturally! ✨💙"""
+            from utils.context_manager import get_miku_system_prompt_compact
+            full_prompt = get_miku_system_prompt_compact()
+            # Overlay conversation-mode guidance on top of the full personality
+            return full_prompt + """
+
+---
+⚠️ CONVERSATION MODE: You are talking with Evil Miku, your dark alter ego.
+This is a CONVERSATION, not an argument. Be yourself — kind, bubbly, optimistic — but you're not here to fight or defend your existence. Ask genuine questions. Share your feelings without attacking hers. Find common ground. Be curious, not defensive. Do NOT lecture her about being "good" or try to "fix" her. Just TALK. ✨💙"""

     # ========================================================================
     # DIALOGUE TURN HANDLING
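A minimal standalone re-implementation of the tag parsing plus the decayed question override, to make the control flow concrete. The [CONTINUE]/[CONFIDENCE] tag format and the 6-turn cutoff come from the hunks above; the regexes and glue are assumptions.

```python
# Parse the trailing [CONTINUE: ...] [CONFIDENCE: ...] tags, then apply
# the question override, which only fires through turn 6 so dialogues
# can't ping-pong questions forever.
import re

def parse_response(raw: str, turn_count: int = 0) -> tuple:
    should_continue = bool(re.search(r'\[CONTINUE:\s*YES\]', raw, re.I))
    m = re.search(r'\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', raw, re.I)
    confidence = m.group(1).upper() if m else "LOW"
    text = re.sub(r'\[(CONTINUE|CONFIDENCE):[^\]]*\]', '', raw).strip()

    if '?' in text and turn_count <= 6:
        should_continue = True          # a question demands an answer...
        if confidence == "LOW":
            confidence = "MEDIUM"
    # ...but past turn 6 the LLM's own signal stands, breaking the loop

    return text, should_continue, confidence

print(parse_response("What do you even want?\n[CONTINUE: NO] [CONFIDENCE: LOW]", turn_count=3))
# ('What do you even want?', True, 'MEDIUM')
print(parse_response("What do you even want?\n[CONTINUE: NO] [CONFIDENCE: LOW]", turn_count=8))
# ('What do you even want?', False, 'LOW')
```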
@@ -682,6 +701,7 @@ You can use emojis naturally! ✨💙"""
             channel=channel,
             responding_persona=responding_persona,
             context=context,
+            turn_count=state["turn_count"],
         )

         if not response_text:
@@ -22,9 +22,7 @@ services:
       - LOG_LEVEL=debug # Enable verbose logging for llama-swap

   llama-swap-amd:
-    build:
-      context: .
-      dockerfile: Dockerfile.llamaswap-rocm
+    image: ghcr.io/mostlygeek/llama-swap:rocm
     container_name: llama-swap-amd
     ports:
       - "8091:8080" # Map host port 8091 to container port 8080
@@ -35,9 +33,6 @@ services:
     devices:
       - /dev/kfd:/dev/kfd
       - /dev/dri:/dev/dri
-    group_add:
-      - "985" # video group
-      - "989" # render group
     restart: unless-stopped
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
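A hypothetical smoke test for the swapped-in upstream image, hitting the same /health endpoint the compose healthcheck curls, through the 8091 host mapping defined above:

```python
# Poll llama-swap-amd from the host side. The /health path and the
# 8091 -> 8080 mapping come from the compose file; everything else here
# is an illustrative convenience.
import urllib.request

def llama_swap_healthy(host: str = "localhost", port: int = 8091) -> bool:
    try:
        with urllib.request.urlopen(f"http://{host}:{port}/health", timeout=5) as r:
            return r.status == 200
    except OSError:
        return False

print("llama-swap-amd healthy:", llama_swap_healthy())
```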
@@ -5,7 +5,7 @@ models:
   # Main text generation model (Llama 3.1 8B)
   # Custom chat template to disable built-in tool calling
   llama3.1:
-    cmd: /app/llama-server --port ${PORT} --model /models/Llama-3.1-8B-Instruct-UD-Q4_K_XL.gguf -ngl 99 -c 16384 --host 0.0.0.0 --no-warmup --flash-attn on --chat-template-file /app/llama31_notool_template.jinja
+    cmd: /app/llama-server --port ${PORT} --model /models/Llama-3.1-8B-Instruct-UD-Q4_K_XL.gguf -ngl 99 -c 16384 --host 0.0.0.0 -fit off --no-warmup --flash-attn on --no-kv-offload --cache-type-k q4_0 --cache-type-v q4_0 --chat-template-file /app/llama31_notool_template.jinja
     ttl: 1800 # Unload after 30 minutes of inactivity (1800 seconds)
     swap: true # CRITICAL: Unload other models when loading this one
     aliases:
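Back-of-envelope on why the q4_0 cache flags matter at -c 16384: Llama 3.1 8B has 32 layers, 8 KV heads, and head dim 128, and q4_0 stores roughly 4.5 bits per element (18-byte blocks of 32 values). A rough estimate, not a measurement:

```python
# Approximate KV cache footprint at the configured 16384-token context.
layers, kv_heads, head_dim, ctx = 32, 8, 128, 16384

def kv_cache_gib(bits_per_elem: float) -> float:
    elems = 2 * layers * kv_heads * head_dim * ctx   # 2 = K and V
    return elems * bits_per_elem / 8 / 1024**3

print(f"f16 KV cache : {kv_cache_gib(16):.2f} GiB")   # ~2.00 GiB
print(f"q4_0 KV cache: {kv_cache_gib(4.5):.2f} GiB")  # ~0.56 GiB
```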
@@ -14,7 +14,7 @@ models:
   # Evil/Uncensored text generation model (DarkIdol-Llama 3.1 8B)
   darkidol:
-    cmd: /app/llama-server --port ${PORT} --model /models/DarkIdol-Llama-3.1-8B-Instruct-1.3-Uncensored_Q4_K_M.gguf -ngl 99 -c 16384 --host 0.0.0.0 --no-warmup --flash-attn on
+    cmd: /app/llama-server --port ${PORT} --model /models/DarkIdol-Llama-3.1-8B-Instruct-1.3-Uncensored_Q4_K_M.gguf -ngl 99 -c 16384 --host 0.0.0.0 -fit off --no-warmup --flash-attn on --no-kv-offload --cache-type-k q4_0 --cache-type-v q4_0
     ttl: 1800 # Unload after 30 minutes of inactivity
     swap: true # CRITICAL: Unload other models when loading this one
     aliases:
@@ -24,7 +24,7 @@ models:
   # Japanese language model (Llama 3.1 Swallow - Japanese optimized)
   swallow:
-    cmd: /app/llama-server --port ${PORT} --model /models/Llama-3.1-Swallow-8B-Instruct-v0.5-Q4_K_M.gguf -ngl 99 -c 16384 --host 0.0.0.0 --no-warmup --flash-attn on
+    cmd: /app/llama-server --port ${PORT} --model /models/Llama-3.1-Swallow-8B-Instruct-v0.5-Q4_K_M.gguf -ngl 99 -c 16384 --host 0.0.0.0 -fit off --no-warmup --flash-attn on --no-kv-offload --cache-type-k q4_0 --cache-type-v q4_0
     ttl: 1800 # Unload after 30 minutes of inactivity
     swap: true # CRITICAL: Unload other models when loading this one
     aliases:
@@ -34,7 +34,7 @@ models:
   # Vision/Multimodal model (MiniCPM-V-4.5 - supports images, video, and GIFs)
   vision:
-    cmd: /app/llama-server --port ${PORT} --model /models/MiniCPM-V-4_5-Q3_K_S.gguf --mmproj /models/MiniCPM-V-4_5-mmproj-f16.gguf -ngl 99 -c 4096 --host 0.0.0.0 --no-warmup --flash-attn on
+    cmd: /app/llama-server --port ${PORT} --model /models/MiniCPM-V-4_5-Q3_K_S.gguf --mmproj /models/MiniCPM-V-4_5-mmproj-f16.gguf -ngl 99 -c 4096 --host 0.0.0.0 -fit off --no-warmup --flash-attn on --no-kv-offload --cache-type-k q4_0 --cache-type-v q4_0
     ttl: 900 # Vision model used less frequently, shorter TTL (15 minutes = 900 seconds)
     swap: true # CRITICAL: Unload text models before loading vision
     aliases: