Files
miku-discord/bot/utils/sentiment_analysis.py
koko210Serve d58be3b33e Remove all Ollama remnants and complete migration to llama.cpp
- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py
2025-12-07 17:50:28 +02:00

44 lines
1.6 KiB
Python

from utils.llm import query_llama
async def analyze_sentiment(messages: list) -> tuple[str, float]:
    """
    Analyze the sentiment of a conversation using llama.cpp.

    Args:
        messages: Chat history entries, each a dict with at least
            'content' and 'is_bot_message' keys. Only the last 5
            entries are sent to the model.

    Returns:
        Tuple of (sentiment summary, positivity score clamped to 0-1).
        Falls back to a neutral (message, 0.5) pair when there is no
        input, the LLM response is missing/malformed, or an exception
        occurs.
    """
    # Nothing to analyze — avoid sending an empty conversation to the model.
    if not messages:
        return "No messages to analyze", 0.5

    # Combine the last few messages for context (up to 5).
    # Slicing already handles lists shorter than 5, so no length check needed.
    messages_to_analyze = messages[-5:]
    conversation_text = "\n".join(
        f"{'Bot' if msg['is_bot_message'] else 'User'}: {msg['content']}"
        for msg in messages_to_analyze
    )
    prompt = f"""Analyze the sentiment and tone of this conversation snippet between a user and a bot.
Focus on the overall mood, engagement level, and whether the interaction seems positive/neutral/negative.
Give a brief 1-2 sentence summary and a positivity score from 0-1 where:
0.0-0.3 = Negative/Hostile
0.3-0.7 = Neutral/Mixed
0.7-1.0 = Positive/Friendly
Conversation:
{conversation_text}
Format your response exactly like this example:
Summary: The conversation is friendly and engaging with good back-and-forth.
Score: 0.85
Response:"""
    try:
        response = await query_llama(prompt)
        if not response or 'Score:' not in response:
            return "Could not analyze sentiment", 0.5

        # Parse by label instead of fixed line positions: the model may emit
        # blank lines or preamble before "Summary:" / "Score:", which the old
        # lines[0]/lines[1] indexing silently mishandled.
        summary = "Could not analyze sentiment"
        score = 0.5
        for line in response.strip().split('\n'):
            stripped = line.strip()
            if stripped.startswith('Summary:'):
                summary = stripped.removeprefix('Summary:').strip()
            elif stripped.startswith('Score:'):
                try:
                    score = float(stripped.removeprefix('Score:').strip())
                except ValueError:
                    # Model produced a non-numeric score; keep neutral default.
                    score = 0.5

        # Clamp to the documented 0-1 range in case the model drifts outside it.
        score = max(0.0, min(1.0, score))
        return summary, score
    except Exception as e:
        # Boundary handler: log and degrade to a neutral result rather than
        # propagating an LLM/transport failure to callers.
        print(f"Error in sentiment analysis: {e}")
        return "Error analyzing sentiment", 0.5