Remove all Ollama remnants and complete migration to llama.cpp
- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py (see the sketch below)
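For reference, a minimal sketch of what utils/llm.py could look like after the migration, including the backward compatibility alias from the last bullet. The query_llama signature mirrors the call sites in the diff below, but the server URL, the use of aiohttp, and the n_predict value are assumptions rather than the repository's actual implementation; llama.cpp's built-in server does expose a /completion endpoint that takes a JSON prompt and returns the generated text in a "content" field.

# utils/llm.py -- sketch only; transport details are assumed, not taken from this repo
import aiohttp

LLAMA_SERVER_URL = "http://localhost:8080"  # assumed address of the llama.cpp server

async def query_llama(prompt, user_id=None, guild_id=None, response_type=None):
    """Send the prompt to the llama.cpp HTTP server and return the generated text.

    user_id/guild_id/response_type drive conversation history and prompt caching
    in the real implementation; they are accepted but unused in this sketch.
    """
    payload = {"prompt": prompt, "n_predict": 256}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{LLAMA_SERVER_URL}/completion", json=payload) as resp:
            data = await resp.json()
            return data.get("content", "").strip()

# Backward compatibility: modules that still import query_ollama keep working.
query_ollama = query_llama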
@@ -11,7 +11,7 @@ from discord import TextChannel
 from difflib import SequenceMatcher
 import globals
 from server_manager import server_manager
-from utils.llm import query_ollama
+from utils.llm import query_llama
 from utils.moods import MOOD_EMOJIS
 from utils.twitter_fetcher import fetch_miku_tweets
 from utils.image_handling import (
@@ -107,7 +107,7 @@ async def miku_say_something_general_for_server(guild_id: int):
     for attempt in range(3): # retry up to 3 times if message is too similar
         # Use consistent user_id per guild for autonomous actions to enable conversation history
         # and prompt caching, rather than creating new IDs with timestamps
-        message = await query_ollama(prompt, user_id=f"miku-autonomous-{guild_id}", guild_id=guild_id, response_type="autonomous_general")
+        message = await query_llama(prompt, user_id=f"miku-autonomous-{guild_id}", guild_id=guild_id, response_type="autonomous_general")
         if not is_too_similar(message, _server_autonomous_messages[guild_id]):
             break
         print("🔁 Response was too similar to past messages, retrying...")
@@ -202,7 +202,7 @@ async def miku_engage_random_user_for_server(guild_id: int):
 
     try:
         # Use consistent user_id for engaging users to enable conversation history
-        message = await query_ollama(prompt, user_id=f"miku-engage-{guild_id}", guild_id=guild_id)
+        message = await query_llama(prompt, user_id=f"miku-engage-{guild_id}", guild_id=guild_id)
         await channel.send(f"{target.mention} {message}")
         _server_user_engagements[guild_id][target.id] = time.time()
         print(f"👤 Miku engaged {display_name} in server {server_config.guild_name}")
@@ -263,7 +263,7 @@ async def miku_detect_and_join_conversation_for_server(guild_id: int):
 
     try:
         # Use consistent user_id for joining conversations to enable conversation history
-        reply = await query_ollama(prompt, user_id=f"miku-conversation-{guild_id}", guild_id=guild_id, response_type="conversation_join")
+        reply = await query_llama(prompt, user_id=f"miku-conversation-{guild_id}", guild_id=guild_id, response_type="conversation_join")
         await channel.send(reply)
         print(f"💬 Miku joined an ongoing conversation in server {server_config.guild_name}")
     except Exception as e:
@@ -309,7 +309,7 @@ async def share_miku_tweet_for_server(guild_id: int):
         img_desc = await analyze_image_with_qwen(base64_img)
         base_prompt += f"\n\nThe image looks like this: {img_desc}"
 
-    miku_comment = await query_ollama(base_prompt, user_id=f"autonomous-{guild_id}", guild_id=guild_id, response_type="autonomous_tweet")
+    miku_comment = await query_llama(base_prompt, user_id=f"autonomous-{guild_id}", guild_id=guild_id, response_type="autonomous_tweet")
 
     # Post to Discord (convert to fxtwitter for better embeds)
     fx_tweet_url = tweet['url'].replace("twitter.com", "fxtwitter.com").replace("x.com", "fxtwitter.com")
@@ -342,7 +342,7 @@ async def handle_custom_prompt_for_server(guild_id: int, user_prompt: str):
 
     try:
         # Use consistent user_id for manual prompts to enable conversation history
-        message = await query_ollama(prompt, user_id=f"miku-manual-{guild_id}", guild_id=guild_id, response_type="autonomous_general")
+        message = await query_llama(prompt, user_id=f"miku-manual-{guild_id}", guild_id=guild_id, response_type="autonomous_general")
         await channel.send(message)
         print(f"🎤 Miku responded to custom prompt in server {server_config.guild_name}")
 
@@ -585,7 +585,7 @@ async def miku_autonomous_reaction_for_server(guild_id: int, force_message=None,
         f"Be bold! Use uncommon emojis! Respond with ONLY the emoji character itself, no text."
     )
 
-    emoji = await query_ollama(
+    emoji = await query_llama(
         prompt,
         user_id=f"miku-reaction-{guild_id}", # Use consistent user_id
         guild_id=guild_id,
@@ -750,7 +750,7 @@ async def miku_autonomous_reaction_for_dm(user_id: int, force_message=None):
         f"Be bold! Use uncommon emojis! Respond with ONLY the emoji character itself, no text."
     )
 
-    emoji = await query_ollama(
+    emoji = await query_llama(
         prompt,
         user_id=f"miku-dm-reaction-{user_id}", # Use consistent user_id per DM user
         guild_id=None, # DM doesn't have guild