Remove all Ollama remnants and complete migration to llama.cpp

- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py (a sketch of the alias follows below)
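
The alias itself is not visible in the hunks below, so here is a minimal, hypothetical sketch of what it might look like in utils/llm.py; the signature and body shown are assumptions, not taken from this diff:

```python
# utils/llm.py -- hypothetical sketch; the actual implementation is not in this diff.
async def query_llama(prompt: str, **kwargs) -> str:
    """Query the llama.cpp backend (body elided in this sketch)."""
    raise NotImplementedError

# Backward-compatibility alias: any stale `from utils.llm import query_ollama`
# import keeps resolving while callers migrate to the new name.
query_ollama = query_llama
```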
commit d58be3b33e (parent a6da4c0c2e)
Date: 2025-12-07 17:50:08 +02:00
15 changed files with 39 additions and 286 deletions


@@ -34,7 +34,7 @@ from utils.moods import (
 from utils.media import(
     overlay_username_with_ffmpeg
 )
-from utils.llm import query_ollama
+from utils.llm import query_llama
 from utils.autonomous import (
     setup_autonomous_speaking,
     load_last_sent_tweets,
@@ -100,7 +100,7 @@ async def on_ready():
 # Start server-specific schedulers (includes DM mood rotation)
 server_manager.start_all_schedulers(globals.client)
 # Start the global scheduler for other tasks
 globals.scheduler.start()
@@ -367,7 +367,8 @@ async def on_message(message):
 print(f"✅ Image downloaded, analyzing with vision model...")
 # Analyze image
 qwen_description = await analyze_image_with_qwen(base64_img)
-truncated = (qwen_description[:50] + "...") if len(qwen_description) > 50 else qwen_description
+if not base64_img:
+    truncated = (qwen_description[:50] + "...") if len(qwen_description) > 50 else qwen_description
print(f"📝 Vision analysis result: {truncated}")
if qwen_description and qwen_description.strip():
embed_context_parts.append(f"[Embedded image shows: {qwen_description}]")
@@ -413,7 +414,7 @@ async def on_message(message):
 guild_id = message.guild.id if message.guild else None
 response_type = "dm_response" if is_dm else "server_response"
 author_name = message.author.display_name
-response = await query_ollama(
+response = await query_llama(
     enhanced_prompt,
     user_id=str(message.author.id),
     guild_id=guild_id,
@@ -454,7 +455,7 @@ async def on_message(message):
 guild_id = message.guild.id if message.guild else None
 response_type = "dm_response" if is_dm else "server_response"
 author_name = message.author.display_name
-response = await query_ollama(
+response = await query_llama(
     prompt,
     user_id=str(message.author.id),
     guild_id=guild_id,
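
For context on the calls rewritten above, here is a minimal sketch of how query_llama might be implemented, assuming the bot talks to a llama.cpp server through its OpenAI-compatible /v1/chat/completions endpoint. The server URL and the handling of the extra keyword arguments seen at the call sites (user_id, guild_id, and so on) are assumptions, not taken from this diff:

```python
# Hypothetical sketch of the llama.cpp-backed helper; the endpoint URL and
# the decision to accept-and-ignore extra kwargs are assumptions.
import aiohttp

LLAMA_SERVER_URL = "http://localhost:8080/v1/chat/completions"  # assumed address

async def query_llama(prompt: str, **kwargs) -> str:
    payload = {"messages": [{"role": "user", "content": prompt}]}
    async with aiohttp.ClientSession() as session:
        async with session.post(LLAMA_SERVER_URL, json=payload) as resp:
            resp.raise_for_status()
            data = await resp.json()
    # llama.cpp's OpenAI-compatible server mirrors the OpenAI response shape
    return data["choices"][0]["message"]["content"]
```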