Remove all Ollama remnants and complete migration to llama.cpp

- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain a backward-compatibility alias in llm.py (see the sketch below)
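
The compatibility shim is roughly the following. This is a minimal sketch: only the alias itself is stated by this commit, and the query_llama signature is an assumption inferred from the call sites shown in the diff below.

    # utils/llm.py (sketch) -- the signature is assumed from the call sites in
    # the diff; only the backward-compatibility alias is described by the commit.
    async def query_llama(prompt: str, *, user_id: str | None = None,
                          guild_id: int | None = None,
                          response_type: str = "response") -> str:
        ...  # llama.cpp-backed completion lives here

    # Keep existing imports of query_ollama working during the migration.
    query_ollama = query_llama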
commit d58be3b33e (parent a6da4c0c2e)
2025-12-07 17:50:08 +02:00
15 changed files with 39 additions and 286 deletions


@@ -9,7 +9,7 @@ import globals
 from utils.twitter_fetcher import fetch_figurine_tweets_latest
 from utils.image_handling import analyze_image_with_qwen, download_and_encode_image
-from utils.llm import query_ollama
+from utils.llm import query_llama
 from utils.dm_logger import dm_logger
@@ -165,7 +165,7 @@ async def send_figurine_dm_to_user(client: discord.Client, user_id: int, tweet:
     base_prompt += "\n\nSign off as Miku with a cute emoji."
     # Query LLM in DM context (no guild_id -> DM mood rules apply)
-    miku_comment = await query_ollama(base_prompt, user_id=f"figurine_dm_{user_id}", guild_id=None, response_type="dm_response")
+    miku_comment = await query_llama(base_prompt, user_id=f"figurine_dm_{user_id}", guild_id=None, response_type="dm_response")
     dm = await user.create_dm()
     tweet_url = tweet.get("url", "")
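
With the alias in place, any module that has not yet been migrated can keep its old import until it is updated, for example:

    # Resolves to query_llama via the compatibility alias in utils/llm.py;
    # new code should import query_llama directly.
    from utils.llm import query_ollama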