Remove all Ollama remnants and complete migration to llama.cpp

- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain a backward-compatibility alias in llm.py (a sketch follows this list)
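
For reference, a minimal sketch of what utils/llm.py could look like after the migration. The query_llama signature is taken from the call sites in the diff below; the server URL, payload shape, and request logic are assumptions for illustration, not the project's actual implementation.

    import aiohttp

    # Assumption: a local llama.cpp llama-server exposing its
    # OpenAI-compatible chat completions endpoint.
    LLAMA_SERVER_URL = "http://localhost:8080/v1/chat/completions"

    async def query_llama(prompt, user_id=None, guild_id=None, response_type=None):
        """Send a prompt to the llama.cpp server and return the reply text.

        user_id, guild_id, and response_type are accepted for call-site
        compatibility; any routing logic they drive is omitted here.
        """
        payload = {"messages": [{"role": "user", "content": prompt}]}
        async with aiohttp.ClientSession() as session:
            async with session.post(LLAMA_SERVER_URL, json=payload) as resp:
                data = await resp.json()
                return data["choices"][0]["message"]["content"]

    # Backward-compatibility alias so modules still importing
    # query_ollama keep working during the migration.
    query_ollama = query_llama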
commit d58be3b33e
parent a6da4c0c2e
date   2025-12-07 17:50:08 +02:00
15 changed files with 39 additions and 286 deletions


@@ -2,7 +2,7 @@
 import random
 import globals
-from utils.llm import query_ollama # Adjust path as needed
+from utils.llm import query_llama # Adjust path as needed

 async def detect_and_react_to_kindness(message, after_reply=False, server_context=None):
@@ -37,7 +37,7 @@ async def detect_and_react_to_kindness(message, after_reply=False, server_contex
         "Answer with 'yes' or 'no' only.\n\n"
         f"Message: \"{message.content}\""
     )
-    result = await query_ollama(prompt, user_id="kindness-check", guild_id=None, response_type="dm_response")
+    result = await query_llama(prompt, user_id="kindness-check", guild_id=None, response_type="dm_response")
     if result.strip().lower().startswith("yes"):
         await message.add_reaction(emoji)