Remove all Ollama remnants and complete migration to llama.cpp

- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py (see the sketch below)
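
A minimal sketch of what that alias could look like, assuming query_llama is the new entry point (the stub body is illustrative; the real wrapper is not shown in this excerpt):

```python
# utils/llm.py -- illustrative stub; a fuller sketch follows the diff below
async def query_llama(prompt: str, **kwargs) -> str:
    ...

# Backward-compatibility alias: old `from utils.llm import query_ollama`
# imports keep resolving without code changes at the call sites
query_ollama = query_llama
```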
2025-12-07 17:50:08 +02:00
parent a6da4c0c2e
commit d58be3b33e
15 changed files with 39 additions and 286 deletions

@@ -13,7 +13,7 @@ import tempfile
 import time
 from typing import Optional, Tuple
 import globals
-from utils.llm import query_ollama
+from utils.llm import query_llama
 
 # Image generation detection patterns
 IMAGE_REQUEST_PATTERNS = [
@@ -299,7 +299,7 @@ async def handle_image_generation_request(message, prompt: str) -> bool:
     response_prompt = f"A user asked you to create an image with this description: '{prompt}'. Respond enthusiastically that you're creating this image for them. Keep it short and excited!"
     response_type = "dm_response" if is_dm else "server_response"
 
-    initial_response = await query_ollama(response_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
+    initial_response = await query_llama(response_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
 
     # Send initial response
     initial_msg = await message.channel.send(initial_response)
@@ -318,7 +318,7 @@ async def handle_image_generation_request(message, prompt: str) -> bool:
 
     # Create a follow-up message about the completed image
     completion_prompt = f"You just finished creating an image based on '{prompt}'. Make a short, excited comment about the completed artwork!"
-    completion_response = await query_ollama(completion_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
+    completion_response = await query_llama(completion_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
 
     await message.channel.send(completion_response, file=file)
@@ -333,7 +333,7 @@ async def handle_image_generation_request(message, prompt: str) -> bool:
else:
# Image generation failed
error_prompt = "You tried to create an image but something went wrong with the generation process. Apologize briefly and suggest they try again later."
error_response = await query_ollama(error_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
error_response = await query_llama(error_prompt, user_id=user_id, guild_id=guild_id, response_type=response_type)
await message.channel.send(error_response)
print(f"❌ Image generation failed for prompt: {prompt}")
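
The call sites in this diff all await query_llama with a prompt plus user_id, guild_id, and response_type keyword arguments and receive a string reply. The commit does not show the wrapper body itself; the following is a hedged sketch of how such a wrapper might talk to a llama.cpp server, assuming the OpenAI-compatible /v1/chat/completions endpoint that llama-server exposes. The URL, timeout, and max_tokens values are placeholders, not taken from this commit.

```python
# Hypothetical sketch only: the real utils/llm.py body is not shown in this commit.
import httpx

LLAMA_SERVER_URL = "http://localhost:8080/v1/chat/completions"  # assumed llama-server default port

async def query_llama(
    prompt: str,
    user_id: int | None = None,
    guild_id: int | None = None,
    response_type: str = "server_response",
) -> str:
    """Send a prompt to a local llama.cpp server and return the generated reply."""
    # user_id, guild_id, and response_type presumably select per-user or
    # per-guild context in the real implementation; this sketch ignores them.
    payload = {
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 256,
    }
    async with httpx.AsyncClient(timeout=60.0) as client:
        resp = await client.post(LLAMA_SERVER_URL, json=payload)
        resp.raise_for_status()
        data = resp.json()
    return data["choices"][0]["message"]["content"]
```

Because the wrapper keeps the same signature the old query_ollama calls used, the migration reduces to the mechanical rename shown in the hunks above plus the one-line compatibility alias in llm.py.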