Remove all Ollama remnants and complete migration to llama.cpp

- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py
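For reference, such an alias can be as simple as re-exporting the new function under the old name. A minimal sketch of what this might look like in utils/llm.py (the actual file contents are not shown in this commit; the signature below is inferred from the diff further down):

```python
# utils/llm.py -- sketch only; the real implementation is not part of this diff
async def query_llama(prompt: str, user_id=None, guild_id=None, response_type=None) -> str:
    """Send a prompt to the llama.cpp backend and return the model's response."""
    ...

# Backward-compatibility alias: any legacy `from utils.llm import query_ollama`
# imports keep working without touching call sites that were not migrated.
query_ollama = query_llama
```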
commit d58be3b33e (parent a6da4c0c2e)
Date: 2025-12-07 17:50:08 +02:00
15 changed files with 39 additions and 286 deletions

@@ -833,11 +833,11 @@ async def send_custom_prompt_dm(user_id: str, req: CustomPromptRequest):
return {"status": "error", "message": f"User {user_id} not found"}
# Use the LLM query function for DM context
from utils.llm import query_ollama
from utils.llm import query_llama
async def send_dm_custom_prompt():
try:
response = await query_ollama(req.prompt, user_id=user_id, guild_id=None, response_type="dm_response")
response = await query_llama(req.prompt, user_id=user_id, guild_id=None, response_type="dm_response")
await user.send(response)
print(f"✅ Custom DM prompt sent to user {user_id}: {req.prompt[:50]}...")