Remove all Ollama remnants and complete migration to llama.cpp
- Remove Ollama-specific files (Dockerfile.ollama, entrypoint.sh)
- Replace all query_ollama imports and calls with query_llama
- Remove langchain-ollama dependency from requirements.txt
- Update all utility files (autonomous, kindness, image_generation, etc.)
- Update README.md documentation references
- Maintain backward compatibility alias in llm.py (see the sketch below)
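The backward-compatibility alias is not part of the diff shown below; a minimal sketch of what it might look like in utils/llm.py, assuming the call signature visible in the diff (the body of query_llama is elided, and everything except the names query_llama/query_ollama is an assumption):

```python
# utils/llm.py -- sketch only; the real implementation is not shown in this commit.

async def query_llama(prompt, user_id=None, guild_id=None, response_type=None):
    """Send a prompt to the llama.cpp backend and return the reply text."""
    ...  # actual llama.cpp call elided

# Backward-compatibility alias: call sites that still import query_ollama
# keep working until they are migrated to query_llama.
query_ollama = query_llama
```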
@@ -2,7 +2,7 @@
 
 import random
 import globals
-from utils.llm import query_ollama # Adjust path as needed
+from utils.llm import query_llama # Adjust path as needed
 
 
 async def detect_and_react_to_kindness(message, after_reply=False, server_context=None):
@@ -37,7 +37,7 @@ async def detect_and_react_to_kindness(message, after_reply=False, server_contex
         "Answer with 'yes' or 'no' only.\n\n"
         f"Message: \"{message.content}\""
     )
-    result = await query_ollama(prompt, user_id="kindness-check", guild_id=None, response_type="dm_response")
+    result = await query_llama(prompt, user_id="kindness-check", guild_id=None, response_type="dm_response")
 
     if result.strip().lower().startswith("yes"):
         await message.add_reaction(emoji)
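For context, query_llama presumably wraps an HTTP call to a running llama.cpp server. A minimal sketch assuming llama-server's OpenAI-compatible chat endpoint on localhost:8080 (the URL, token limit, and error handling are all assumptions; only the function signature comes from the diff above):

```python
import aiohttp

LLAMA_SERVER_URL = "http://localhost:8080"  # assumed llama.cpp server address

async def query_llama(prompt, user_id=None, guild_id=None, response_type=None):
    """Ask the llama.cpp server for a completion and return the reply text."""
    payload = {
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 256,  # assumed limit
    }
    async with aiohttp.ClientSession() as session:
        # llama.cpp's server exposes an OpenAI-compatible chat endpoint.
        async with session.post(f"{LLAMA_SERVER_URL}/v1/chat/completions",
                                json=payload) as resp:
            resp.raise_for_status()
            data = await resp.json()
    return data["choices"][0]["message"]["content"]
```

With something like this in place, the kindness check above only needs to branch on whether the returned text starts with "yes".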