Skip to content
Snippets Groups Projects

Integrate LangChain and Save Chat History

Merged Clemens Pohle requested to merge 11-langchain into Dev
6 files changed: +33 additions, −38 deletions
Compare changes
  • Side-by-side
  • Inline
Files
6
# ollama_views.py
from django.http import JsonResponse
import requests
import json
import os
from django.views.decorators.csrf import csrf_exempt
from langchain_core.messages import HumanMessage
from langchain_community.chat_models import ChatOllama
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
# Configuration is read from the environment once, at import time.
# NOTE(review): the original built OLLAMA_URL from a second, independent
# os.environ.get("OLLAMA_ENDPOINT") call made *before* OLLAMA_ENDPOINT was
# defined; derive it from OLLAMA_ENDPOINT instead so the two can never
# disagree. If the variable is unset this still raises TypeError at import
# (None + str), exactly like the original.
OLLAMA_ENDPOINT = os.environ.get("OLLAMA_ENDPOINT")
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL")
OLLAMA_URL = OLLAMA_ENDPOINT + '/api/chat'

# In-memory map of session_id -> ChatMessageHistory used by
# get_session_history below.
# NOTE(review): per-process, unbounded, and not persisted — histories are
# lost on restart and grow without limit; consider a bounded/persistent store.
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating one on first use.

    Histories live in the module-level ``store`` dict, so they are shared
    across requests within this process but not persisted anywhere.
    """
    try:
        return store[session_id]
    except KeyError:
        history = ChatMessageHistory()
        store[session_id] = history
        return history
# Module-level LangChain chat client pointed at the configured Ollama server.
# NOTE(review): constructed at import time, so a bad OLLAMA_ENDPOINT or
# OLLAMA_MODEL surfaces here rather than per-request — confirm that is wanted.
chat = ChatOllama(base_url=OLLAMA_ENDPOINT, model=OLLAMA_MODEL)
# Wrap the model so each invocation records/replays conversation history,
# keyed by the "session_id" passed in the run config (see get_session_history).
with_message_history = RunnableWithMessageHistory(chat, get_session_history)
@csrf_exempt
def ollama_query(request):
# NOTE(review): the hunk header below is a diff artifact — part of this
# function's body (the enclosing try:, request-body parsing, and the
# definition of `message`) is elided from this view. The lines that follow
# mix *removed* (old requests-based) and *added* (new LangChain) code from
# the merge request; this text is not runnable Python as-is.
@@ -21,41 +35,15 @@ def ollama_query(request):
# Guard clause (shared by both versions): reject requests with no message.
if not message:
return JsonResponse({"error": "No message provided"}, status=400)
# --- OLD PATH (removed in this MR): hand-rolled call to the Ollama HTTP
# --- API via `requests`, hard-coding the model name "phi3:mini" instead of
# --- using the OLLAMA_MODEL setting.
# Prepare the payload for Ollama API
payload = {
"model": "phi3:mini",
"messages": [
{ "role": "user", "content": message }
]
}
headers = {'Content-Type': 'application/json'}
# Send the request to Ollama API
response = requests.post(OLLAMA_URL, json=payload, headers=headers)
# Log the request and response for debugging
# NOTE(review): these print() calls echo the full user message to stdout —
# prefer the logging module, and avoid logging message content.
print(f"Request to Ollama: {payload}")
print(f"Response status code: {response.status_code}")
print(f"Response content: {response.content}")
# Check for successful response
if response.status_code != 200:
return JsonResponse({"error": "Failed to fetch response from Ollama"}, status=response.status_code)
# Parse the response from Ollama
response_content = response.content.decode('utf-8')
# Log the parsed response
print(f"Parsed response content: {response_content}")
# --- NEW PATH (added in this MR): route the message through the
# --- RunnableWithMessageHistory wrapper so chat history is saved per session.
# NOTE(review): session_id is a hard-coded placeholder string, so *all*
# clients currently share one history bucket — the TODO must be resolved
# before this is multi-user safe.
session_id = "TODO: include in request"
config = {"configurable": {"session_id": session_id}}
# OLD PATH (removed): Ollama streams newline-delimited JSON; each line's
# message.content fragment was concatenated by hand.
# Split the response into lines and process each line
complete_message = ""
for line in response_content.split('\n'):
if line.strip():
json_line = json.loads(line)
complete_message += json_line['message']['content']
# NEW PATH (added): stream chunks from LangChain and accumulate them.
# NOTE(review): chunks are still only printed server-side; the response is
# returned in one piece (see the TODO about forwarding chunks).
for chunks in with_message_history.stream([HumanMessage(message)], config=config):
complete_message += chunks.content
print(chunks.content)
# TODO: Send each chunk to the frontend as it is received
# Two returns back-to-back: the first (with .strip()) is the removed old
# line, the second is the added replacement.
return JsonResponse({"message": complete_message.strip()}, safe=False)
return JsonResponse({"message": complete_message}, safe=False)
# NOTE(review): broad Exception handler — it keeps the endpoint from 500-ing
# with a stack trace, but str(e) may leak internal details to the client.
except Exception as e:
return JsonResponse({"error": str(e)}, status=500)
Loading