diff --git a/backend/artguide/ollama_views.py b/backend/artguide/ollama_views.py
index 236cf8fced1df11954f011895019bc9a94882ef7..4f1197719554c008e27cab29e560b088fda92a83 100644
--- a/backend/artguide/ollama_views.py
+++ b/backend/artguide/ollama_views.py
@@ -1,12 +1,13 @@
 # ollama_views.py
 
 from django.http import JsonResponse
-import requests
 import json
 import os
 from django.views.decorators.csrf import csrf_exempt
+from langchain_core.messages import HumanMessage
+from langchain_community.chat_models import ChatOllama
 
-OLLAMA_URL = os.environ.get("OLLAMA_ENDPOINT") + '/api/chat'
+OLLAMA_ENDPOINT = os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434")
 OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL")
 
 @csrf_exempt
@@ -22,41 +23,13 @@ def ollama_query(request):
         if not message:
             return JsonResponse({"error": "No message provided"}, status=400)
 
-        # Prepare the payload for Ollama API
-        payload = {
-            "model": OLLAMA_MODEL,
-            "messages": [
-                { "role": "user", "content": message }
-            ]
-        }
-
-        headers = {'Content-Type': 'application/json'}
-
-        # Send the request to Ollama API
-        response = requests.post(OLLAMA_URL, json=payload, headers=headers)
-
-        # Log the request and response for debugging
-        print(f"Request to Ollama: {payload}")
-        print(f"Response status code: {response.status_code}")
-        print(f"Response content: {response.content}")
-
-        # Check for successful response
-        if response.status_code != 200:
-            return JsonResponse({"error": "Failed to fetch response from Ollama"}, status=response.status_code)
-
-        # Parse the response from Ollama
-        response_content = response.content.decode('utf-8')
-
-        # Log the parsed response
-        print(f"Parsed response content: {response_content}")
-
-        # Split the response into lines and process each line
+        chat = ChatOllama(base_url=OLLAMA_ENDPOINT, model=OLLAMA_MODEL)
         complete_message = ""
-        for line in response_content.split('\n'):
-            if line.strip():
-                json_line = json.loads(line)
-                complete_message += json_line['message']['content']
+        for chunk in chat.stream([HumanMessage(message)]):
+            complete_message += chunk.content
+            print(chunk.content)
+            # TODO: Send each chunk to the frontend as it is received
 
-        return JsonResponse({"message": complete_message.strip()}, safe=False)
+        return JsonResponse({"message": complete_message}, safe=False)
     except Exception as e:
         return JsonResponse({"error": str(e)}, status=500)
diff --git a/backend/requirements.txt b/backend/requirements.txt
index b69f5d2baa819854cb873eabc07bf525dc8b4732..32d925420e946170ad0bb5ad1055a4dbe70dec3f 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -1,4 +1,5 @@
 Django>=3.0,<4.0
 sparqlwrapper>=2.0.0
 django-cors-headers>=4.0.0
-requests
+langchain
+langchain_community