Quentin Ulmer / ArtGuide · Merge request !10

Integrate LangChain and Save Chat History
Merged · Clemens Pohle requested to merge 11-langchain into Dev · 11 months ago
Closes #11 (closed)
Merge request reports

Comparing Dev (base) with latest version 53e2318f · 5 commits, 11 months ago · 6 files changed: +33 −38
backend/artguide/ollama_views.py · +23 −35
```diff
 # ollama_views.py
 from django.http import JsonResponse
 import requests
 import json
 import os
 from django.views.decorators.csrf import csrf_exempt
+from langchain_core.messages import HumanMessage
+from langchain_community.chat_models import ChatOllama
+from langchain_community.chat_message_histories import ChatMessageHistory
+from langchain_core.chat_history import BaseChatMessageHistory
+from langchain_core.runnables.history import RunnableWithMessageHistory

-OLLAMA_URL = os.environ.get("OLLAMA_ENDPOINT") + '/api/chat'
+OLLAMA_ENDPOINT = os.environ.get("OLLAMA_ENDPOINT")
+OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL")
+
+store = {}
+
+def get_session_history(session_id: str) -> BaseChatMessageHistory:
+    if session_id not in store:
+        store[session_id] = ChatMessageHistory()
+    return store[session_id]
+
+chat = ChatOllama(base_url=OLLAMA_ENDPOINT, model=OLLAMA_MODEL)
+with_message_history = RunnableWithMessageHistory(chat, get_session_history)

 @csrf_exempt
 def ollama_query(request):
```
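The heart of the change is this new module-level wiring: RunnableWithMessageHistory wraps the ChatOllama model and, on every call, uses get_session_history to fetch (or create) the ChatMessageHistory for the caller's session_id, replaying the stored turns before the new message and appending the exchange afterwards. A minimal standalone sketch of the same pattern — the base_url, model, and session id here are placeholders, not this project's environment values:

```python
from langchain_community.chat_models import ChatOllama
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import HumanMessage
from langchain_core.runnables.history import RunnableWithMessageHistory

store = {}

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    # First call for a session id creates its in-memory history
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

# Placeholder endpoint/model; the MR reads these from OLLAMA_ENDPOINT/OLLAMA_MODEL
chat = ChatOllama(base_url="http://localhost:11434", model="phi3:mini")
with_history = RunnableWithMessageHistory(chat, get_session_history)

config = {"configurable": {"session_id": "demo-session"}}
with_history.invoke([HumanMessage("My name is Ada.")], config=config)
# Second call with the same session id: the stored turns are replayed first,
# so the model can answer from the earlier message.
reply = with_history.invoke([HumanMessage("What is my name?")], config=config)
print(reply.content)
```

Because store is a plain module-level dict, histories live only as long as the Django process; a restart or a second worker loses or splits the sessions.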
```diff
@@ -21,41 +35,15 @@ def ollama_query(request):
         if not message:
             return JsonResponse({"error": "No message provided"}, status=400)

-        # Prepare the payload for Ollama API
-        payload = {
-            "model": "phi3:mini",
-            "messages": [
-                {"role": "user", "content": message}
-            ]
-        }
-        headers = {'Content-Type': 'application/json'}
-        # Send the request to Ollama API
-        response = requests.post(OLLAMA_URL, json=payload, headers=headers)
-        # Log the request and response for debugging
-        print(f"Request to Ollama: {payload}")
-        print(f"Response status code: {response.status_code}")
-        print(f"Response content: {response.content}")
-        # Check for successful response
-        if response.status_code != 200:
-            return JsonResponse({"error": "Failed to fetch response from Ollama"}, status=response.status_code)
-        # Parse the response from Ollama
-        response_content = response.content.decode('utf-8')
-        # Log the parsed response
-        print(f"Parsed response content: {response_content}")
+        session_id = "TODO: include in request"
+        config = {"configurable": {"session_id": session_id}}

-        # Split the response into lines and process each line
         complete_message = ""
-        for line in response_content.split('\n'):
-            if line.strip():
-                json_line = json.loads(line)
-                complete_message += json_line['message']['content']
+        for chunks in with_message_history.stream([HumanMessage(message)], config=config):
+            complete_message += chunks.content
+            print(chunks.content)
+            # TODO: Send each chunk to the frontend as it is received

-        return JsonResponse({"message": complete_message.strip()}, safe=False)
+        return JsonResponse({"message": complete_message}, safe=False)
     except Exception as e:
         return JsonResponse({"error": str(e)}, status=500)
```
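Two TODOs remain in the merged hunk. The first is the hard-coded session_id ("TODO: include in request"), which means every caller currently shares one chat history. One hypothetical way the client could supply it — assuming the view parses a JSON body for message in the lines elided above this hunk, which the diff doesn't show:

```python
import json

def read_chat_request(request):
    # Hypothetical helper, not part of this MR: pull "message" and an
    # optional "session_id" out of the JSON request body.
    body = json.loads(request.body)
    message = body.get("message")
    session_id = body.get("session_id", "default")
    return message, session_id
```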
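The second TODO ("Send each chunk to the frontend as it is received") maps naturally onto Django's StreamingHttpResponse, which consumes a generator. A sketch of that direction — not this MR's implementation — assuming the module-level with_message_history from the diff and the hypothetical read_chat_request helper above; it would replace the buffered JsonResponse rather than extend it:

```python
from django.http import StreamingHttpResponse
from langchain_core.messages import HumanMessage

def ollama_query_stream(request):
    # Hypothetical streaming variant of ollama_query: yield tokens as they
    # arrive instead of accumulating them into complete_message.
    message, session_id = read_chat_request(request)
    config = {"configurable": {"session_id": session_id}}

    def token_stream():
        for chunk in with_message_history.stream([HumanMessage(message)], config=config):
            yield chunk.content

    return StreamingHttpResponse(token_stream(), content_type="text/plain")
```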