-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path utils.py
25 lines (22 loc) · 909 Bytes
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# from langchain.llms import OpenAI
from langchain_community.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryMemory
import streamlit as st
import os
# Read the API key once at import time. langchain's OpenAI client reads
# OPENAI_API_KEY from the environment itself, so this binding is mainly a
# convenient module-level handle; it is None when the variable is unset.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
def get_response(userInput):
    """Return the LLM's reply to *userInput*, keeping conversation state.

    Lazily builds a ``ConversationChain`` (with running-summary memory) on the
    first call and caches it in Streamlit's session state so the conversation
    survives Streamlit's script reruns.

    Args:
        userInput: The user's latest chat message.

    Returns:
        The model's response string.
    """
    # Use .get() so the first run — when the 'conversation' key does not
    # exist yet — takes the init branch instead of raising KeyError.
    if st.session_state.get('conversation') is None:
        llm = OpenAI(
            temperature=0,  # deterministic responses for a consistent chat
            model_name='gpt-3.5-turbo-instruct'  # 'text-davinci-003' is deprecated
        )
        # Cache the chain in session state (same as a module-level variable,
        # but persistent across Streamlit reruns).
        st.session_state['conversation'] = ConversationChain(
            llm=llm,
            # verbose=True,
            memory=ConversationSummaryMemory(llm=llm)
        )
    response = st.session_state['conversation'].predict(input=userInput)
    # st.session_state['conversation'].memory.buffer holds the running summary
    return response