-
Notifications
You must be signed in to change notification settings - Fork 66
/
app.py
87 lines (58 loc) · 2.4 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import streamlit as st
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from dotenv import load_dotenv
load_dotenv()  # pull OPENAI_API_KEY (and any other secrets) from a local .env file
# 1. Vectorise the sales response csv data
# NOTE(review): this runs at import time — it requires sales_response.csv on
# disk and a valid OpenAI key, and embedding the rows makes network calls.
loader = CSVLoader(file_path="sales_response.csv")
documents = loader.load()
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(documents, embeddings)
# 2. Function for similarity search
def retrieve_info(query, store=None, k=3):
    """Return the text of the stored responses most similar to *query*.

    Generalized from the original: the vector store and the number of
    neighbours are now injectable instead of hard-coded, while the
    defaults preserve the original behavior (module-level ``db``, k=3).

    Args:
        query: Free-text prospect message to match against the store.
        store: Object exposing ``similarity_search(query, k=...)``.
            Defaults to the module-level FAISS index ``db``.
        k: Number of nearest documents to return.

    Returns:
        list[str]: ``page_content`` of each of the ``k`` nearest documents.
    """
    vector_store = db if store is None else store
    similar_docs = vector_store.similarity_search(query, k=k)
    return [doc.page_content for doc in similar_docs]
# 3. Setup LLMChain & prompts
# temperature=0 keeps output deterministic: the same prospect message always
# yields the same draft reply.
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k-0613")

# Fixed: typos that were shown to the model ("practies" -> "practices",
# "ton of voice" -> "tone of voice", "practice are" -> "practices are").
template = """
You are a world class business development representative.
I will share a prospect's message with you and you will give me the best answer that
I should send to this prospect based on past best practices,
and you will follow ALL of the rules below:
1/ Response should be very similar or even identical to the past best practices,
in terms of length, tone of voice, logical arguments and other details
2/ If the best practices are irrelevant, then try to mimic the style of the best practices to prospect's message
Below is a message I received from the prospect:
{message}
Here is a list of best practices of how we normally respond to prospect in similar scenarios:
{best_practice}
Please write the best response that I should send to this prospect:
"""

prompt = PromptTemplate(
    input_variables=["message", "best_practice"],
    template=template,
)

chain = LLMChain(llm=llm, prompt=prompt)
# 4. Retrieval augmented generation
def generate_response(message):
    """Draft a reply to *message*, grounded in the most similar past responses."""
    examples = retrieve_info(message)
    return chain.run(message=message, best_practice=examples)
# 5. Build an app with streamlit
def main():
    """Render the Streamlit UI and show a generated reply for the entered message."""
    st.set_page_config(
        page_title="Customer response generator",
        page_icon=":bird:",
    )
    st.header("Customer response generator :bird:")

    message = st.text_area("customer message")
    if not message:
        return  # nothing typed yet — render the empty form and wait
    st.write("Generating best practice message...")
    st.info(generate_response(message))


if __name__ == '__main__':
    main()