Commit 88467b7

better templates
mruwnik committed Dec 8, 2024
1 parent 14af5c8 commit 88467b7
Showing 3 changed files with 20 additions and 49 deletions.
45 changes: 3 additions & 42 deletions api/src/stampy_chat/chat.py
@@ -223,34 +223,8 @@ def create_outputs(self, llm_result) -> List[Dict[str, Any]]:
return [dict(self.inputs, **r) for r in result]


def make_history_summary(settings):
model = get_model(
streaming=False,
max_tokens=settings.maxHistorySummaryTokens,
model=settings.completions
)
summary_prompt = PrefixedPrompt(
input_variables=['history'],
messages_field='history',
prompt=settings.history_summary_prompt,
transformer=ChatMessage,
)
return LLMInputsChain(
llm=model,
verbose=False,
output_key='history_summary',
prompt=ModeratedChatPrompt.from_messages([
summary_prompt,
ChatPromptTemplate.from_messages([
HumanMessagePromptTemplate.from_template(template='Q: {query}', role='user'),
]),
HumanMessage(content="Reply in one sentence only"),
]),
)


def make_prompt(settings, chat_model, callbacks):
"""Create a proper prompt object will all the nessesery steps."""
"""Create a proper prompt object with all the necessary steps."""
# 1. Create the context prompt from items fetched from pinecone
context_template = "\n\n[{{reference}}] {{title}} {{authors | join(', ')}} - {{date_published}} {{text}}\n\n"
context_prompt = MessageBufferPromptTemplate(
@@ -269,8 +243,7 @@ def make_prompt(settings, chat_model, callbacks):
[
HumanMessage(content=settings.question_prompt),
HumanMessagePromptTemplate.from_template(
template='{history_summary}{delimiter}{query}',
partial_variables={"delimiter": lambda **kwargs: ": " if kwargs.get("history_summary") else ""}
template=f'{settings.question_marker}: {{query}}',
),
]
)
@@ -344,32 +317,20 @@ def run_query(session_id: str, query: str, history: List[Dict], settings: Settin
model=settings.completions
)

history_summary_chain = make_history_summary(settings)

if history:
history_summary_result = history_summary_chain.invoke({"query": query, 'history': history})
history_summary = history_summary_result.get('history_summary', '')
else:
history_summary = ''

delimiter = ": " if history_summary else ""

llm_chain = LLMChain(
llm=chat_model,
verbose=False,
prompt=make_prompt(settings, chat_model, callbacks),
memory=make_memory(settings, history, callbacks)
)

chain = history_summary_chain | llm_chain
chain = llm_chain
if followups:
chain = chain | StampyChain(callbacks=callbacks)

chain_input = {
"query": query,
'history': history,
'history_summary': history_summary,
'delimiter': delimiter,
}

result = chain.invoke(chain_input)
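With this change the history-summary pre-chain is gone: instead of prefixing the query with a generated one-sentence summary and a dynamic delimiter, the prompt prefixes it with a fixed marker taken from the settings. A minimal sketch of the new question message, assuming the default marker defined in settings.py below (the example query is illustrative):

    from langchain.prompts import HumanMessagePromptTemplate

    question_marker = "Question:"  # settings.question_marker default
    question_template = HumanMessagePromptTemplate.from_template(
        template=f"{question_marker}: {{query}}",
    )
    # Renders a single human message; the raw chat history is handled by the
    # chain's memory rather than being summarised up front.
    message = question_template.format(query="Why is AI alignment hard?")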
2 changes: 1 addition & 1 deletion api/src/stampy_chat/followups.py
@@ -76,7 +76,7 @@ class Config:

@property
def input_keys(self) -> List[str]:
return ['query', 'text', 'history_summary']
return ['query', 'text']

@property
def output_keys(self) -> List[str]:
22 changes: 16 additions & 6 deletions api/src/stampy_chat/settings.py
@@ -9,28 +9,33 @@

SOURCE_PROMPT = (
"You are a helpful assistant knowledgeable about AI Alignment and Safety. "
"Please give a clear and coherent answer to the user's questions. (written after \"Q:\") "
"Please give a clear and coherent answer to the user's questions. (written after \"Question:\") "
"using the following sources. Each source is labeled with a number. Feel free to "
"use the sources in any order, and try to reference up to 8 sources in your answers.\n\n"
"# Sources\n"
)
HISTORY_PROMPT = (
"\n\n"
"Before the question (\"Q: \"), there will be a history of previous questions and answers. "
"# History:\n\n"
"Before the question (\"Question:\"), there will be a history of previous questions and answers. "
"These sources only apply to the last question. Any sources used in previous answers "
"are invalid."
)
HISTORY_SUMMARIZE_PROMPT = (
"You are a helpful assistant knowledgeable about AI Alignment and Safety. "
"Please summarize the following chat history (written after \"H:\") in one "
"sentence so as to put the current questions (written after \"Q:\") in context. "
"Please summarize the following chat history (written after \"History:\") in one "
"sentence so as to put the current questions (written after \"Question:\") in context. "
"Please keep things as terse as possible."
"\nH:"
"\nHistory:"
)

QUESTION_PROMPT = (
"# Question context:\n\n"
"In your answer, please cite any claims you make back to each source "
"using the format: [1], [2], etc. If you use multiple sources to make a claim "
"cite all of them. For example: \"AGI is concerning [1, 3, 8].\"\n\n"
"cite all of them. For example: \"AGI is concerning [1, 3, 8].\"\n"
"Don't explicitly mention the sources unless it impacts the flow of your answer - just cite "
"them. Don't repeat the question in your answer. \n\n"
)
PROMPT_MODES = {
'default': "",
@@ -57,6 +62,7 @@
'history_summary': HISTORY_SUMMARIZE_PROMPT,
'question': QUESTION_PROMPT,
'modes': PROMPT_MODES,
"question_marker": "Question:",
}
OPENAI = 'openai'
ANTHROPIC = 'anthropic'
@@ -189,6 +195,10 @@ def mode_prompt(self):
def question_prompt(self):
return self.prompts['question'] + self.mode_prompt

@property
def question_marker(self):
return self.prompts['question_marker']

@property
def context_tokens(self):
"""The max number of tokens to be used for the context"""
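The new marker lives in the prompts dict alongside the other prompt fragments, so it can in principle be overridden the same way they are. A rough usage sketch, under the assumption (not shown in this diff) that Settings accepts a prompts mapping that overlays DEFAULT_PROMPTS:

    from stampy_chat.settings import Settings

    # Assumed constructor behaviour: keys passed here overlay DEFAULT_PROMPTS,
    # which supplies "Question:" when no override is given.
    settings = Settings(prompts={"question_marker": "Q:"})
    print(settings.question_marker)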
