From 88467b74d64d255b3688b8658e4bf1db0e4d5921 Mon Sep 17 00:00:00 2001
From: Daniel O'Connell
Date: Sun, 8 Dec 2024 22:25:58 +0100
Subject: [PATCH] better templates

---
 api/src/stampy_chat/chat.py      | 45 +++-----------------------------
 api/src/stampy_chat/followups.py |  2 +-
 api/src/stampy_chat/settings.py  | 22 +++++++++++-----
 3 files changed, 20 insertions(+), 49 deletions(-)

diff --git a/api/src/stampy_chat/chat.py b/api/src/stampy_chat/chat.py
index b8e24e5..f7eabb7 100644
--- a/api/src/stampy_chat/chat.py
+++ b/api/src/stampy_chat/chat.py
@@ -223,34 +223,8 @@ def create_outputs(self, llm_result) -> List[Dict[str, Any]]:
         return [dict(self.inputs, **r) for r in result]
 
 
-def make_history_summary(settings):
-    model = get_model(
-        streaming=False,
-        max_tokens=settings.maxHistorySummaryTokens,
-        model=settings.completions
-    )
-    summary_prompt = PrefixedPrompt(
-        input_variables=['history'],
-        messages_field='history',
-        prompt=settings.history_summary_prompt,
-        transformer=ChatMessage,
-    )
-    return LLMInputsChain(
-        llm=model,
-        verbose=False,
-        output_key='history_summary',
-        prompt=ModeratedChatPrompt.from_messages([
-            summary_prompt,
-            ChatPromptTemplate.from_messages([
-                HumanMessagePromptTemplate.from_template(template='Q: {query}', role='user'),
-            ]),
-            HumanMessage(content="Reply in one sentence only"),
-        ]),
-    )
-
-
 def make_prompt(settings, chat_model, callbacks):
-    """Create a proper prompt object will all the nessesery steps."""
+    """Create a proper prompt object with all the necessary steps."""
     # 1. Create the context prompt from items fetched from pinecone
     context_template = "\n\n[{{reference}}] {{title}} {{authors | join(', ')}} - {{date_published}} {{text}}\n\n"
     context_prompt = MessageBufferPromptTemplate(
@@ -269,8 +243,7 @@ def make_prompt(settings, chat_model, callbacks):
         [
             HumanMessage(content=settings.question_prompt),
             HumanMessagePromptTemplate.from_template(
-                template='{history_summary}{delimiter}{query}',
-                partial_variables={"delimiter": lambda **kwargs: ": " if kwargs.get("history_summary") else ""}
+                template=f'{settings.question_marker}: {{query}}',
             ),
         ]
     )
@@ -344,16 +317,6 @@ def run_query(session_id: str, query: str, history: List[Dict], settings: Settin
         model=settings.completions
     )
 
-    history_summary_chain = make_history_summary(settings)
-
-    if history:
-        history_summary_result = history_summary_chain.invoke({"query": query, 'history': history})
-        history_summary = history_summary_result.get('history_summary', '')
-    else:
-        history_summary = ''
-
-    delimiter = ": " if history_summary else ""
-
     llm_chain = LLMChain(
         llm=chat_model,
         verbose=False,
@@ -361,15 +324,13 @@ def run_query(session_id: str, query: str, history: List[Dict], settings: Settin
         memory=make_memory(settings, history, callbacks)
     )
 
-    chain = history_summary_chain | llm_chain
+    chain = llm_chain
     if followups:
         chain = chain | StampyChain(callbacks=callbacks)
 
     chain_input = {
         "query": query,
         'history': history,
-        'history_summary': history_summary,
-        'delimiter': delimiter,
     }
 
     result = chain.invoke(chain_input)
diff --git a/api/src/stampy_chat/followups.py b/api/src/stampy_chat/followups.py
index 931a2e3..d169fb9 100644
--- a/api/src/stampy_chat/followups.py
+++ b/api/src/stampy_chat/followups.py
@@ -76,7 +76,7 @@ class Config:
 
     @property
     def input_keys(self) -> List[str]:
-        return ['query', 'text', 'history_summary']
+        return ['query', 'text']
 
     @property
     def output_keys(self) -> List[str]:
diff --git a/api/src/stampy_chat/settings.py b/api/src/stampy_chat/settings.py
index 195a014..6f1de6a 100644
--- a/api/src/stampy_chat/settings.py
+++ b/api/src/stampy_chat/settings.py
@@ -9,28 +9,33 @@
 SOURCE_PROMPT = (
     "You are a helpful assistant knowledgeable about AI Alignment and Safety. "
-    "Please give a clear and coherent answer to the user's questions. (written after \"Q:\") "
+    "Please give a clear and coherent answer to the user's questions. (written after \"Question:\") "
     "using the following sources. Each source is labeled with a number. Feel free to "
     "use the sources in any order, and try to reference up to 8 sources in your answers.\n\n"
+    "# Sources\n"
 )
 HISTORY_PROMPT = (
     "\n\n"
-    "Before the question (\"Q: \"), there will be a history of previous questions and answers. "
+    "# History:\n\n"
+    "Before the question (\"Question:\"), there will be a history of previous questions and answers. "
     "These sources only apply to the last question. Any sources used in previous answers "
     "are invalid."
 )
 HISTORY_SUMMARIZE_PROMPT = (
     "You are a helpful assistant knowledgeable about AI Alignment and Safety. "
-    "Please summarize the following chat history (written after \"H:\") in one "
-    "sentence so as to put the current questions (written after \"Q:\") in context. "
+    "Please summarize the following chat history (written after \"History:\") in one "
+    "sentence so as to put the current questions (written after \"Question:\") in context. "
     "Please keep things as terse as possible."
-    "\nH:"
+    "\nHistory:"
 )
 
 QUESTION_PROMPT = (
+    "# Question context:\n\n"
     "In your answer, please cite any claims you make back to each source "
     "using the format: [1], [2], etc. If you use multiple sources to make a claim "
-    "cite all of them. For example: \"AGI is concerning [1, 3, 8].\"\n\n"
+    "cite all of them. For example: \"AGI is concerning [1, 3, 8].\"\n"
+    "Don't explicitly mention the sources unless it impacts the flow of your answer - just cite "
+    "them. Don't repeat the question in your answer. \n\n"
 )
 
 PROMPT_MODES = {
     'default': "",
@@ -57,6 +62,7 @@
     'history_summary': HISTORY_SUMMARIZE_PROMPT,
     'question': QUESTION_PROMPT,
     'modes': PROMPT_MODES,
+    "question_marker": "Question:",
 }
 OPENAI = 'openai'
 ANTHROPIC = 'anthropic'
@@ -189,6 +195,10 @@ def mode_prompt(self):
     def question_prompt(self):
         return self.prompts['question'] + self.mode_prompt
 
+    @property
+    def question_marker(self):
+        return self.prompts['question_marker']
+
     @property
     def context_tokens(self):
         """The max number of tokens to be used for the context"""
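
Note (illustration only, not part of the diff): a minimal sketch of how the new question_marker setting is consumed, assuming the Settings class can be constructed with its defaults and imported as below; the import path is inferred from the package layout.

    from stampy_chat.settings import Settings

    settings = Settings()

    # The marker is resolved from the "question_marker" entry added to the prompts dict.
    settings.question_marker   # -> "Question:"

    # make_prompt() now builds the human message template directly from this marker,
    # replacing the removed history_summary/delimiter machinery; {query} is filled in
    # later by HumanMessagePromptTemplate.
    template = f"{settings.question_marker}: {{query}}"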