Dedicate one set of chat handlers per room (#9)
* create new set of chat handlers per room

* make YChat an instance attribute on BaseChatHandler

* revert changes to chat handlers

* pre-commit

* use room_id local var

Co-authored-by: Nicolas Brichet <[email protected]>

---------

Co-authored-by: Nicolas Brichet <[email protected]>
dlqqq and brichet authored Dec 5, 2024
1 parent 7498eb5 commit c1b3d5d
Showing 13 changed files with 210 additions and 202 deletions.
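
The diffs below show the shape of the change: `process_message`, `reply`, `parse_args`, and `pending` drop their per-call `chat` parameter, because each handler is now constructed for a single room and holds that room's `YChat` as an instance attribute. A minimal sketch of the per-room pattern; the `handlers_by_room` registry and `init_room_handlers` helper are illustrative assumptions, not code from this commit:

    from jupyterlab_chat.ychat import YChat

    class BaseChatHandler:
        def __init__(self, ychat: YChat):
            # The room's YChat document is bound at construction time, so
            # message-handling methods no longer need a `chat` argument.
            self.ychat = ychat

    # Hypothetical registry: one fresh set of handlers per room ID.
    handlers_by_room: dict[str, dict[str, BaseChatHandler]] = {}

    def init_room_handlers(room_id: str, ychat: YChat) -> None:
        # A new handler set per room (instead of one shared global set) keeps
        # each handler writing to its own room, with no cross-room state.
        handlers_by_room[room_id] = {"default": BaseChatHandler(ychat)}
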
@@ -1,6 +1,5 @@
 from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
 from jupyter_ai.models import HumanChatMessage
-from jupyterlab_chat.ychat import YChat


 class TestSlashCommand(BaseChatHandler):
@@ -26,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

-    async def process_message(self, message: HumanChatMessage, chat: YChat):
-        self.reply("This is the `/test` slash command.", chat)
+    async def process_message(self, message: HumanChatMessage):
+        self.reply("This is the `/test` slash command.")
packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py (15 changes: 7 additions & 8 deletions)
@@ -1,9 +1,8 @@
 import argparse
-from typing import Dict, Optional, Type
+from typing import Dict, Type

 from jupyter_ai.models import HumanChatMessage
 from jupyter_ai_magics.providers import BaseProvider
-from jupyterlab_chat.ychat import YChat
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferWindowMemory
 from langchain_core.prompts import PromptTemplate
@@ -60,32 +59,32 @@ def create_llm_chain(
             verbose=False,
         )

-    async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]):
-        args = self.parse_args(message, chat)
+    async def process_message(self, message: HumanChatMessage):
+        args = self.parse_args(message)
         if args is None:
             return
         query = " ".join(args.query)
         if not query:
-            self.reply(f"{self.parser.format_usage()}", chat, message)
+            self.reply(f"{self.parser.format_usage()}", message)
             return

         self.get_llm_chain()

         try:
-            with self.pending("Searching learned documents", message, chat=chat):
+            with self.pending("Searching learned documents", message):
                 assert self.llm_chain
                 # TODO: migrate this class to use a LCEL `Runnable` instead of
                 # `Chain`, then remove the below ignore comment.
                 result = await self.llm_chain.acall(  # type:ignore[attr-defined]
                     {"question": query}
                 )
                 response = result["answer"]
-            self.reply(response, chat, message)
+            self.reply(response, message)
         except AssertionError as e:
             self.log.error(e)
             response = """Sorry, an error occurred while reading the from the learned documents.
             If you have changed the embedding provider, try deleting the existing index by running
             `/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
             and then asking the question again.
             """
-            self.reply(response, chat, message)
+            self.reply(response, message)
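
Taken together with the registry sketched earlier, dispatch needs only a room ID to find the right handler set; the message itself no longer carries any chat reference. A hypothetical driver, reusing `handlers_by_room` from the sketch above (`route_message` and the command key are illustrative):

    from jupyter_ai.models import HumanChatMessage

    async def route_message(room_id: str, command: str, message: HumanChatMessage) -> None:
        # Look up the room's own handler, then invoke it with the message
        # alone; no YChat argument is threaded through the call.
        handler = handlers_by_room[room_id][command]
        await handler.process_message(message)
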