diff --git a/examples/example.py b/examples/example.py
index 6214b265..165b78b7 100644
--- a/examples/example.py
+++ b/examples/example.py
@@ -20,19 +20,21 @@ async def main():
     """Main"""
     talky = MyLLM()
     # asyncio.ensure_future(async_foo())
-
-    logger.info(await talky.chat("My name is Jack"))
+    chat = await talky.chat("My name is Jack")
+    logger.info(chat)
     # Hello Jack, it's nice to meet you!
     # I am an AI language model designed to provide helpful responses.
     # How can I help you today?

     time.sleep(10)
-    logger.info(await talky.chat("tell me who is president of the united states?"))
+    chat = await talky.chat("tell me who is president of the united states?")
+    logger.info(chat)
     # # As of my latest update, the current
     # # President of the United States is Joe Biden.
     # # He was inaugurated on January 20th, 2021 and
     # # is the 46th President of the United States.

     time.sleep(10)
-    logger.info(await talky.chat("what is my name"))
+    chat = await talky.chat("what is my name")
+    logger.info(chat)
     # Your name is Jack, as you mentioned earlier.
diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index b2876ef7..e33d0799 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -26,10 +26,11 @@ llm_model = "gpt-3.5-turbo"
 # Refer to https://github.com/xtekky/gpt4free
 # for the list of supported provider
 llm_provider = "g4f.Provider.Bing"
-
+# Lag in seconds between model calls
+lag = 0
 # Number of conversation history
 # between user and ai
-max_memory = 5
+max_memory = 100

 # help message listing the commands
 # available
@@ -45,7 +45,7 @@ llm_ai_mode = false
 # template prompt context
-# not implemented
+# seeded into the conversation at startup
 llm_template = """
-test
+You are a friendly AI, helping me with tasks
 """
 # llm_template = """
 # You operate within the following constraints:
diff --git a/myllm/main.py b/myllm/main.py
index 5a77a470..d240db43 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -92,10 +92,15 @@ async def chat(self, prompt):
             model=settings.llm_model,
             messages=self.conversation.get_messages(),
         )
-        logger.debug("response {}", response)
+
         self.conversation.add_message("ai", response)
-        sleep(10)
-        return response if response else "No response from the model"
+        sleep(settings.lag)
+        if response:
+            logger.debug("response received {}", response)
+            return response
+        else:
+            logger.debug("No response from the model")
+            return "No response from the model"

     async def clear_chat_history(self):
         """
@@ -113,10 +118,12 @@ class Conversation:
     def __init__(self, max_memory=settings.max_memory):
         self.messages = []
        self.max_memory = max_memory
+        self.template = settings.llm_template
+        self.add_message("user", self.template)

     def add_message(self, role: str, content: str):
         if len(self.messages) >= self.max_memory:
-            self.messages.pop(0)  # Remove the oldest message
+            self.messages.pop(0)
         self.messages.append({"role": role, "content": content})

     def get_messages(self):
diff --git a/pyproject.toml b/pyproject.toml
index 87b8580d..ac303add 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,8 +32,6 @@ loguru = ">=0.6.0"
 httpx = ">=0.24.1"
 js2py = "^0.74"
 g4f = "0.0.3.4"
-# langchain = "0.0.300"
-# nest_asyncio = "*"
 curl-cffi ="0.5.7"
 PyExecJS2="1.6.1"
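
For context, a minimal standalone sketch of how the reworked Conversation class behaves. Names follow myllm/main.py; the template string and the cap are inlined here (using the new defaults from default_settings.toml) instead of being read from settings, and the max_memory=3 demo value is hypothetical. One subtlety worth noting: the template is seeded as an ordinary "user" message, so once the cap is reached, pop(0) evicts it first.

# Minimal sketch of the updated Conversation behavior; template and
# max_memory are inlined instead of coming from settings.
class Conversation:
    def __init__(self, max_memory=100,
                 template="You are a friendly AI, helping me with tasks"):
        self.messages = []
        self.max_memory = max_memory
        self.template = template
        # The template is seeded as the first message, so every
        # chat starts from the same prompt context.
        self.add_message("user", self.template)

    def add_message(self, role: str, content: str):
        # Drop the oldest message once the cap is reached.
        if len(self.messages) >= self.max_memory:
            self.messages.pop(0)
        self.messages.append({"role": role, "content": content})

    def get_messages(self):
        return self.messages


conv = Conversation(max_memory=3)
conv.add_message("user", "My name is Jack")
conv.add_message("ai", "Hello Jack, nice to meet you!")
conv.add_message("user", "What is my name?")
# With a cap of 3, the oldest entry (the template itself) has
# already been evicted:
print([m["role"] for m in conv.get_messages()])
# ['user', 'ai', 'user']

The new default of max_memory = 100 makes this unlikely in practice, but a small user override would silently drop the prompt context; pinning the template (for example, skipping index 0 when trimming) could be a follow-up.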