From 4b2b305f806ce5ce0aca04c3efa286e60a07b17f Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Tue, 26 Sep 2023 11:09:09 +0200
Subject: [PATCH 1/4] =?UTF-8?q?=F0=9F=A5=85?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 examples/example.py | 10 ++++++----
 myllm/main.py       |  2 ++
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/examples/example.py b/examples/example.py
index 6214b265..165b78b7 100644
--- a/examples/example.py
+++ b/examples/example.py
@@ -20,19 +20,21 @@ async def main():
     """Main"""
     talky = MyLLM()
     # asyncio.ensure_future(async_foo())
-
-    logger.info(await talky.chat("My name is Jack"))
+    chat = await talky.chat("My name is Jack")
+    logger.info(chat)
     # Hello Jack, it's nice to meet you!
     # I am an AI language model designed to provide helpful responses.
     # How can I help you today?
     time.sleep(10)
-    logger.info(await talky.chat("tell me who is president of the united states?"))
+    chat = await talky.chat("tell me who is president of the united states?")
+    logger.info(chat)
     # # As of my latest update, the current
     # # President of the United States is Joe Biden.
     # # He was inaugurated on January 20th, 2021 and
     # # is the 46th President of the United States.
     time.sleep(10)
-    logger.info(await talky.chat("what is my name"))
+    chat = await talky.chat("what is my name")
+    logger.info(chat)
     # Your name is Jack, as you mentioned earlier.


diff --git a/myllm/main.py b/myllm/main.py
index 5a77a470..4d870881 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -93,6 +93,8 @@ async def chat(self, prompt):
             messages=self.conversation.get_messages(),
         )
         logger.debug("response {}", response)
+        # response = response.encode("utf-16")
+        # logger.debug("response {}", response)
         self.conversation.add_message("ai", response)
         sleep(10)
         return response if response else "No response from the model"

From e9ea08408a80c4a9be7ea9e86269c2512f57a825 Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Tue, 26 Sep 2023 12:16:12 +0200
Subject: [PATCH 2/4] =?UTF-8?q?=F0=9F=94=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 myllm/main.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/myllm/main.py b/myllm/main.py
index 4d870881..952f3280 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -92,12 +92,15 @@ async def chat(self, prompt):
             model=settings.llm_model,
             messages=self.conversation.get_messages(),
         )
-        logger.debug("response {}", response)
-        # response = response.encode("utf-16")
-        # logger.debug("response {}", response)
+
         self.conversation.add_message("ai", response)
         sleep(10)
-        return response if response else "No response from the model"
+        if response:
+            logger.debug("response received {}", response)
+            return response
+        else:
+            logger.debug("No response from the model")
+            return "No response from the model"

     async def clear_chat_history(self):
         """

From 89a826b066b0a5549b268344387a059e645d141b Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Tue, 26 Sep 2023 13:35:34 +0200
Subject: [PATCH 3/4] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20cleanup?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 myllm/default_settings.toml | 4 ++--
 myllm/main.py               | 3 ++-
 pyproject.toml              | 2 --
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index b2876ef7..f77a2fde 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -29,7 +29,7 @@ llm_provider = "g4f.Provider.Bing"

 # Number of conversation history
 # between user and ai
-max_memory = 5
+max_memory = 100

 # help message listing the commands
 # available
@@ -45,7 +45,7 @@ llm_ai_mode = false
 # template prompt context
 # not implemented
 llm_template = """
-test
+You are a friendly AI, help me with trading
 """
 # llm_template = """
 # You operate within the following constraints:
diff --git a/myllm/main.py b/myllm/main.py
index 952f3280..11fabb93 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -118,10 +118,11 @@ class Conversation:
     def __init__(self, max_memory=settings.max_memory):
         self.messages = []
         self.max_memory = max_memory
+        self.template = settings.template

     def add_message(self, role: str, content: str):
         if len(self.messages) >= self.max_memory:
-            self.messages.pop(0)  # Remove the oldest message
+            self.messages.pop(0)
         self.messages.append({"role": role, "content": content})

     def get_messages(self):
diff --git a/pyproject.toml b/pyproject.toml
index 87b8580d..ac303add 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,8 +32,6 @@ loguru = ">=0.6.0"
 httpx = ">=0.24.1"
 js2py = "^0.74"
 g4f = "0.0.3.4"
-# langchain = "0.0.300"
-# nest_asyncio = "*"
 curl-cffi ="0.5.7"
 PyExecJS2="1.6.1"


From 82d32742876fbb4413b64323ed4b622da34745c1 Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Tue, 26 Sep 2023 13:50:03 +0200
Subject: [PATCH 4/4] =?UTF-8?q?=F0=9F=94=A7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 myllm/default_settings.toml | 4 ++--
 myllm/main.py               | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index f77a2fde..e33d0799 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -26,7 +26,7 @@ llm_model = "gpt-3.5-turbo"
 # Refer to https://github.com/xtekky/gpt4free
 # for the list of supported provider
 llm_provider = "g4f.Provider.Bing"
-
+lag = 0
 # Number of conversation history
 # between user and ai
 max_memory = 100
@@ -45,7 +45,7 @@ llm_ai_mode = false
 # template prompt context
 # not implemented
 llm_template = """
-You are a friendly AI, help me with trading
+You are a friendly AI, helping me with task
 """
 # llm_template = """
 # You operate within the following constraints:
diff --git a/myllm/main.py b/myllm/main.py
index 11fabb93..d240db43 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -94,7 +94,7 @@ async def chat(self, prompt):
         )

         self.conversation.add_message("ai", response)
-        sleep(10)
+        sleep(settings.lag)
         if response:
             logger.debug("response received {}", response)
             return response
@@ -118,7 +118,8 @@ class Conversation:
     def __init__(self, max_memory=settings.max_memory):
         self.messages = []
         self.max_memory = max_memory
-        self.template = settings.template
+        self.template = settings.llm_template
+        self.add_message("user", self.template)

     def add_message(self, role: str, content: str):
         if len(self.messages) >= self.max_memory:
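
Note on the end state of this series: after PATCH 3/4 and PATCH 4/4, Conversation seeds settings.llm_template as an ordinary "user" message in __init__, and add_message trims the oldest message once max_memory is reached. Below is a minimal, self-contained sketch of that behavior; it passes template and max_memory as plain parameters instead of reading the module-level settings object used in myllm/main.py, so those defaults are illustrative only.

    # Standalone sketch of Conversation as of PATCH 4/4.
    class Conversation:
        def __init__(
            self,
            template="You are a friendly AI, helping me with task",
            max_memory=100,
        ):
            self.messages = []
            self.max_memory = max_memory
            self.template = template
            # The template is seeded as a normal "user" message, so it
            # counts toward max_memory and is subject to eviction below.
            self.add_message("user", self.template)

        def add_message(self, role: str, content: str):
            if len(self.messages) >= self.max_memory:
                self.messages.pop(0)  # drop the oldest message first
            self.messages.append({"role": role, "content": content})

        def get_messages(self):
            return self.messages

    # With a small max_memory, the seeded template is the first message
    # to be evicted once the window fills up:
    conversation = Conversation(max_memory=3)
    conversation.add_message("user", "My name is Jack")
    conversation.add_message("ai", "Nice to meet you, Jack!")
    conversation.add_message("user", "what is my name")
    print([m["content"] for m in conversation.get_messages()])
    # ['My name is Jack', 'Nice to meet you, Jack!', 'what is my name']
    # -- the template prompt has already been trimmed out of the context.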