Merge pull request #115 from mraniki/dev
🥅
mraniki authored Sep 26, 2023
2 parents 1b7f094 + 82d3274 commit 31018d9
Showing 4 changed files with 20 additions and 13 deletions.
10 changes: 6 additions & 4 deletions examples/example.py
@@ -20,19 +20,21 @@ async def main():
"""Main"""
talky = MyLLM()
# asyncio.ensure_future(async_foo())

logger.info(await talky.chat("My name is Jack"))
chat = await talky.chat("My name is Jack")
logger.info(chat)
# Hello Jack, it's nice to meet you!
# I am an AI language model designed to provide helpful responses.
# How can I help you today?
time.sleep(10)
logger.info(await talky.chat("tell me who is president of the united states?"))
chat = await talky.chat("tell me who is president of the united states?")
logger.info(chat)
# # As of my latest update, the current
# # President of the United States is Joe Biden.
# # He was inaugurated on January 20th, 2021 and
# # is the 46th President of the United States.
time.sleep(10)
logger.info(await talky.chat("what is my name"))
chat = await talky.chat("what is my name")
logger.info(chat)
# Your name is Jack, as you mentioned earlier.


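For reference, the new call pattern in examples/example.py stores the model's reply before logging it, so the value can be reused. A minimal standalone sketch of that usage, assuming MyLLM is importable from the myllm package as the example implies:

```python
# Minimal usage sketch; assumes `MyLLM` is exported by the myllm package,
# as examples/example.py implies.
import asyncio

from myllm import MyLLM


async def main():
    talky = MyLLM()
    # Keep the reply in a variable so it can be logged, stored, or reused.
    chat = await talky.chat("My name is Jack")
    print(chat)


if __name__ == "__main__":
    asyncio.run(main())
```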
6 changes: 3 additions & 3 deletions myllm/default_settings.toml
@@ -26,10 +26,10 @@ llm_model = "gpt-3.5-turbo"
 # Refer to https://github.com/xtekky/gpt4free
 # for the list of supported provider
 llm_provider = "g4f.Provider.Bing"
-
+lag = 0
 # Number of conversation history
 # between user and ai
-max_memory = 5
+max_memory = 100

 # help message listing the commands
 # available
@@ -45,7 +45,7 @@ llm_ai_mode = false
 # template prompt context
 # not implemented
 llm_template = """
-test
+You are a friendly AI, helping me with task
 """
 # llm_template = """
 # You operate within the following constraints:
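The new lag key is consumed by myllm/main.py below, where sleep(settings.lag) replaces a hard-coded ten-second pause, and max_memory now keeps up to 100 messages of history. A hedged sketch of reading these keys with dynaconf; the default_settings.toml naming suggests it, but this diff does not show the project's actual loader:

```python
# Hypothetical settings loader: the real wiring is not part of this diff,
# so treat the Dynaconf call below as an assumption.
from dynaconf import Dynaconf

settings = Dynaconf(settings_files=["myllm/default_settings.toml"])

print(settings.llm_provider)  # "g4f.Provider.Bing"
print(settings.lag)           # 0 -> seconds to sleep after each model call
print(settings.max_memory)    # 100 -> messages kept in conversation history
```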
15 changes: 11 additions & 4 deletions myllm/main.py
@@ -92,10 +92,15 @@ async def chat(self, prompt):
             model=settings.llm_model,
             messages=self.conversation.get_messages(),
         )
-        logger.debug("response {}", response)
+
         self.conversation.add_message("ai", response)
-        sleep(10)
-        return response if response else "No response from the model"
+        sleep(settings.lag)
+        if response:
+            logger.debug("response received {}", response)
+            return response
+        else:
+            logger.debug("No response from the model")
+            return "No response from the model"

     async def clear_chat_history(self):
         """
@@ -113,10 +118,12 @@ class Conversation:
     def __init__(self, max_memory=settings.max_memory):
         self.messages = []
         self.max_memory = max_memory
+        self.template = settings.llm_template
+        self.add_message("user", self.template)

     def add_message(self, role: str, content: str):
         if len(self.messages) >= self.max_memory:
-            self.messages.pop(0)  # Remove the oldest message
+            self.messages.pop(0)
         self.messages.append({"role": role, "content": content})

     def get_messages(self):
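Two behavioral notes on the Conversation changes: llm_template is now seeded as the first user message, and because it sits in the same list it counts toward max_memory and is eventually evicted like any other entry. A toy reproduction of that sliding window, with the settings.* defaults replaced by plain arguments so it runs standalone:

```python
# Toy reproduction of Conversation's sliding window; settings.* values are
# swapped for plain arguments so the snippet runs on its own.


class Conversation:
    def __init__(self, max_memory=100, template="You are a friendly AI, helping me with task"):
        self.messages = []
        self.max_memory = max_memory
        # Seed the template as the first "user" message.
        self.template = template
        self.add_message("user", self.template)

    def add_message(self, role: str, content: str):
        # Once the cap is reached, evict the oldest message first.
        if len(self.messages) >= self.max_memory:
            self.messages.pop(0)
        self.messages.append({"role": role, "content": content})


history = Conversation(max_memory=3)
for text in ["one", "two", "three"]:
    history.add_message("user", text)

# The seeded template was the oldest entry, so it has already been evicted:
print(history.messages)  # [{'role': 'user', 'content': 'one'}, ...]
```

Raising the default cap from 5 to 100 matters here: with the old cap the seeded template would be pushed out after only a few exchanges.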
2 changes: 0 additions & 2 deletions pyproject.toml
@@ -32,8 +32,6 @@ loguru = ">=0.6.0"
 httpx = ">=0.24.1"
 js2py = "^0.74"
 g4f = "0.0.3.4"
-# langchain = "0.0.300"
-# nest_asyncio = "*"
 curl-cffi ="0.5.7"
 PyExecJS2="1.6.1"

