Merge pull request #109 from mraniki/dev
💥 breaking Remove langchain and simplify the conversation records
mraniki authored Sep 24, 2023
2 parents eade16c + 5d4b4d1 commit 3a8c9da
Showing 4 changed files with 66 additions and 87 deletions.
13 changes: 8 additions & 5 deletions examples/example.py
@@ -4,6 +4,7 @@

import asyncio
import sys
import time

import uvicorn
from fastapi import FastAPI
@@ -18,17 +19,19 @@
async def main():
    """Main"""
    talky = MyLLM()
    # asyncio.ensure_future(async_foo())

    logger.info(await talky.chat("My name is Jack"))
    # Hello Jack, it's nice to meet you!
    # I am an AI language model designed to provide helpful responses.
    # How can I help you today?
    time.sleep(10)
    logger.info(await talky.chat("tell me who is president of the united states?"))
    # As of my latest update, the current
    # President of the United States is Joe Biden.
    # He was inaugurated on January 20th, 2021 and
    # is the 46th President of the United States.

    # # As of my latest update, the current
    # # President of the United States is Joe Biden.
    # # He was inaugurated on January 20th, 2021 and
    # # is the 46th President of the United States.
    time.sleep(10)
    logger.info(await talky.chat("what is my name"))
    # Your name is Jack, as you mentioned earlier.

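A note on pacing: the updated example separates requests with blocking time.sleep calls. A minimal non-blocking variant of the same flow is sketched below; it is only an illustration, not part of the commit, and it assumes MyLLM is importable from the package root as in the example.

# Hypothetical standalone variant of examples/example.py: awaiting asyncio.sleep
# keeps the event loop free between requests instead of blocking it.
import asyncio

from loguru import logger

from myllm import MyLLM


async def main():
    talky = MyLLM()
    logger.info(await talky.chat("My name is Jack"))
    await asyncio.sleep(10)  # non-blocking pause between requests
    logger.info(await talky.chat("what is my name"))


if __name__ == "__main__":
    asyncio.run(main())
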
45 changes: 24 additions & 21 deletions myllm/default_settings.toml
@@ -25,8 +25,8 @@ llm_model = "gpt-3.5-turbo"
# LLM Provider
# Refer to https://github.com/xtekky/gpt4free
# for the list of supported provider
llm_provider = "g4f.Provider.ChatgptAi"

# llm_provider = "g4f.Provider.ChatgptAi"
llm_provider = "g4f.Provider.Bing"
# help message listing the commands
# available
llm_commands = """
@@ -48,29 +48,32 @@ llm_ai_mode = false
# template prompt context
# not implemented
llm_template = """
You operate within the following constraints:
Your task is to devise up to 5 highly effective goals
and an appropriate role-based name (_GPT)
for an autonomous agent, ensuring that the goals
are optimally aligned with the successful
completion of its assigned task.
test
"""
# llm_template = """
# You operate within the following constraints:
# Your task is to devise up to 5 highly effective goals
# and an appropriate role-based name (_GPT)
# for an autonomous agent, ensuring that the goals
# are optimally aligned with the successful
# completion of its assigned task.

The user will provide the task,
you will provide only the output
in the exact format specified below
with no explanation or conversation.
# The user will provide the task,
# you will provide only the output
# in the exact format specified below
# with no explanation or conversation.

Example input:
Help me with a trading monitoring for EURUSD
# Example input:
# Help me with a trading monitoring for EURUSD

Example output:
- Query current trend via /trend
- Access realtime data via /indicator
- Query news via /news
- Advise on a low timeframe and high timeframe trading positioning
- Place an order if suitable and wait for user confirmation
# Example output:
# - Query current trend via /trend
# - Access realtime data via /indicator
# - Query news via /news
# - Advise on a low timeframe and high timeframe trading positioning
# - Place an order if suitable and wait for user confirmation

"""
# """

########################################
### END OF DEFAULT SETTINGS ###
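For context, the llm_provider string above is resolved at runtime by the importlib lookup in myllm/main.py. A minimal sketch of that resolution, assuming the g4f package layout used by this release (a Bing submodule exposing a Bing class):

import importlib

provider_path = "g4f.Provider.Bing"  # the llm_provider setting above
provider_module = importlib.import_module(provider_path)  # import the provider submodule
provider_class = getattr(provider_module, provider_path.split(".")[-1])  # grab the Bing class
provider = provider_class()  # MyLLM.chat then awaits provider.create_async(model=..., messages=...)
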
87 changes: 29 additions & 58 deletions myllm/main.py
@@ -6,15 +6,10 @@

import asyncio
import importlib
from typing import Any, List, Mapping, Optional

import g4f
import nest_asyncio

from g4f import Provider
from langchain.chains import ConversationChain
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from loguru import logger

from myllm import __version__
@@ -50,14 +45,16 @@ def __init__(self):
            None
        """

        self.logger = logger
        self.enabled = settings.llm_enabled
        if not self.enabled:
            return
        self.commands = settings.llm_commands
        self.llm_ai_mode = settings.llm_ai_mode
        self.llm = LangLLM()
        self.conversation = None
        provider_module_name = settings.llm_provider
        provider_module = importlib.import_module(provider_module_name)
        provider_class = getattr(provider_module, provider_module_name.split(".")[-1])
        self.provider = provider_class()
        self.conversation = Conversation()

    async def get_myllm_info(self):
        """
@@ -89,67 +86,41 @@ async def chat(self, prompt):
        Returns:
            str: The predicted response from the conversation model.
        """

        if self.conversation is None:
            self.conversation = ConversationChain(
                llm=self.llm,
                # prompt=PromptTemplate(template=settings.llm_template),
                memory=ConversationBufferMemory(),
            )
        return self.conversation.predict(input=prompt)
        logger.debug("chat {}", prompt)
        self.conversation.add_message("user", prompt)
        logger.debug("conversation {}", self.conversation.get_messages())
        response = await self.provider.create_async(
            model=settings.llm_model,
            messages=self.conversation.get_messages(),
        )
        logger.debug("response {}", response)
        self.conversation.add_message("ai", response)
        logger.debug("conversation {}", self.conversation.get_messages())
        return response

    async def clear_chat_history(self):
        """
        Clears the chat history by setting the `conversation`
        attribute to an empty string.
        """
        self.conversation = ""
        self.conversation = Conversation()

    async def switch_continous_mode(self):
        """ """
        self.llm_ai_mode = not self.llm_ai_mode
        return f"Continous mode {'enabled' if self.llm_ai_mode else 'disabled'}."


class LangLLM(LLM):
    @property
    def _llm_type(self) -> str:
        """
        Returns the type of the _llm_type property.
        :return: A string representing the type of the property.
        :rtype: str
        """
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """
        Calls the ChatCompletion API to generate a response based on the given prompt.
        Args:
            prompt (str): The prompt for the ChatCompletion API.
            stop (Optional[List[str]], optional): A list of strings that,
                if found in the response,
                indicates the response should be truncated. Defaults to None.
class Conversation:
    def __init__(self, max_memory=5):
        self.messages = []
        self.max_memory = max_memory

        Returns:
            str: The generated response from the ChatCompletion API.
        """
    def add_message(self, role: str, content: str):
        if len(self.messages) >= self.max_memory:
            self.messages.pop(0)  # Remove the oldest message
        self.messages.append({"role": role, "content": content})

        nest_asyncio.apply()
        provider_module_name = settings.llm_provider
        provider_module = importlib.import_module(provider_module_name)
        provider_class = getattr(provider_module, provider_module_name.split(".")[-1])
        provider = provider_class()

        out = g4f.ChatCompletion.create(
            model=settings.llm_model,
            provider=provider,
            messages=[{"role": "user", "content": prompt}],
        )
        if stop:
            stop_indexes = (out.find(s) for s in stop if s in out)
            min_stop = min(stop_indexes, default=-1)
            if min_stop > -1:
                out = out[:min_stop]
        return out
    def get_messages(self):
        logger.debug("messages {}", self.messages)
        return self.messages
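
The new Conversation class above replaces langchain's ConversationBufferMemory with a bounded message list. A small usage sketch, assuming the package is importable as myllm.main; once max_memory entries are stored, the oldest is dropped:

from myllm.main import Conversation

conv = Conversation(max_memory=3)
for i in range(5):
    conv.add_message("user", f"message {i}")

# Only the three most recent messages survive; the oldest two were popped.
print([m["content"] for m in conv.get_messages()])
# ['message 2', 'message 3', 'message 4']
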
8 changes: 5 additions & 3 deletions pyproject.toml
@@ -25,13 +25,15 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry.dependencies]
python = "^3.10"
fastapi = ">=0.95.2"
uvicorn = ">=0.22.0"
dynaconf = ">=3.2.0"
loguru = ">=0.6.0"
httpx = ">=0.24.1"
js2py = "^0.74"
g4f = "0.0.3.0"
langchain = "0.0.300"
nest_asyncio = "*"
g4f = "0.0.3.4"
# langchain = "0.0.300"
# nest_asyncio = "*"
curl-cffi ="0.5.7"
PyExecJS2="1.6.1"

