From e05e639bc0a8e6cf72f1d682f919fdf0dac71032 Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Sun, 1 Oct 2023 19:06:15 +0200
Subject: [PATCH 1/7] Update README.md
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 2ca25db9..83ca6180 100644
--- a/README.md
+++ b/README.md
@@ -29,10 +29,10 @@ Interact with LLM in simple way.
talky = MyLLM()
- logger.info(await talky.talk(
+ logger.info(await talky.chat(
prompt="tell me who is president of the united states?"))
# The current President of the United States is Joe Biden.
- logger.info(await talky.talk(prompt="let's start a conversation"))
+ logger.info(await talky.chat(prompt="let's start a conversation"))
# keep the chat history
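
Note: this patch only renames the documented entry point from talk() to chat(). A minimal, hedged sketch of the updated README example as a runnable script, assuming MyLLM is importable from the myllm package and that chat() is an async coroutine returning a string:

    import asyncio

    from loguru import logger
    from myllm import MyLLM


    async def main():
        talky = MyLLM()
        # one-shot question
        logger.info(await talky.chat(
            prompt="tell me who is president of the united states?"))
        # follow-up prompt; the chat history is kept between calls
        logger.info(await talky.chat(prompt="let's start a conversation"))


    asyncio.run(main())
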
From ba7da7ae4687181dc62ea46050816d8d5e8b755b Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Mon, 2 Oct 2023 16:45:02 +0200
Subject: [PATCH 2/7] ✅ Unit Test
Subject: [PATCH 2/7] ✅ Unit Test (decoded)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
tests/test_unit.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tests/test_unit.py b/tests/test_unit.py
index 4488ccec..26394626 100644
--- a/tests/test_unit.py
+++ b/tests/test_unit.py
@@ -57,4 +57,6 @@ async def test_switch_continous_mode(talky):
@pytest.mark.asyncio
async def test_chat(talky):
result = await talky.chat("tell me a story")
+ print(talky.provider)
+ print(talky.model)
assert result is not None
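
The two added print() calls surface which provider class and model string the settings resolved to when the test runs. A hedged sketch of the talky fixture this test depends on, assuming it simply builds a client from the default settings (the real fixture in tests/test_unit.py may differ); the @pytest.mark.asyncio marker additionally requires the pytest-asyncio plugin:

    import pytest

    from myllm import MyLLM


    @pytest.fixture
    def talky():
        # fresh client per test; provider and model are resolved from the settings
        return MyLLM()
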
From 577ce7d8ab4baf759c77090e9b916e1352fb3f7b Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Mon, 2 Oct 2023 16:51:15 +0200
Subject: [PATCH 3/7] 🚨
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
myllm/main.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/myllm/main.py b/myllm/main.py
index 588d0caa..27807210 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -49,7 +49,7 @@ def __init__(self):
provider_module = importlib.import_module(provider_module_name)
provider_class = getattr(provider_module, provider_module_name.split(".")[-1])
self.provider = provider_class()
- self.llm_model = settings.llm_model
+ self.model = settings.llm_model
self.lag = settings.lag
self.conversation = Conversation()
@@ -60,9 +60,7 @@ async def get_myllm_info(self):
Returns:
str: A string containing the MyLLM version, model, and provider.
"""
- return (
- f"ℹ️ MyLLM v{__version__}\n {self.llm_model}\n{self.provider}"
- )
+ return f"ℹ️ MyLLM v{__version__}\n {self.llm_model}\n{self.provider}"
async def chat(self, prompt):
"""
From 1cacbf96db5b47cc12c3f85d094538354a634abf Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Mon, 2 Oct 2023 16:53:51 +0200
Subject: [PATCH 4/7] 🔧
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
myllm/default_settings.toml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index 1b4356a3..3e962824 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -20,7 +20,8 @@ VALUE = "On default"
myllm_enabled = true
# LLM Model to use
-llm_model = "gpt-3.5-turbo"
+# llm_model = "gpt-3.5-turbo"
+llm_model = "gpt_4"
# LLM Provider
# Refer to https://github.com/xtekky/gpt4free
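
A hedged sketch of how these TOML keys are typically read through Dynaconf (pyproject.toml pins dynaconf >= 3.2.0); the settings file paths, environment-variable prefix, and loading details here are assumptions, not the project's exact configuration:

    from dynaconf import Dynaconf

    settings = Dynaconf(
        envvar_prefix="MYLLM",  # assumed prefix
        settings_files=["myllm/default_settings.toml", "settings.toml"],
    )

    print(settings.myllm_enabled)  # True
    print(settings.llm_model)      # "gpt_4" after this patch
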
From 729d0b3fd16028d42ed5acaed94657f69c8c6061 Mon Sep 17 00:00:00 2001
From: mraniki <8766259+mraniki@users.noreply.github.com>
Date: Mon, 2 Oct 2023 16:55:48 +0200
Subject: [PATCH 5/7] 🐛
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
myllm/main.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/myllm/main.py b/myllm/main.py
index 27807210..09725e0c 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -60,7 +60,7 @@ async def get_myllm_info(self):
Returns:
str: A string containing the MyLLM version, model, and provider.
"""
- return f"ℹ️ MyLLM v{__version__}\n {self.llm_model}\n{self.provider}"
+ return f"ℹ️ MyLLM v{__version__}\n {self.model}\n{self.provider}"
async def chat(self, prompt):
"""
@@ -75,7 +75,7 @@ async def chat(self, prompt):
try:
self.conversation.add_message("user", prompt)
response = await self.provider.create_async(
- model=self.llm_model,
+ model=self.model,
messages=self.conversation.get_messages(),
)
sleep(self.lag)
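
This bug-fix patch completes the llm_model -> model rename from the earlier main.py change so that get_myllm_info() and chat() both read the new attribute. A hedged sketch of the resulting chat() method, where the try block, the user-message call, the create_async signature, and the sleep come from the hunk context, while the assistant-side bookkeeping and error handling are assumptions:

    from time import sleep


    async def chat(self, prompt):
        try:
            # record the user turn, then query the provider with the full history
            self.conversation.add_message("user", prompt)
            response = await self.provider.create_async(
                model=self.model,  # renamed from self.llm_model
                messages=self.conversation.get_messages(),
            )
            sleep(self.lag)  # simple throttle between calls
            self.conversation.add_message("assistant", response)  # assumed bookkeeping
            return response
        except Exception as error:
            return f"Error: {error}"  # assumed error handling
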
From 23c16a8f14a258e27ef7780c9e68a8e8abf3c32f Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 15:41:55 +0000
Subject: [PATCH 6/7] ⬆️ 🛠️(deps): update dependency g4f to v0.1.4.4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 95dd58da..aa3a3032 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,7 +31,7 @@ dynaconf = ">=3.2.0"
loguru = ">=0.6.0"
httpx = ">=0.24.1"
js2py = "^0.74"
-g4f = "0.1.4.2"
+g4f = "0.1.4.4"
curl-cffi ="0.5.7"
PyExecJS2="1.6.1"
From ab6dca42d3f08e33bafec93673915d0254bbf506 Mon Sep 17 00:00:00 2001
From: "renovate[bot]"