Skip to content

Commit

Permalink
Merge pull request #13 from filip-michalsky/model_name_config
Browse files Browse the repository at this point in the history
Add the ability to configure the model name when streaming. Currently this supports OpenAI only; an abstraction is still needed to swap in any LLM.
  • Loading branch information
filip-michalsky authored Jul 6, 2023
2 parents 8125086 + 4df1426 commit 3077a01
Show file tree
Hide file tree
Showing 4 changed files with 45 additions and 7 deletions.
5 changes: 3 additions & 2 deletions examples/streaming_generator_example.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
from sales_gpt import SalesGPT
from salesgpt.agents import SalesGPT

from langchain.chat_models import ChatOpenAI

with open('.env','r') as f:
Expand All @@ -25,7 +26,7 @@
sales_agent.seed_agent()

# get generator of the LLM output
generator = sales_agent.step(return_streaming_generator=True)
generator = sales_agent.step(return_streaming_generator=True, model_name="gpt-3.5-turbo-0613")

# operate on streaming LLM output in near-real time
# for instance, do something after each full sentence is generated
Expand Down
8 changes: 4 additions & 4 deletions salesgpt/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def human_step(self, human_input):
self.conversation_history.append(human_input)

@time_logger
def step(self, return_streaming_generator: bool = False):
def step(self, return_streaming_generator: bool = False, model_name="gpt-3.5-turbo-0613"):
"""
Args:
return_streaming_generator (bool): whether or not return
Expand All @@ -80,11 +80,11 @@ def step(self, return_streaming_generator: bool = False):
if not return_streaming_generator:
self._call(inputs={})
else:
return self._streaming_generator()
return self._streaming_generator(model_name=model_name)

# TO-DO: change this — override the "run" method in the SalesConversation chain instead!
@time_logger
def _streaming_generator(self):
def _streaming_generator(self, model_name="gpt-3.5-turbo-0613"):
"""
Sometimes, the sales agent wants to take an action before the full LLM output is available.
For instance, if we want to do text to speech on the partial LLM output.
Expand Down Expand Up @@ -128,7 +128,7 @@ def _streaming_generator(self):
messages=messages,
stop="<END_OF_TURN>",
stream=True,
model="gpt-3.5-turbo-0613",
model=model_name,
)

def _call(self, inputs: Dict[str, Any]) -> None:
Expand Down
2 changes: 1 addition & 1 deletion salesgpt/version.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
"""Version information."""

__version__ = "0.0.2"
__version__ = "0.0.3"
37 changes: 37 additions & 0 deletions tests/test_salesgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,3 +36,40 @@ def test_valid_inference(self, load_env):
assert agent_output is not None, "Agent output cannot be None."
assert isinstance(agent_output, str), "Agent output needs to be of type str"
assert len(agent_output) > 0, "Length of output needs to be greater than 0."


def test_valid_inference_stream(self, load_env):
    """Test that the agent will start and generate the first utterance when streaming.

    Builds a SalesGPT agent, requests a streaming generator from step(),
    reassembles the streamed tokens into one string, and checks that a
    non-empty first utterance was produced.
    """
    llm = ChatOpenAI(temperature=0.9)
    # Model used for the raw streaming call; passed through step() separately
    # from the `llm` object above.
    # NOTE(review): presumably `model_name` should match the model backing
    # `llm` — confirm, since they are configured independently.
    model_name = 'gpt-3.5-turbo'

    sales_agent = SalesGPT.from_llm(
        llm,
        verbose=False,
        salesperson_name="Ted Lasso",
        salesperson_role="Sales Representative",
        company_name="Sleep Haven",
        company_business="""Sleep Haven
                            is a premium mattress company that provides
                            customers with the most comfortable and
                            supportive sleeping experience possible.
                            We offer a range of high-quality mattresses,
                            pillows, and bedding accessories
                            that are designed to meet the unique
                            needs of our customers.""",
    )

    sales_agent.seed_agent()
    sales_agent.determine_conversation_stage()  # optional for demonstration, built into the prompt

    # agent output sample
    # step() returns a generator of raw OpenAI streaming chunks when
    # return_streaming_generator=True.
    stream_generator = sales_agent.step(return_streaming_generator=True, model_name=model_name)
    agent_output=''
    # Each chunk follows the OpenAI streaming schema: the incremental text
    # lives in choices[0]["delta"]["content"] (absent on some chunks, hence .get).
    for chunk in stream_generator:
        token = chunk["choices"][0]["delta"].get("content", "")
        agent_output += token

    assert agent_output is not None, "Agent output cannot be None."
    assert isinstance(agent_output, str), "Agent output needs to be of type str"
    assert len(agent_output) > 0, "Length of output needs to be greater than 0."

0 comments on commit 3077a01

Please sign in to comment.