
add store and metadata params to chat_async
rishsriv committed Dec 6, 2024
1 parent bf32d0e · commit 9ed21d8
Showing 2 changed files with 16 additions and 0 deletions.
defog_utils/utils_llm.py — 12 additions & 0 deletions
@@ -67,6 +67,8 @@ async def chat_anthropic_async(
     json_mode: bool = False,
     response_format=None,
     seed: int = 0,
+    store=True,
+    metadata=None,
 ) -> Optional[LLMResponse]:
     """
     Returns the response from the Anthropic API, the time taken to generate the response, the number of input tokens used, and the number of output tokens used.
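Note that this hunk only changes the signature of chat_anthropic_async; nothing shown here forwards store or metadata to the Anthropic client. The new parameters appear to exist so that all three chat wrappers accept the same arguments.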
@@ -182,6 +184,8 @@ async def chat_openai_async(
     json_mode: bool = False,
     response_format=None,
     seed: int = 0,
+    store=True,
+    metadata=None,
 ) -> Optional[LLMResponse]:
     """
     Returns the response from the OpenAI API, the time taken to generate the response, the number of input tokens used, and the number of output tokens used.
@@ -201,6 +205,8 @@ async def chat_openai_async(
             messages=messages,
             model=model,
             max_completion_tokens=max_completion_tokens,
+            store=store,
+            metadata=metadata,
         )
     else:
         if response_format or json_mode:
@@ -212,6 +218,8 @@ async def chat_openai_async(
                 stop=stop,
                 response_format={"type": "json_object"} if json_mode else response_format,
                 seed=seed,
+                store=store,
+                metadata=metadata,
             )
         else:
             response = await client_openai.chat.completions.create(
@@ -221,6 +229,8 @@ async def chat_openai_async(
                 temperature=temperature,
                 stop=stop,
                 seed=seed,
+                store=store,
+                metadata=metadata,
             )

     if response_format and model not in ["o1-mini", "o1-preview", "o1"]:
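In the OpenAI branches, both parameters map onto fields of the Chat Completions API: store controls whether the completion is retained on OpenAI's side (e.g. for the dashboard and distillation workflows), and metadata is a small dict of string tags attached to the stored completion. A minimal sketch of an equivalent direct call, assuming an openai SDK version recent enough to support these fields and illustrative tag values:

import asyncio
from openai import AsyncOpenAI

client_openai = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

async def main():
    # store=True persists the completion on OpenAI's side; metadata tags it
    # with short string key/value pairs (the values here are hypothetical).
    response = await client_openai.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello"}],
        store=True,
        metadata={"app": "defog_utils", "env": "dev"},
    )
    print(response.choices[0].message.content)

asyncio.run(main())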
@@ -293,6 +303,8 @@ async def chat_together_async(
     json_mode: bool = False,
     response_format=None,
     seed: int = 0,
+    store=True,
+    metadata=None,
 ) -> Optional[LLMResponse]:
     """
     Returns the response from the Together API, the time taken to generate the response, the number of input tokens used, and the number of output tokens used.
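As with the Anthropic hunk above, only the signature of chat_together_async changes here; the visible diff does not forward store or metadata to the Together API.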
defog_utils/utils_multi_llm.py — 4 additions & 0 deletions
@@ -60,6 +60,8 @@ async def chat_async(
     json_mode=False,
     response_format=None,
     seed=0,
+    store=True,
+    metadata=None,
 ) -> LLMResponse:
     """
     Returns the response from the LLM API for a single model that is passed in.
@@ -74,6 +76,8 @@ async def chat_async(
         json_mode=json_mode,
         response_format=response_format,
         seed=seed,
+        store=store,
+        metadata=metadata,
     )
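With the forwarding in place, callers of chat_async can pass the new parameters once and have them relayed to the provider-specific function. A hypothetical call, assuming chat_async's leading parameters include the model and messages (they sit outside the hunks shown):

import asyncio
from defog_utils.utils_multi_llm import chat_async

async def main():
    # store/metadata are relayed to the underlying chat_*_async function;
    # the model and messages arguments here are illustrative.
    resp = await chat_async(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Generate a SQL query"}],
        store=True,
        metadata={"run_id": "example-123"},
    )
    print(resp)

asyncio.run(main())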


Expand Down
