
Commit 0cb4b5d: Fix tests

alexeichhorn committed Apr 28, 2024
1 parent c83ea4e

Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions tests/test_openai.py
@@ -376,7 +376,7 @@ class Output(BaseLLMResponse):
     client = AsyncTypeOpenAI(api_key="mock")
 
     result = await client.chat.completions.generate_output(
-        model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, retry_on_parse_error=5
+        model="gpt-3.5-turbo-0613", prompt=FullExamplePrompt(), max_output_tokens=100, retry_on_parse_error=5
     )
 
     assert isinstance(result, FullExamplePrompt.Output)
@@ -404,7 +404,7 @@ class Output(BaseLLMResponse):
     client = AsyncTypeOpenAI(api_key="mock")
 
     result = await client.chat.completions.generate_output(
-        model="gpt-3.5-turbo",
+        model="gpt-3.5-turbo-0613",
         prompt=non_reducing_prompt_100,
         max_output_tokens=100,
     )
@@ -413,7 +413,7 @@ class Output(BaseLLMResponse):
 
     with pytest.raises(LLMTokenLimitExceeded):
         result = await client.chat.completions.generate_output(
-            model="gpt-3.5-turbo",
+            model="gpt-3.5-turbo-0613",
             prompt=non_reducing_prompt_1000,
             max_output_tokens=100,
         )
@@ -441,7 +441,7 @@ def reduce_if_possible(self) -> bool:
     reducing_prompt_100 = ReducingTestPrompt(100)
 
     result = await client.chat.completions.generate_output(
-        model="gpt-3.5-turbo",
+        model="gpt-3.5-turbo-0613",
         prompt=reducing_prompt_100,
         max_output_tokens=100,
     )
@@ -451,7 +451,7 @@ def reduce_if_possible(self) -> bool:
     reducing_prompt_1000 = ReducingTestPrompt(1000)
 
     result = await client.chat.completions.generate_output(
-        model="gpt-3.5-turbo",
+        model="gpt-3.5-turbo-0613",
         prompt=reducing_prompt_1000,
         max_output_tokens=100,
     )
@@ -482,7 +482,7 @@ class Output(BaseLLMResponse):
     prompt = FullExamplePrompt("test")
 
     result = client.chat.completions.generate_output(
-        model="gpt-3.5-turbo",
+        model="gpt-3.5-turbo-0613",
         prompt=prompt,
         output_type=prompt.Output,
         max_output_tokens=100,
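A likely reading of these test changes: the bare "gpt-3.5-turbo" alias now delegates to the "-0125" snapshot for token counting (see the second file below), and that snapshot has a 16,385-token context window versus 4,096 for "-0613", so tests that expect LLMTokenLimitExceeded at the old limit pin the dated "-0613" snapshot to keep their token budgets deterministic. A condensed, hypothetical form of the pinned test pattern follows; the import paths are assumptions, not verified against the repo, and it presumes the pytest-asyncio plugin:

```python
# Hypothetical condensed form of the pinned-model tests above; import paths
# are assumptions, and non_reducing_prompt_1000 stands in for the prompt
# object built in tests/test_openai.py.
import pytest

from typegpt.exceptions import LLMTokenLimitExceeded  # assumed module path
from typegpt.openai import AsyncTypeOpenAI  # assumed module path


@pytest.mark.asyncio
async def test_token_limit_exceeded(non_reducing_prompt_1000):
    client = AsyncTypeOpenAI(api_key="mock")
    with pytest.raises(LLMTokenLimitExceeded):
        await client.chat.completions.generate_output(
            model="gpt-3.5-turbo-0613",  # dated snapshot, pinned by this commit
            prompt=non_reducing_prompt_1000,
            max_output_tokens=100,
        )
```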
2 changes: 1 addition & 1 deletion typegpt/openai/base_chat_completion.py
@@ -38,7 +38,7 @@ def num_tokens_from_messages(cls, messages: list[EncodedMessage], model: OpenAIC
             print("Warning: model not found. Using cl100k_base encoding.")
             encoding = tiktoken.get_encoding("cl100k_base")
         if model == "gpt-3.5-turbo":
-            return cls.num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
+            return cls.num_tokens_from_messages(messages, model="gpt-3.5-turbo-0125")
         elif model == "gpt-3.5-turbo-16k":
             return cls.num_tokens_from_messages(messages, model="gpt-3.5-turbo-16k-0613")
         elif model == "gpt-4":
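For reference, the patched function follows the tiktoken counting recipe from the OpenAI cookbook: unknown model names fall back to the cl100k_base encoding, and floating aliases are rerouted to a dated snapshot before counting. A minimal standalone sketch of that scheme, assuming the cookbook's documented per-message overhead for the dated gpt-3.5/gpt-4 snapshots; this illustrates the technique, not typegpt's exact implementation:

```python
# Minimal sketch of tiktoken-based chat token counting, modeled on the
# OpenAI cookbook recipe; the overhead constants are the documented values
# for dated gpt-3.5-turbo / gpt-4 snapshots, not taken from typegpt.
import tiktoken


def num_tokens_from_messages(messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0125") -> int:
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown names fall back to cl100k_base, mirroring the diff above.
        encoding = tiktoken.get_encoding("cl100k_base")
    tokens_per_message = 3  # each message carries <|start|>{role}...<|end|> framing
    tokens_per_name = 1     # a "name" field adds one token on top of the role
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


# Example: count a short two-message conversation.
print(num_tokens_from_messages([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]))
```

Note that both snapshots use the same cl100k_base encoding, so this change affects which per-model limits and overheads apply, not how individual strings are tokenized.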
